Refactor to use Volume IDs and remove ambiguity

pull/6/head
Jamie Hannaford 2017-02-21 11:58:16 +01:00
parent 7c76e3994c
commit 4bd71a3b77
6 changed files with 243 additions and 309 deletions

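The thread running through all six files: every lookup that previously paged through the entire volume list and matched on a name or partial ID now issues a single GET by volume ID, and the attach/detach helpers settle on one argument order, (instanceID, volumeID). A minimal sketch of the adopted lookup pattern, assuming an authenticated gophercloud block storage client (the package and helper names here are illustrative, not part of the commit):

package cinder

import (
	"fmt"

	"github.com/gophercloud/gophercloud"
	volumes_v2 "github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes"
)

// getVolumeByID fetches one volume with a direct GET /volumes/{id},
// replacing the old paginated List/EachPage scan that matched on
// v.Name or a partial v.ID.
func getVolumeByID(client *gophercloud.ServiceClient, volumeID string) (*volumes_v2.Volume, error) {
	vol, err := volumes_v2.Get(client, volumeID).Extract()
	if err != nil {
		return nil, fmt.Errorf("getting volume %s: %v", volumeID, err)
	}
	return vol, nil
}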
View File

@@ -31,7 +31,6 @@ import (
volumes_v1 "github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes"
volumes_v2 "github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes"
"github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach"
"github.com/gophercloud/gophercloud/pagination"
"github.com/prometheus/client_golang/prometheus"
"github.com/golang/glog"
@@ -39,7 +38,7 @@ import (
type volumeService interface {
createVolume(opts VolumeCreateOpts) (string, string, error)
getVolume(diskName string) (Volume, error)
getVolume(volumeID string) (Volume, error)
deleteVolume(volumeName string) error
}
@@ -123,107 +122,73 @@ func (volumes *VolumesV2) createVolume(opts VolumeCreateOpts) (string, string, e
return vol.ID, vol.AvailabilityZone, nil
}
func (volumes *VolumesV1) getVolume(diskName string) (Volume, error) {
var volume_v1 volumes_v1.Volume
var volume Volume
func (volumes *VolumesV1) getVolume(volumeID string) (Volume, error) {
startTime := time.Now()
err := volumes_v1.List(volumes.blockstorage, nil).EachPage(func(page pagination.Page) (bool, error) {
vols, err := volumes_v1.ExtractVolumes(page)
if err != nil {
glog.Errorf("Failed to extract volumes: %v", err)
return false, err
} else {
for _, v := range vols {
glog.V(4).Infof("%s %s %v", v.ID, v.Name, v.Attachments)
if v.Name == diskName || strings.Contains(v.ID, diskName) {
volume_v1 = v
return true, nil
}
}
}
// if it reached here then no disk with the given name was found.
errmsg := fmt.Sprintf("Unable to find disk: %s", diskName)
return false, errors.New(errmsg)
})
volumeV1, err := volumes_v1.Get(volumes.blockstorage, volumeID).Extract()
timeTaken := time.Since(startTime).Seconds()
recordOpenstackOperationMetric("get_v1_volume", timeTaken, err)
if err != nil {
glog.Errorf("Error occurred getting volume: %s", diskName)
return volume, err
glog.Errorf("Error occurred getting volume by ID: %s", volumeID)
return Volume{}, err
}
volume.ID = volume_v1.ID
volume.Name = volume_v1.Name
volume.Status = volume_v1.Status
volume := Volume{
ID: volumeV1.ID,
Name: volumeV1.Name,
Status: volumeV1.Status,
}
if len(volume_v1.Attachments) > 0 && volume_v1.Attachments[0]["server_id"] != nil {
volume.AttachedServerId = volume_v1.Attachments[0]["server_id"].(string)
volume.AttachedDevice = volume_v1.Attachments[0]["device"].(string)
if len(volumeV1.Attachments) > 0 && volumeV1.Attachments[0]["server_id"] != nil {
volume.AttachedServerId = volumeV1.Attachments[0]["server_id"].(string)
volume.AttachedDevice = volumeV1.Attachments[0]["device"].(string)
}
return volume, nil
}
func (volumes *VolumesV2) getVolume(diskName string) (Volume, error) {
var volume_v2 volumes_v2.Volume
var volume Volume
func (volumes *VolumesV2) getVolume(volumeID string) (Volume, error) {
startTime := time.Now()
err := volumes_v2.List(volumes.blockstorage, nil).EachPage(func(page pagination.Page) (bool, error) {
vols, err := volumes_v2.ExtractVolumes(page)
if err != nil {
glog.Errorf("Failed to extract volumes: %v", err)
return false, err
} else {
for _, v := range vols {
glog.V(4).Infof("%s %s %v", v.ID, v.Name, v.Attachments)
if v.Name == diskName || strings.Contains(v.ID, diskName) {
volume_v2 = v
return true, nil
}
}
}
// if it reached here then no disk with the given name was found.
errmsg := fmt.Sprintf("Unable to find disk: %s", diskName)
return false, errors.New(errmsg)
})
volumeV2, err := volumes_v2.Get(volumes.blockstorage, volumeID).Extract()
timeTaken := time.Since(startTime).Seconds()
recordOpenstackOperationMetric("get_v2_volume", timeTaken, err)
if err != nil {
glog.Errorf("Error occurred getting volume: %s", diskName)
return volume, err
glog.Errorf("Error occurred getting volume by ID: %s", volumeID)
return Volume{}, err
}
volume.ID = volume_v2.ID
volume.Name = volume_v2.Name
volume.Status = volume_v2.Status
volume := Volume{
ID: volumeV2.ID,
Name: volumeV2.Name,
Status: volumeV2.Status,
}
if len(volume_v2.Attachments) > 0 {
volume.AttachedServerId = volume_v2.Attachments[0].ServerID
volume.AttachedDevice = volume_v2.Attachments[0].Device
if len(volumeV2.Attachments) > 0 {
volume.AttachedServerId = volumeV2.Attachments[0].ServerID
volume.AttachedDevice = volumeV2.Attachments[0].Device
}
return volume, nil
}
func (volumes *VolumesV1) deleteVolume(volumeName string) error {
func (volumes *VolumesV1) deleteVolume(volumeID string) error {
startTime := time.Now()
err := volumes_v1.Delete(volumes.blockstorage, volumeName).ExtractErr()
err := volumes_v1.Delete(volumes.blockstorage, volumeID).ExtractErr()
timeTaken := time.Since(startTime).Seconds()
recordOpenstackOperationMetric("delete_v1_volume", timeTaken, err)
if err != nil {
glog.Errorf("Cannot delete volume %s: %v", volumeName, err)
glog.Errorf("Cannot delete volume %s: %v", volumeID, err)
}
return err
}
func (volumes *VolumesV2) deleteVolume(volumeName string) error {
func (volumes *VolumesV2) deleteVolume(volumeID string) error {
startTime := time.Now()
err := volumes_v2.Delete(volumes.blockstorage, volumeName).ExtractErr()
err := volumes_v2.Delete(volumes.blockstorage, volumeID).ExtractErr()
timeTaken := time.Since(startTime).Seconds()
recordOpenstackOperationMetric("delete_v2_volume", timeTaken, err)
if err != nil {
glog.Errorf("Cannot delete volume %s: %v", volumeName, err)
glog.Errorf("Cannot delete volume %s: %v", volumeID, err)
}
return err
@@ -246,8 +211,8 @@ func (os *OpenStack) OperationPending(diskName string) (bool, string, error) {
}
// Attaches given cinder volume to the compute running kubelet
func (os *OpenStack) AttachDisk(instanceID string, diskName string) (string, error) {
volume, err := os.getVolume(diskName)
func (os *OpenStack) AttachDisk(instanceID, volumeID string) (string, error) {
volume, err := os.getVolume(volumeID)
if err != nil {
return "", err
}
@@ -266,11 +231,11 @@ func (os *OpenStack) AttachDisk(instanceID string, diskName string) (string, err
if volume.AttachedServerId != "" {
if instanceID == volume.AttachedServerId {
glog.V(4).Infof("Disk: %q is already attached to compute: %q", diskName, instanceID)
glog.V(4).Infof("Disk %s is already attached to instance %s", volumeID, instanceID)
return volume.ID, nil
}
glog.V(2).Infof("Disk %q is attached to a different compute (%q), detaching", diskName, volume.AttachedServerId)
err = os.DetachDisk(volume.AttachedServerId, diskName)
glog.V(2).Infof("Disk %s is attached to a different instance (%s), detaching", volumeID, volume.AttachedServerId)
err = os.DetachDisk(volume.AttachedServerId, volumeID)
if err != nil {
return "", err
}
@@ -284,16 +249,16 @@ func (os *OpenStack) AttachDisk(instanceID string, diskName string) (string, err
timeTaken := time.Since(startTime).Seconds()
recordOpenstackOperationMetric("attach_disk", timeTaken, err)
if err != nil {
glog.Errorf("Failed to attach %s volume to %s compute: %v", diskName, instanceID, err)
glog.Errorf("Failed to attach %s volume to %s compute: %v", volumeID, instanceID, err)
return "", err
}
glog.V(2).Infof("Successfully attached %s volume to %s compute", diskName, instanceID)
glog.V(2).Infof("Successfully attached %s volume to %s compute", volumeID, instanceID)
return volume.ID, nil
}
// Detaches given cinder volume from the compute running kubelet
func (os *OpenStack) DetachDisk(instanceID string, partialDiskId string) error {
volume, err := os.getVolume(partialDiskId)
// DetachDisk detaches given cinder volume from the compute running kubelet
func (os *OpenStack) DetachDisk(instanceID, volumeID string) error {
volume, err := os.getVolume(volumeID)
if err != nil {
return err
}
@@ -330,26 +295,24 @@ func (os *OpenStack) DetachDisk(instanceID string, partialDiskId string) error {
return nil
}
// Takes a partial/full disk id or diskname
func (os *OpenStack) getVolume(diskName string) (Volume, error) {
// Retrieves Volume by its ID.
func (os *OpenStack) getVolume(volumeID string) (Volume, error) {
volumes, err := os.volumeService("")
if err != nil || volumes == nil {
glog.Errorf("Unable to initialize cinder client for region: %s", os.region)
return Volume{}, err
}
return volumes.getVolume(diskName)
return volumes.getVolume(volumeID)
}
// Create a volume of given size (in GiB)
func (os *OpenStack) CreateVolume(name string, size int, vtype, availability string, tags *map[string]string) (string, string, error) {
volumes, err := os.volumeService("")
if err != nil || volumes == nil {
glog.Errorf("Unable to initialize cinder client for region: %s", os.region)
return "", "", err
}
opts := VolumeCreateOpts{
Name: name,
Size: size,
@@ -359,27 +322,28 @@ func (os *OpenStack) CreateVolume(name string, size int, vtype, availability str
if tags != nil {
opts.Metadata = *tags
}
volumeId, volumeAZ, err := volumes.createVolume(opts)
volumeID, volumeAZ, err := volumes.createVolume(opts)
if err != nil {
glog.Errorf("Failed to create a %d GB volume: %v", size, err)
return "", "", err
}
glog.Infof("Created volume %v in Availability Zone: %v", volumeId, volumeAZ)
return volumeId, volumeAZ, nil
glog.Infof("Created volume %v in Availability Zone: %v", volumeID, volumeAZ)
return volumeID, volumeAZ, nil
}
// GetDevicePath returns the path of an attached block storage volume, specified by its id.
func (os *OpenStack) GetDevicePath(diskId string) string {
func (os *OpenStack) GetDevicePath(volumeID string) string {
// Build a list of candidate device paths
candidateDeviceNodes := []string{
// KVM
fmt.Sprintf("virtio-%s", diskId[:20]),
fmt.Sprintf("virtio-%s", volumeID[:20]),
// KVM virtio-scsi
fmt.Sprintf("scsi-0QEMU_QEMU_HARDDISK_%s", diskId[:20]),
fmt.Sprintf("scsi-0QEMU_QEMU_HARDDISK_%s", volumeID[:20]),
// ESXi
fmt.Sprintf("wwn-0x%s", strings.Replace(diskId, "-", "", -1)),
fmt.Sprintf("wwn-0x%s", strings.Replace(volumeID, "-", "", -1)),
}
files, _ := ioutil.ReadDir("/dev/disk/by-id/")
@@ -393,17 +357,17 @@ func (os *OpenStack) GetDevicePath(diskId string) string {
}
}
glog.Warningf("Failed to find device for the diskid: %q\n", diskId)
glog.Warningf("Failed to find device for the volumeID: %q\n", volumeID)
return ""
}
func (os *OpenStack) DeleteVolume(volumeName string) error {
used, err := os.diskIsUsed(volumeName)
func (os *OpenStack) DeleteVolume(volumeID string) error {
used, err := os.diskIsUsed(volumeID)
if err != nil {
return err
}
if used {
msg := fmt.Sprintf("Cannot delete the volume %q, it's still attached to a node", volumeName)
msg := fmt.Sprintf("Cannot delete the volume %q, it's still attached to a node", volumeID)
return k8s_volume.NewDeletedVolumeInUseError(msg)
}
@@ -413,19 +377,19 @@ func (os *OpenStack) DeleteVolume(volumeName string) error {
return err
}
err = volumes.deleteVolume(volumeName)
err = volumes.deleteVolume(volumeID)
if err != nil {
glog.Errorf("Cannot delete volume %s: %v", volumeName, err)
glog.Errorf("Cannot delete volume %s: %v", volumeID, err)
}
return nil
}
// Get the device path of the volume attached to the compute running kubelet, as known by cinder
func (os *OpenStack) GetAttachmentDiskPath(instanceID string, diskName string) (string, error) {
func (os *OpenStack) GetAttachmentDiskPath(instanceID, volumeID string) (string, error) {
// See issue #33128 - Cinder does not always tell you the right device path, as such
// we must only use this value as a last resort.
volume, err := os.getVolume(diskName)
volume, err := os.getVolume(volumeID)
if err != nil {
return "", err
}
@@ -440,47 +404,41 @@ func (os *OpenStack) GetAttachmentDiskPath(instanceID string, diskName string) (
// see http://developer.openstack.org/api-ref-blockstorage-v1.html
return volume.AttachedDevice, nil
} else {
errMsg := fmt.Sprintf("Disk %q is attached to a different compute: %q, should be detached before proceeding", diskName, volume.AttachedServerId)
errMsg := fmt.Sprintf("Disk %q is attached to a different compute: %q, should be detached before proceeding", volumeID, volume.AttachedServerId)
glog.Errorf(errMsg)
return "", errors.New(errMsg)
}
}
return "", fmt.Errorf("volume %s has not ServerId.", diskName)
return "", fmt.Errorf("volume %s has no ServerId.", volumeID)
}
// query if a volume is attached to a compute instance
func (os *OpenStack) DiskIsAttached(diskName, instanceID string) (bool, error) {
volume, err := os.getVolume(diskName)
func (os *OpenStack) DiskIsAttached(instanceID, volumeID string) (bool, error) {
volume, err := os.getVolume(volumeID)
if err != nil {
return false, err
}
if instanceID == volume.AttachedServerId {
return true, nil
}
return false, nil
return instanceID == volume.AttachedServerId, nil
}
// query if a list of volumes are attached to a compute instance
func (os *OpenStack) DisksAreAttached(diskNames []string, instanceID string) (map[string]bool, error) {
func (os *OpenStack) DisksAreAttached(instanceID string, volumeIDs []string) (map[string]bool, error) {
attached := make(map[string]bool)
for _, diskName := range diskNames {
is_attached, _ := os.DiskIsAttached(diskName, instanceID)
attached[diskName] = is_attached
for _, volumeID := range volumeIDs {
isAttached, _ := os.DiskIsAttached(instanceID, volumeID)
attached[volumeID] = isAttached
}
return attached, nil
}
// diskIsUsed returns true if a disk is attached to any node.
func (os *OpenStack) diskIsUsed(diskName string) (bool, error) {
volume, err := os.getVolume(diskName)
func (os *OpenStack) diskIsUsed(volumeID string) (bool, error) {
volume, err := os.getVolume(volumeID)
if err != nil {
return false, err
}
if volume.AttachedServerId != "" {
return true, nil
}
return false, nil
return volume.AttachedServerId != "", nil
}
// query if we should trust the cinder provided deviceName, see issue #33128

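Worth flagging for reviewers: DiskIsAttached and DisksAreAttached swap their parameters so that instanceID comes first, and since both parameters are plain strings the compiler cannot catch a call site left in the old order. A hypothetical caller under the new signatures (ensureDetached is illustrative, not part of the commit):

// ensureDetached exercises the new (instanceID, volumeID) order end to
// end: check the attachment first, then detach only when needed.
func ensureDetached(os *OpenStack, instanceID, volumeID string) error {
	attached, err := os.DiskIsAttached(instanceID, volumeID) // was (diskName, instanceID)
	if err != nil {
		return err
	}
	if !attached {
		return nil
	}
	return os.DetachDisk(instanceID, volumeID)
}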
View File

@@ -25,7 +25,6 @@ import (
"net"
"os"
"regexp"
"strings"
"time"
"gopkg.in/gcfg.v1"
@@ -488,11 +487,11 @@ func (os *Rackspace) GetZone() (cloudprovider.Zone, error) {
}
// Create a volume of given size (in GiB)
func (rs *Rackspace) CreateVolume(name string, size int, vtype, availability string, tags *map[string]string) (volumeName string, volumeAZ string, err error) {
func (rs *Rackspace) CreateVolume(name string, size int, vtype, availability string, tags *map[string]string) (string, string, error) {
return "", "", errors.New("unimplemented")
}
func (rs *Rackspace) DeleteVolume(volumeName string) error {
func (rs *Rackspace) DeleteVolume(volumeID string) error {
return errors.New("unimplemented")
}
@@ -513,14 +512,14 @@ func (rs *Rackspace) OperationPending(diskName string) (bool, string, error) {
}
// Attaches given cinder volume to the compute running kubelet
func (rs *Rackspace) AttachDisk(instanceID string, diskName string) (string, error) {
disk, err := rs.getVolume(diskName)
func (rs *Rackspace) AttachDisk(instanceID, volumeID string) (string, error) {
volume, err := rs.getVolume(volumeID)
if err != nil {
return "", err
}
if disk.Status != VolumeAvailableStatus {
errmsg := fmt.Sprintf("volume %s status is %s, not %s, can not be attached to instance %s.", disk.Name, disk.Status, VolumeAvailableStatus, instanceID)
if volume.Status != VolumeAvailableStatus {
errmsg := fmt.Sprintf("volume %s status is %s, not %s, can not be attached to instance %s.", volume.Name, volume.Status, VolumeAvailableStatus, instanceID)
glog.Errorf(errmsg)
return "", errors.New(errmsg)
}
@@ -530,77 +529,54 @@ func (rs *Rackspace) AttachDisk(instanceID string, diskName string) (string, err
return "", err
}
if len(disk.Attachments) > 0 {
if instanceID == disk.Attachments[0]["server_id"] {
glog.V(4).Infof("Disk: %q is already attached to compute: %q", diskName, instanceID)
return disk.ID, nil
if len(volume.Attachments) > 0 {
if instanceID == volume.Attachments[0]["server_id"] {
glog.V(4).Infof("Volume: %q is already attached to compute: %q", volumeID, instanceID)
return volume.ID, nil
}
errMsg := fmt.Sprintf("Disk %q is attached to a different compute: %q, should be detached before proceeding", diskName, disk.Attachments[0]["server_id"])
errMsg := fmt.Sprintf("Volume %q is attached to a different compute: %q, should be detached before proceeding", volumeID, volume.Attachments[0]["server_id"])
glog.Errorf(errMsg)
return "", errors.New(errMsg)
}
_, err = volumeattach.Create(compute, instanceID, &osvolumeattach.CreateOpts{
VolumeID: disk.ID,
VolumeID: volume.ID,
}).Extract()
if err != nil {
glog.Errorf("Failed to attach %s volume to %s compute", diskName, instanceID)
glog.Errorf("Failed to attach %s volume to %s compute", volumeID, instanceID)
return "", err
}
glog.V(2).Infof("Successfully attached %s volume to %s compute", diskName, instanceID)
return disk.ID, nil
glog.V(2).Infof("Successfully attached %s volume to %s compute", volumeID, instanceID)
return volume.ID, nil
}
// GetDevicePath returns the path of an attached block storage volume, specified by its id.
func (rs *Rackspace) GetDevicePath(diskId string) string {
volume, err := rs.getVolume(diskId)
func (rs *Rackspace) GetDevicePath(volumeID string) string {
volume, err := rs.getVolume(volumeID)
if err != nil {
return ""
}
attachments := volume.Attachments
if len(attachments) != 1 {
glog.Warningf("Unexpected number of volume attachments on %s: %d", diskId, len(attachments))
glog.Warningf("Unexpected number of volume attachments on %s: %d", volumeID, len(attachments))
return ""
}
return attachments[0]["device"].(string)
}
// Takes a partial/full disk id or diskname
func (rs *Rackspace) getVolume(diskName string) (volumes.Volume, error) {
sClient, err := rackspace.NewBlockStorageV1(rs.provider, gophercloud.EndpointOpts{
// Retrieves Volume by its ID.
func (rs *Rackspace) getVolume(volumeID string) (*volumes.Volume, error) {
client, err := rackspace.NewBlockStorageV1(rs.provider, gophercloud.EndpointOpts{
Region: rs.region,
})
var volume volumes.Volume
if err != nil || sClient == nil {
glog.Errorf("Unable to initialize cinder client for region: %s", rs.region)
return volume, err
}
err = volumes.List(sClient).EachPage(func(page pagination.Page) (bool, error) {
vols, err := volumes.ExtractVolumes(page)
if err != nil {
glog.Errorf("Failed to extract volumes: %v", err)
return false, err
}
for _, v := range vols {
glog.V(4).Infof("%s %s %v", v.ID, v.Name, v.Attachments)
if v.Name == diskName || strings.Contains(v.ID, diskName) {
volume = v
return true, nil
}
}
// if it reached here then no disk with the given name was found.
errmsg := fmt.Sprintf("Unable to find disk: %s in region %s", diskName, rs.region)
return false, errors.New(errmsg)
})
volume, err := volumes.Get(client, volumeID).Extract()
if err != nil {
glog.Errorf("Error occurred getting volume: %s", diskName)
glog.Errorf("Error occurred getting volume by ID: %s", volumeID)
return &volumes.Volume{}, err
}
return volume, err
return volume, nil
}
func (rs *Rackspace) getComputeClient() (*gophercloud.ServiceClient, error) {
@@ -614,14 +590,14 @@ func (rs *Rackspace) getComputeClient() (*gophercloud.ServiceClient, error) {
}
// Detaches given cinder volume from the compute running kubelet
func (rs *Rackspace) DetachDisk(instanceID string, partialDiskId string) error {
disk, err := rs.getVolume(partialDiskId)
func (rs *Rackspace) DetachDisk(instanceID, volumeID string) error {
volume, err := rs.getVolume(volumeID)
if err != nil {
return err
}
if disk.Status != VolumeInUseStatus {
errmsg := fmt.Sprintf("can not detach volume %s, its status is %s.", disk.Name, disk.Status)
if volume.Status != VolumeInUseStatus {
errmsg := fmt.Sprintf("can not detach volume %s, its status is %s.", volume.Name, volume.Status)
glog.Errorf(errmsg)
return errors.New(errmsg)
}
@@ -631,23 +607,23 @@ func (rs *Rackspace) DetachDisk(instanceID string, partialDiskId string) error {
return err
}
if len(disk.Attachments) > 1 {
if len(volume.Attachments) > 1 {
// Rackspace does not support "multiattach", this is a sanity check.
errmsg := fmt.Sprintf("Volume %s is attached to multiple instances, which is not supported by this provider.", disk.ID)
errmsg := fmt.Sprintf("Volume %s is attached to multiple instances, which is not supported by this provider.", volume.ID)
return errors.New(errmsg)
}
if len(disk.Attachments) > 0 && instanceID == disk.Attachments[0]["server_id"] {
if len(volume.Attachments) > 0 && instanceID == volume.Attachments[0]["server_id"] {
// This is a blocking call and affects kubelet's performance directly.
// We should consider kicking it out into a separate routine, if it is bad.
err = volumeattach.Delete(compute, instanceID, disk.ID).ExtractErr()
err = volumeattach.Delete(compute, instanceID, volume.ID).ExtractErr()
if err != nil {
glog.Errorf("Failed to delete volume %s from compute %s attached %v", disk.ID, instanceID, err)
glog.Errorf("Failed to delete volume %s from compute %s attached %v", volume.ID, instanceID, err)
return err
}
glog.V(2).Infof("Successfully detached volume: %s from compute: %s", disk.ID, instanceID)
glog.V(2).Infof("Successfully detached volume: %s from compute: %s", volume.ID, instanceID)
} else {
errMsg := fmt.Sprintf("Disk: %s has no attachments or is not attached to compute: %s", disk.Name, instanceID)
errMsg := fmt.Sprintf("Disk: %s has no attachments or is not attached to compute: %s", volume.Name, instanceID)
glog.Errorf(errMsg)
return errors.New(errMsg)
}
@@ -656,59 +632,60 @@ func (rs *Rackspace) DetachDisk(instanceID string, partialDiskId string) error {
}
// Get the device path of the volume attached to the compute running kubelet, as known by cinder
func (rs *Rackspace) GetAttachmentDiskPath(instanceID string, diskName string) (string, error) {
func (rs *Rackspace) GetAttachmentDiskPath(instanceID, volumeID string) (string, error) {
// See issue #33128 - Cinder does not always tell you the right device path, as such
// we must only use this value as a last resort.
disk, err := rs.getVolume(diskName)
volume, err := rs.getVolume(volumeID)
if err != nil {
return "", err
}
if disk.Status != VolumeInUseStatus {
errmsg := fmt.Sprintf("can not get device path of volume %s, its status is %s.", disk.Name, disk.Status)
if volume.Status != VolumeInUseStatus {
errmsg := fmt.Sprintf("can not get device path of volume %s, its status is %s.", volume.Name, volume.Status)
glog.Errorf(errmsg)
return "", errors.New(errmsg)
}
if len(disk.Attachments) > 0 && disk.Attachments[0]["server_id"] != nil {
if instanceID == disk.Attachments[0]["server_id"] {
if len(volume.Attachments) > 0 && volume.Attachments[0]["server_id"] != nil {
if instanceID == volume.Attachments[0]["server_id"] {
// Attachment[0]["device"] points to the device path
// see http://developer.openstack.org/api-ref-blockstorage-v1.html
return disk.Attachments[0]["device"].(string), nil
return volume.Attachments[0]["device"].(string), nil
} else {
errMsg := fmt.Sprintf("Disk %q is attached to a different compute: %q, should be detached before proceeding", diskName, disk.Attachments[0]["server_id"])
errMsg := fmt.Sprintf("Disk %q is attached to a different compute: %q, should be detached before proceeding", volumeID, volume.Attachments[0]["server_id"])
glog.Errorf(errMsg)
return "", errors.New(errMsg)
}
}
return "", fmt.Errorf("volume %s is not attached to %s", diskName, instanceID)
return "", fmt.Errorf("volume %s is not attached to %s", volumeID, instanceID)
}
// query if a volume is attached to a compute instance
func (rs *Rackspace) DiskIsAttached(diskName, instanceID string) (bool, error) {
disk, err := rs.getVolume(diskName)
func (rs *Rackspace) DiskIsAttached(instanceID, volumeID string) (bool, error) {
volume, err := rs.getVolume(volumeID)
if err != nil {
return false, err
}
if len(disk.Attachments) > 0 && disk.Attachments[0]["server_id"] != nil && instanceID == disk.Attachments[0]["server_id"] {
if len(volume.Attachments) > 0 && volume.Attachments[0]["server_id"] != nil && instanceID == volume.Attachments[0]["server_id"] {
return true, nil
}
return false, nil
}
// query if a list of volumes are attached to a compute instance
func (rs *Rackspace) DisksAreAttached(diskNames []string, instanceID string) (map[string]bool, error) {
func (rs *Rackspace) DisksAreAttached(instanceID string, volumeIDs []string) (map[string]bool, error) {
attached := make(map[string]bool)
for _, diskName := range diskNames {
attached[diskName] = false
for _, volumeID := range volumeIDs {
attached[volumeID] = false
}
var returnedErr error
for _, diskName := range diskNames {
result, err := rs.DiskIsAttached(diskName, instanceID)
for _, volumeID := range volumeIDs {
result, err := rs.DiskIsAttached(instanceID, volumeID)
if err != nil {
returnedErr = fmt.Errorf("Error in checking disk %q attached: %v \n %v", diskName, err, returnedErr)
returnedErr = fmt.Errorf("Error in checking disk %q attached: %v \n %v", volumeID, err, returnedErr)
continue
}
if result {
attached[diskName] = true
attached[volumeID] = true
}
}

View File

@@ -68,7 +68,7 @@ func (attacher *cinderDiskAttacher) Attach(spec *volume.Spec, nodeName types.Nod
volumeID := volumeSource.VolumeID
instanceid, err := attacher.nodeInstanceID(nodeName)
instanceID, err := attacher.nodeInstanceID(nodeName)
if err != nil {
return "", err
}
@@ -82,23 +82,23 @@ func (attacher *cinderDiskAttacher) Attach(spec *volume.Spec, nodeName types.Nod
return "", nil
}
attached, err := attacher.cinderProvider.DiskIsAttached(volumeID, instanceid)
attached, err := attacher.cinderProvider.DiskIsAttached(instanceID, volumeID)
if err != nil {
// Log error and continue with attach
glog.Warningf(
"Error checking if volume (%q) is already attached to current instance (%q). Will continue and try attach anyway. err=%v",
volumeID, instanceid, err)
volumeID, instanceID, err)
}
if err == nil && attached {
// Volume is already attached to instance.
glog.Infof("Attach operation is successful. volume %q is already attached to instance %q.", volumeID, instanceid)
glog.Infof("Attach operation is successful. volume %q is already attached to instance %q.", volumeID, instanceID)
} else {
_, err = attacher.cinderProvider.AttachDisk(instanceid, volumeID)
_, err = attacher.cinderProvider.AttachDisk(instanceID, volumeID)
if err == nil {
glog.Infof("Attach operation successful: volume %q attached to instance %q.", volumeID, instanceid)
glog.Infof("Attach operation successful: volume %q attached to instance %q.", volumeID, instanceID)
} else {
glog.Infof("Attach volume %q to instance %q failed with: %v", volumeID, instanceid, err)
glog.Infof("Attach volume %q to instance %q failed with: %v", volumeID, instanceID, err)
return "", err
}
}
@@ -111,9 +111,9 @@ func (attacher *cinderDiskAttacher) Attach(spec *volume.Spec, nodeName types.Nod
return "", err
}
if !pending {
devicePath, err = attacher.cinderProvider.GetAttachmentDiskPath(instanceid, volumeID)
devicePath, err = attacher.cinderProvider.GetAttachmentDiskPath(instanceID, volumeID)
if err != nil {
glog.Infof("Can not get device path of volume %q which be attached to instance %q, failed with: %v", volumeID, instanceid, err)
glog.Infof("Can not get device path of volume %q which be attached to instance %q, failed with: %v", volumeID, instanceID, err)
return "", err
}
}
@@ -137,12 +137,12 @@ func (attacher *cinderDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nod
volumeSpecMap[volumeSource.VolumeID] = spec
}
instanceid, err := attacher.nodeInstanceID(nodeName)
instanceID, err := attacher.nodeInstanceID(nodeName)
if err != nil {
return volumesAttachedCheck, err
}
attachedResult, err := attacher.cinderProvider.DisksAreAttached(volumeIDList, instanceid)
attachedResult, err := attacher.cinderProvider.DisksAreAttached(instanceID, volumeIDList)
if err != nil {
// Log error and continue with attach
glog.Errorf(
@@ -273,12 +273,12 @@ func (detacher *cinderDiskDetacher) Detach(deviceMountPath string, nodeName type
if !res {
return fmt.Errorf("failed to list openstack instances")
}
instanceid, err := instances.InstanceID(nodeName)
if ind := strings.LastIndex(instanceid, "/"); ind >= 0 {
instanceid = instanceid[(ind + 1):]
instanceID, err := instances.InstanceID(nodeName)
if ind := strings.LastIndex(instanceID, "/"); ind >= 0 {
instanceID = instanceID[(ind + 1):]
}
attached, err := detacher.cinderProvider.DiskIsAttached(volumeID, instanceid)
attached, err := detacher.cinderProvider.DiskIsAttached(instanceID, volumeID)
if err != nil {
// Log error and continue with detach
glog.Errorf(
@@ -292,11 +292,11 @@ func (detacher *cinderDiskDetacher) Detach(deviceMountPath string, nodeName type
return nil
}
if err = detacher.cinderProvider.DetachDisk(instanceid, volumeID); err != nil {
if err = detacher.cinderProvider.DetachDisk(instanceID, volumeID); err != nil {
glog.Errorf("Error detaching volume %q: %v", volumeID, err)
return err
}
glog.Infof("detached volume %q from instance %q", volumeID, instanceid)
glog.Infof("detached volume %q from instance %q", volumeID, instanceID)
return nil
}
@@ -309,12 +309,12 @@ func (attacher *cinderDiskAttacher) nodeInstanceID(nodeName types.NodeName) (str
if !res {
return "", fmt.Errorf("failed to list openstack instances")
}
instanceid, err := instances.InstanceID(nodeName)
instanceID, err := instances.InstanceID(nodeName)
if err != nil {
return "", err
}
if ind := strings.LastIndex(instanceid, "/"); ind >= 0 {
instanceid = instanceid[(ind + 1):]
if ind := strings.LastIndex(instanceID, "/"); ind >= 0 {
instanceID = instanceID[(ind + 1):]
}
return instanceid, nil
return instanceID, nil
}

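One behavior the rename leaves intact: instance IDs returned by instances.InstanceID can carry a slash-separated prefix, and only the segment after the last "/" is a server ID Cinder understands, hence the strings.LastIndex trimming in both Detach and nodeInstanceID. A standalone sketch of that normalization (the package, function name, and sample value are illustrative):

package cinder

import "strings"

// bareInstanceID trims a provider-prefixed instance ID such as
// "zone-1/server-uuid" down to its trailing segment, mirroring the
// strings.LastIndex logic used in the attacher above.
func bareInstanceID(providerID string) string {
	if ind := strings.LastIndex(providerID, "/"); ind >= 0 {
		return providerID[ind+1:]
	}
	return providerID
}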
View File

@@ -106,13 +106,13 @@ type testcase struct {
}
func TestAttachDetach(t *testing.T) {
diskName := "disk"
volumeID := "disk"
instanceID := "instance"
pending := VolumeStatusPending
done := VolumeStatusDone
nodeName := types.NodeName("nodeName")
readOnly := false
spec := createVolSpec(diskName, readOnly)
spec := createVolSpec(volumeID, readOnly)
attachError := errors.New("Fake attach error")
detachError := errors.New("Fake detach error")
diskCheckError := errors.New("Fake DiskIsAttached error")
@@ -123,10 +123,10 @@ func TestAttachDetach(t *testing.T) {
{
name: "Attach_Positive",
instanceID: instanceID,
operationPending: operationPendingCall{diskName, false, done, nil},
diskIsAttached: diskIsAttachedCall{diskName, instanceID, false, nil},
attach: attachCall{diskName, instanceID, "", nil},
diskPath: diskPathCall{diskName, instanceID, "/dev/sda", nil},
operationPending: operationPendingCall{volumeID, false, done, nil},
diskIsAttached: diskIsAttachedCall{instanceID, volumeID, false, nil},
attach: attachCall{instanceID, volumeID, "", nil},
diskPath: diskPathCall{instanceID, volumeID, "/dev/sda", nil},
test: func(testcase *testcase) (string, error) {
attacher := newAttacher(testcase)
return attacher.Attach(spec, nodeName)
@@ -138,9 +138,9 @@ func TestAttachDetach(t *testing.T) {
{
name: "Attach_Positive_AlreadyAttached",
instanceID: instanceID,
operationPending: operationPendingCall{diskName, false, done, nil},
diskIsAttached: diskIsAttachedCall{diskName, instanceID, true, nil},
diskPath: diskPathCall{diskName, instanceID, "/dev/sda", nil},
operationPending: operationPendingCall{volumeID, false, done, nil},
diskIsAttached: diskIsAttachedCall{instanceID, volumeID, true, nil},
diskPath: diskPathCall{instanceID, volumeID, "/dev/sda", nil},
test: func(testcase *testcase) (string, error) {
attacher := newAttacher(testcase)
return attacher.Attach(spec, nodeName)
@@ -152,8 +152,8 @@ func TestAttachDetach(t *testing.T) {
{
name: "Attach_is_attaching",
instanceID: instanceID,
operationPending: operationPendingCall{diskName, true, pending, nil},
diskIsAttached: diskIsAttachedCall{diskName, instanceID, false, diskCheckError},
operationPending: operationPendingCall{volumeID, true, pending, nil},
diskIsAttached: diskIsAttachedCall{instanceID, volumeID, false, diskCheckError},
test: func(testcase *testcase) (string, error) {
attacher := newAttacher(testcase)
return attacher.Attach(spec, nodeName)
@@ -165,10 +165,10 @@ func TestAttachDetach(t *testing.T) {
{
name: "Attach_Positive_CheckFails",
instanceID: instanceID,
operationPending: operationPendingCall{diskName, false, done, nil},
diskIsAttached: diskIsAttachedCall{diskName, instanceID, false, diskCheckError},
attach: attachCall{diskName, instanceID, "", nil},
diskPath: diskPathCall{diskName, instanceID, "/dev/sda", nil},
operationPending: operationPendingCall{volumeID, false, done, nil},
diskIsAttached: diskIsAttachedCall{instanceID, volumeID, false, diskCheckError},
attach: attachCall{instanceID, volumeID, "", nil},
diskPath: diskPathCall{instanceID, volumeID, "/dev/sda", nil},
test: func(testcase *testcase) (string, error) {
attacher := newAttacher(testcase)
return attacher.Attach(spec, nodeName)
@@ -180,9 +180,9 @@ func TestAttachDetach(t *testing.T) {
{
name: "Attach_Negative",
instanceID: instanceID,
operationPending: operationPendingCall{diskName, false, done, nil},
diskIsAttached: diskIsAttachedCall{diskName, instanceID, false, diskCheckError},
attach: attachCall{diskName, instanceID, "/dev/sda", attachError},
operationPending: operationPendingCall{volumeID, false, done, nil},
diskIsAttached: diskIsAttachedCall{instanceID, volumeID, false, diskCheckError},
attach: attachCall{instanceID, volumeID, "/dev/sda", attachError},
test: func(testcase *testcase) (string, error) {
attacher := newAttacher(testcase)
return attacher.Attach(spec, nodeName)
@@ -194,10 +194,10 @@ func TestAttachDetach(t *testing.T) {
{
name: "Attach_Negative_DiskPatchFails",
instanceID: instanceID,
operationPending: operationPendingCall{diskName, false, done, nil},
diskIsAttached: diskIsAttachedCall{diskName, instanceID, false, diskCheckError},
attach: attachCall{diskName, instanceID, "", nil},
diskPath: diskPathCall{diskName, instanceID, "", diskPathError},
operationPending: operationPendingCall{volumeID, false, done, nil},
diskIsAttached: diskIsAttachedCall{instanceID, volumeID, false, diskCheckError},
attach: attachCall{instanceID, volumeID, "", nil},
diskPath: diskPathCall{instanceID, volumeID, "", diskPathError},
test: func(testcase *testcase) (string, error) {
attacher := newAttacher(testcase)
return attacher.Attach(spec, nodeName)
@@ -209,7 +209,7 @@ func TestAttachDetach(t *testing.T) {
{
name: "VolumesAreAttached_Positive",
instanceID: instanceID,
disksAreAttached: disksAreAttachedCall{[]string{diskName}, instanceID, map[string]bool{diskName: true}, nil},
disksAreAttached: disksAreAttachedCall{instanceID, []string{volumeID}, map[string]bool{volumeID: true}, nil},
test: func(testcase *testcase) (string, error) {
attacher := newAttacher(testcase)
attachments, err := attacher.VolumesAreAttached([]*volume.Spec{spec}, nodeName)
@@ -222,7 +222,7 @@ func TestAttachDetach(t *testing.T) {
{
name: "VolumesAreAttached_Negative",
instanceID: instanceID,
disksAreAttached: disksAreAttachedCall{[]string{diskName}, instanceID, map[string]bool{diskName: false}, nil},
disksAreAttached: disksAreAttachedCall{instanceID, []string{volumeID}, map[string]bool{volumeID: false}, nil},
test: func(testcase *testcase) (string, error) {
attacher := newAttacher(testcase)
attachments, err := attacher.VolumesAreAttached([]*volume.Spec{spec}, nodeName)
@@ -235,7 +235,7 @@ func TestAttachDetach(t *testing.T) {
{
name: "VolumesAreAttached_CinderFailed",
instanceID: instanceID,
disksAreAttached: disksAreAttachedCall{[]string{diskName}, instanceID, nil, disksCheckError},
disksAreAttached: disksAreAttachedCall{instanceID, []string{volumeID}, nil, disksCheckError},
test: func(testcase *testcase) (string, error) {
attacher := newAttacher(testcase)
attachments, err := attacher.VolumesAreAttached([]*volume.Spec{spec}, nodeName)
@@ -249,11 +249,11 @@ func TestAttachDetach(t *testing.T) {
{
name: "Detach_Positive",
instanceID: instanceID,
diskIsAttached: diskIsAttachedCall{diskName, instanceID, true, nil},
detach: detachCall{diskName, instanceID, nil},
diskIsAttached: diskIsAttachedCall{instanceID, volumeID, true, nil},
detach: detachCall{instanceID, volumeID, nil},
test: func(testcase *testcase) (string, error) {
detacher := newDetacher(testcase)
return "", detacher.Detach(diskName, nodeName)
return "", detacher.Detach(volumeID, nodeName)
},
},
@@ -261,10 +261,10 @@ func TestAttachDetach(t *testing.T) {
{
name: "Detach_Positive_AlreadyDetached",
instanceID: instanceID,
diskIsAttached: diskIsAttachedCall{diskName, instanceID, false, nil},
diskIsAttached: diskIsAttachedCall{instanceID, volumeID, false, nil},
test: func(testcase *testcase) (string, error) {
detacher := newDetacher(testcase)
return "", detacher.Detach(diskName, nodeName)
return "", detacher.Detach(volumeID, nodeName)
},
},
@@ -272,11 +272,11 @@ func TestAttachDetach(t *testing.T) {
{
name: "Detach_Positive_CheckFails",
instanceID: instanceID,
diskIsAttached: diskIsAttachedCall{diskName, instanceID, false, diskCheckError},
detach: detachCall{diskName, instanceID, nil},
diskIsAttached: diskIsAttachedCall{instanceID, volumeID, false, diskCheckError},
detach: detachCall{instanceID, volumeID, nil},
test: func(testcase *testcase) (string, error) {
detacher := newDetacher(testcase)
return "", detacher.Detach(diskName, nodeName)
return "", detacher.Detach(volumeID, nodeName)
},
},
@@ -284,11 +284,11 @@ func TestAttachDetach(t *testing.T) {
{
name: "Detach_Negative",
instanceID: instanceID,
diskIsAttached: diskIsAttachedCall{diskName, instanceID, false, diskCheckError},
detach: detachCall{diskName, instanceID, detachError},
diskIsAttached: diskIsAttachedCall{instanceID, volumeID, false, diskCheckError},
detach: detachCall{instanceID, volumeID, detachError},
test: func(testcase *testcase) (string, error) {
detacher := newDetacher(testcase)
return "", detacher.Detach(diskName, nodeName)
return "", detacher.Detach(volumeID, nodeName)
},
expectedError: detachError,
},
@@ -303,12 +303,11 @@ func TestAttachDetach(t *testing.T) {
if result != testcase.expectedResult {
t.Errorf("%s failed: expected result=%q, got %q", testcase.name, testcase.expectedResult, result)
}
t.Logf("Test %q succeeded", testcase.name)
}
}
type volumeAttachmentFlag struct {
diskName string
volumeID string
attached bool
}
@@ -323,10 +322,10 @@ func (va volumeAttachmentFlags) Swap(i, j int) {
}
func (va volumeAttachmentFlags) Less(i, j int) bool {
if va[i].diskName < va[j].diskName {
if va[i].volumeID < va[j].volumeID {
return true
}
if va[i].diskName > va[j].diskName {
if va[i].volumeID > va[j].volumeID {
return false
}
return va[j].attached
@@ -396,15 +395,15 @@ func createPVSpec(name string, readOnly bool) *volume.Spec {
// Fake CinderProvider implementation
type attachCall struct {
diskName string
instanceID string
volumeID string
retDeviceName string
ret error
}
type detachCall struct {
devicePath string
instanceID string
devicePath string
ret error
}
@@ -416,37 +415,39 @@ type operationPendingCall struct {
}
type diskIsAttachedCall struct {
diskName, instanceID string
isAttached bool
ret error
instanceID string
volumeID string
isAttached bool
ret error
}
type diskPathCall struct {
diskName, instanceID string
retPath string
ret error
instanceID string
volumeID string
retPath string
ret error
}
type disksAreAttachedCall struct {
diskNames []string
instanceID string
volumeIDs []string
areAttached map[string]bool
ret error
}
func (testcase *testcase) AttachDisk(instanceID string, diskName string) (string, error) {
func (testcase *testcase) AttachDisk(instanceID, volumeID string) (string, error) {
expected := &testcase.attach
if expected.diskName == "" && expected.instanceID == "" {
if expected.volumeID == "" && expected.instanceID == "" {
// testcase.attach looks uninitialized, test did not expect to call
// AttachDisk
testcase.t.Errorf("Unexpected AttachDisk call!")
return "", errors.New("Unexpected AttachDisk call!")
}
if expected.diskName != diskName {
testcase.t.Errorf("Unexpected AttachDisk call: expected diskName %s, got %s", expected.diskName, diskName)
return "", errors.New("Unexpected AttachDisk call: wrong diskName")
if expected.volumeID != volumeID {
testcase.t.Errorf("Unexpected AttachDisk call: expected volumeID %s, got %s", expected.volumeID, volumeID)
return "", errors.New("Unexpected AttachDisk call: wrong volumeID")
}
if expected.instanceID != instanceID {
@@ -454,12 +455,12 @@ func (testcase *testcase) AttachDisk(instanceID string, diskName string) (string
return "", errors.New("Unexpected AttachDisk call: wrong instanceID")
}
glog.V(4).Infof("AttachDisk call: %s, %s, returning %q, %v", diskName, instanceID, expected.retDeviceName, expected.ret)
glog.V(4).Infof("AttachDisk call: %s, %s, returning %q, %v", volumeID, instanceID, expected.retDeviceName, expected.ret)
return expected.retDeviceName, expected.ret
}
func (testcase *testcase) DetachDisk(instanceID string, partialDiskId string) error {
func (testcase *testcase) DetachDisk(instanceID, volumeID string) error {
expected := &testcase.detach
if expected.devicePath == "" && expected.instanceID == "" {
@@ -469,9 +470,9 @@ func (testcase *testcase) DetachDisk(instanceID string, partialDiskId string) er
return errors.New("Unexpected DetachDisk call!")
}
if expected.devicePath != partialDiskId {
testcase.t.Errorf("Unexpected DetachDisk call: expected partialDiskId %s, got %s", expected.devicePath, partialDiskId)
return errors.New("Unexpected DetachDisk call: wrong diskName")
if expected.devicePath != volumeID {
testcase.t.Errorf("Unexpected DetachDisk call: expected volumeID %s, got %s", expected.devicePath, volumeID)
return errors.New("Unexpected DetachDisk call: wrong volumeID")
}
if expected.instanceID != instanceID {
@@ -479,7 +480,7 @@ func (testcase *testcase) DetachDisk(instanceID string, partialDiskId string) er
return errors.New("Unexpected DetachDisk call: wrong instanceID")
}
glog.V(4).Infof("DetachDisk call: %s, %s, returning %v", partialDiskId, instanceID, expected.ret)
glog.V(4).Infof("DetachDisk call: %s, %s, returning %v", volumeID, instanceID, expected.ret)
return expected.ret
}
@@ -497,19 +498,19 @@ func (testcase *testcase) OperationPending(diskName string) (bool, string, error
return false, expected.volumeStatus, expected.ret
}
func (testcase *testcase) DiskIsAttached(diskName, instanceID string) (bool, error) {
func (testcase *testcase) DiskIsAttached(instanceID, volumeID string) (bool, error) {
expected := &testcase.diskIsAttached
if expected.diskName == "" && expected.instanceID == "" {
if expected.volumeID == "" && expected.instanceID == "" {
// testcase.diskIsAttached looks uninitialized, test did not expect to
// call DiskIsAttached
testcase.t.Errorf("Unexpected DiskIsAttached call!")
return false, errors.New("Unexpected DiskIsAttached call!")
}
if expected.diskName != diskName {
testcase.t.Errorf("Unexpected DiskIsAttached call: expected diskName %s, got %s", expected.diskName, diskName)
return false, errors.New("Unexpected DiskIsAttached call: wrong diskName")
if expected.volumeID != volumeID {
testcase.t.Errorf("Unexpected DiskIsAttached call: expected volumeID %s, got %s", expected.volumeID, volumeID)
return false, errors.New("Unexpected DiskIsAttached call: wrong volumeID")
}
if expected.instanceID != instanceID {
@@ -517,23 +518,23 @@ func (testcase *testcase) DiskIsAttached(diskName, instanceID string) (bool, err
return false, errors.New("Unexpected DiskIsAttached call: wrong instanceID")
}
glog.V(4).Infof("DiskIsAttached call: %s, %s, returning %v, %v", diskName, instanceID, expected.isAttached, expected.ret)
glog.V(4).Infof("DiskIsAttached call: %s, %s, returning %v, %v", volumeID, instanceID, expected.isAttached, expected.ret)
return expected.isAttached, expected.ret
}
func (testcase *testcase) GetAttachmentDiskPath(instanceID string, diskName string) (string, error) {
func (testcase *testcase) GetAttachmentDiskPath(instanceID, volumeID string) (string, error) {
expected := &testcase.diskPath
if expected.diskName == "" && expected.instanceID == "" {
if expected.volumeID == "" && expected.instanceID == "" {
// testcase.diskPath looks uninitialized, test did not expect to
// call GetAttachmentDiskPath
testcase.t.Errorf("Unexpected GetAttachmentDiskPath call!")
return "", errors.New("Unexpected GetAttachmentDiskPath call!")
}
if expected.diskName != diskName {
testcase.t.Errorf("Unexpected GetAttachmentDiskPath call: expected diskName %s, got %s", expected.diskName, diskName)
return "", errors.New("Unexpected GetAttachmentDiskPath call: wrong diskName")
if expected.volumeID != volumeID {
testcase.t.Errorf("Unexpected GetAttachmentDiskPath call: expected volumeID %s, got %s", expected.volumeID, volumeID)
return "", errors.New("Unexpected GetAttachmentDiskPath call: wrong volumeID")
}
if expected.instanceID != instanceID {
@@ -541,7 +542,7 @@ func (testcase *testcase) GetAttachmentDiskPath(instanceID string, diskName stri
return "", errors.New("Unexpected GetAttachmentDiskPath call: wrong instanceID")
}
glog.V(4).Infof("GetAttachmentDiskPath call: %s, %s, returning %v, %v", diskName, instanceID, expected.retPath, expected.ret)
glog.V(4).Infof("GetAttachmentDiskPath call: %s, %s, returning %v, %v", volumeID, instanceID, expected.retPath, expected.ret)
return expected.retPath, expected.ret
}
@@ -550,11 +551,11 @@ func (testcase *testcase) ShouldTrustDevicePath() bool {
return true
}
func (testcase *testcase) CreateVolume(name string, size int, vtype, availability string, tags *map[string]string) (volumeId string, volumeAZ string, err error) {
func (testcase *testcase) CreateVolume(name string, size int, vtype, availability string, tags *map[string]string) (string, string, error) {
return "", "", errors.New("Not implemented")
}
func (testcase *testcase) GetDevicePath(diskId string) string {
func (testcase *testcase) GetDevicePath(volumeID string) string {
return ""
}
@@ -562,7 +563,7 @@ func (testcase *testcase) InstanceID() (string, error) {
return testcase.instanceID, nil
}
func (testcase *testcase) DeleteVolume(volumeName string) error {
func (testcase *testcase) DeleteVolume(volumeID string) error {
return errors.New("Not implemented")
}
@@ -574,20 +575,20 @@ func (testcase *testcase) Instances() (cloudprovider.Instances, bool) {
return &instances{testcase.instanceID}, true
}
func (testcase *testcase) DisksAreAttached(diskNames []string, instanceID string) (map[string]bool, error) {
func (testcase *testcase) DisksAreAttached(instanceID string, volumeIDs []string) (map[string]bool, error) {
expected := &testcase.disksAreAttached
areAttached := make(map[string]bool)
if len(expected.diskNames) == 0 && expected.instanceID == "" {
// testcase.diskNames looks uninitialized, test did not expect to call DisksAreAttached
if len(expected.volumeIDs) == 0 && expected.instanceID == "" {
// testcase.volumeIDs looks uninitialized, test did not expect to call DisksAreAttached
testcase.t.Errorf("Unexpected DisksAreAttached call!")
return areAttached, errors.New("Unexpected DisksAreAttached call")
}
if !reflect.DeepEqual(expected.diskNames, diskNames) {
testcase.t.Errorf("Unexpected DisksAreAttached call: expected diskNames %v, got %v", expected.diskNames, diskNames)
return areAttached, errors.New("Unexpected DisksAreAttached call: wrong diskName")
if !reflect.DeepEqual(expected.volumeIDs, volumeIDs) {
testcase.t.Errorf("Unexpected DisksAreAttached call: expected volumeIDs %v, got %v", expected.volumeIDs, volumeIDs)
return areAttached, errors.New("Unexpected DisksAreAttached call: wrong volumeID")
}
if expected.instanceID != instanceID {
@@ -595,7 +596,7 @@ func (testcase *testcase) DisksAreAttached(diskNames []string, instanceID string
return areAttached, errors.New("Unexpected DisksAreAttached call: wrong instanceID")
}
glog.V(4).Infof("DisksAreAttached call: %v, %s, returning %v, %v", diskNames, instanceID, expected.areAttached, expected.ret)
glog.V(4).Infof("DisksAreAttached call: %v, %s, returning %v, %v", volumeIDs, instanceID, expected.areAttached, expected.ret)
return expected.areAttached, expected.ret
}

View File

@@ -44,16 +44,16 @@ func ProbeVolumePlugins() []volume.VolumePlugin {
}
type CinderProvider interface {
AttachDisk(instanceID string, diskName string) (string, error)
DetachDisk(instanceID string, partialDiskId string) error
DeleteVolume(volumeName string) error
AttachDisk(instanceID, volumeID string) (string, error)
DetachDisk(instanceID, volumeID string) error
DeleteVolume(volumeID string) error
CreateVolume(name string, size int, vtype, availability string, tags *map[string]string) (string, string, error)
GetDevicePath(diskId string) string
GetDevicePath(volumeID string) string
InstanceID() (string, error)
GetAttachmentDiskPath(instanceID string, diskName string) (string, error)
GetAttachmentDiskPath(instanceID, volumeID string) (string, error)
OperationPending(diskName string) (bool, string, error)
DiskIsAttached(diskName, instanceID string) (bool, error)
DisksAreAttached(diskNames []string, instanceID string) (map[string]bool, error)
DiskIsAttached(instanceID, volumeID string) (bool, error)
DisksAreAttached(instanceID string, volumeIDs []string) (map[string]bool, error)
ShouldTrustDevicePath() bool
Instances() (cloudprovider.Instances, bool)
}
@@ -263,8 +263,6 @@ type cinderVolume struct {
pdName string
// Filesystem type, optional.
fsType string
// Specifies the partition to mount
//partition string
// Specifies whether the disk will be attached as read-only.
readOnly bool
// Utility interface that provides API calls to the provider to attach/detach disks.

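Given how many signatures CinderProvider just changed, a compile-time conformance check is a cheap way to keep both clouds honest: if either provider drifts from the interface again, the build breaks instead of a runtime type assertion. A sketch of that idiom (these assertions are illustrative, not part of the commit, and assume the openstack and rackspace provider packages are imported):

// Both providers use pointer receivers, so assert against the pointer types.
var _ CinderProvider = (*openstack.OpenStack)(nil)
var _ CinderProvider = (*rackspace.Rackspace)(nil)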
View File

@@ -201,18 +201,18 @@ func (util *CinderDiskUtil) CreateVolume(c *cinderVolumeProvisioner) (volumeID s
}
}
volumeId, volumeAZ, errr := cloud.CreateVolume(name, volSizeGB, vtype, availability, c.options.CloudTags)
volumeID, volumeAZ, errr := cloud.CreateVolume(name, volSizeGB, vtype, availability, c.options.CloudTags)
if errr != nil {
glog.V(2).Infof("Error creating cinder volume: %v", errr)
return "", 0, nil, errr
}
glog.V(2).Infof("Successfully created cinder volume %s", volumeId)
glog.V(2).Infof("Successfully created cinder volume %s", volumeID)
// these labels are needed so the pod is scheduled to the same AZ
volumeLabels = make(map[string]string)
volumeLabels[metav1.LabelZoneFailureDomain] = volumeAZ
return volumeId, volSizeGB, volumeLabels, nil
return volumeID, volSizeGB, volumeLabels, nil
}
func probeAttachedVolume() error {