use dangling error

add getNodeNameByID and use volume.AttachedDevice as device path

use uppercase function name

do not automatically delete nodes if a node is shut down in OpenStack

do not delete node

fix gofmt

fix cinder detach if instance is not in active state

fix gofmt
Jesse Haka 2017-12-06 09:33:45 +02:00
parent cfc1d47397
commit bac270533e
6 changed files with 58 additions and 11 deletions
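In short: when AttachDisk finds the volume attached to a different server, it now reports a dangling attachment (the holding node plus the reported device path) via volumeutil.NewDanglingError instead of returning a plain error, and it detaches first when the holding server is no longer ACTIVE. A minimal sketch of the error's construction, assuming only the call shape visible in this diff (the node and path values here are hypothetical):

package main

import (
    "fmt"

    k8stypes "k8s.io/apimachinery/pkg/types"
    volumeutil "k8s.io/kubernetes/pkg/volume/util"
)

func main() {
    // Same call shape AttachDisk uses below for a foreign attachment.
    nodeName := k8stypes.NodeName("worker-1") // hypothetical holding node
    devicePath := "/dev/vdb"                  // hypothetical device path
    msg := fmt.Sprintf("disk %s path %s is attached to a different instance (%s)",
        "vol-id", devicePath, "server-id")
    err := volumeutil.NewDanglingError(msg, nodeName, devicePath)
    fmt.Println(err) // carries the node and device path for the caller to reconcile
}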


@@ -26,6 +26,7 @@ go_library(
         "//pkg/controller:go_default_library",
         "//pkg/util/mount:go_default_library",
         "//pkg/volume:go_default_library",
+        "//pkg/volume/util:go_default_library",
         "//vendor/github.com/golang/glog:go_default_library",
         "//vendor/github.com/gophercloud/gophercloud:go_default_library",
         "//vendor/github.com/gophercloud/gophercloud/openstack:go_default_library",


@@ -319,6 +319,22 @@ func mapNodeNameToServerName(nodeName types.NodeName) string {
 	return string(nodeName)
 }
 
+// GetNodeNameByID maps an instance ID to a types.NodeName
+func (os *OpenStack) GetNodeNameByID(instanceID string) (types.NodeName, error) {
+	client, err := os.NewComputeV2()
+	var nodeName types.NodeName
+	if err != nil {
+		return nodeName, err
+	}
+
+	server, err := servers.Get(client, instanceID).Extract()
+	if err != nil {
+		return nodeName, err
+	}
+	nodeName = mapServerToNodeName(server)
+	return nodeName, nil
+}
+
 // mapServerToNodeName maps an OpenStack Server to a k8s NodeName
 func mapServerToNodeName(server *servers.Server) types.NodeName {
 	// Node names are always lowercase, and (at least)
@@ -346,11 +362,14 @@ func foreachServer(client *gophercloud.ServiceClient, opts servers.ListOptsBuild
 	return err
 }
 
-func getServerByName(client *gophercloud.ServiceClient, name types.NodeName) (*servers.Server, error) {
+func getServerByName(client *gophercloud.ServiceClient, name types.NodeName, showOnlyActive bool) (*servers.Server, error) {
 	opts := servers.ListOpts{
-		Name:   fmt.Sprintf("^%s$", regexp.QuoteMeta(mapNodeNameToServerName(name))),
-		Status: "ACTIVE",
+		Name: fmt.Sprintf("^%s$", regexp.QuoteMeta(mapNodeNameToServerName(name))),
 	}
+	if showOnlyActive {
+		opts.Status = "ACTIVE"
+	}
 
 	pager := servers.List(client, opts)
 	serverList := make([]servers.Server, 0, 1)
@@ -432,7 +451,7 @@ func nodeAddresses(srv *servers.Server) ([]v1.NodeAddress, error) {
 }
 
 func getAddressesByName(client *gophercloud.ServiceClient, name types.NodeName) ([]v1.NodeAddress, error) {
-	srv, err := getServerByName(client, name)
+	srv, err := getServerByName(client, name, true)
 	if err != nil {
 		return nil, err
 	}
@@ -582,7 +601,7 @@ func (os *OpenStack) GetZoneByNodeName(nodeName types.NodeName) (cloudprovider.Z
 		return cloudprovider.Zone{}, err
 	}
 
-	srv, err := getServerByName(compute, nodeName)
+	srv, err := getServerByName(compute, nodeName, true)
 	if err != nil {
 		if err == ErrNotFound {
 			return cloudprovider.Zone{}, cloudprovider.InstanceNotFound
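Since GetNodeNameByID is exported, callers outside this file (AttachDisk below, for one) can resolve which Kubernetes node corresponds to the server currently holding a volume. A hedged usage sketch; the import path matches this tree, but the wiring around the call is illustrative:

package main

import (
    "fmt"

    "k8s.io/kubernetes/pkg/cloudprovider/providers/openstack"
)

// printHolder resolves the node name for an OpenStack server ID, the same
// lookup AttachDisk performs for volume.AttachedServerId. The os value is
// assumed to come from existing cloud-provider setup.
func printHolder(os *openstack.OpenStack, serverID string) {
    nodeName, err := os.GetNodeNameByID(serverID)
    if err != nil {
        fmt.Println("could not map server to node:", err)
        return
    }
    fmt.Println("volume holder:", nodeName)
}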


@@ -103,7 +103,7 @@ func (i *Instances) NodeAddressesByProviderID(providerID string) ([]v1.NodeAddre
 
 // ExternalID returns the cloud provider ID of the specified instance (deprecated).
 func (i *Instances) ExternalID(name types.NodeName) (string, error) {
-	srv, err := getServerByName(i.compute, name)
+	srv, err := getServerByName(i.compute, name, true)
 	if err != nil {
 		if err == ErrNotFound {
 			return "", cloudprovider.InstanceNotFound
@@ -151,7 +151,7 @@ func (os *OpenStack) InstanceID() (string, error) {
 
 // InstanceID returns the cloud provider ID of the specified instance.
 func (i *Instances) InstanceID(name types.NodeName) (string, error) {
-	srv, err := getServerByName(i.compute, name)
+	srv, err := getServerByName(i.compute, name, true)
 	if err != nil {
 		if err == ErrNotFound {
 			return "", cloudprovider.InstanceNotFound
@@ -184,7 +184,7 @@ func (i *Instances) InstanceTypeByProviderID(providerID string) (string, error)
 
 // InstanceType returns the type of the specified instance.
 func (i *Instances) InstanceType(name types.NodeName) (string, error) {
-	srv, err := getServerByName(i.compute, name)
+	srv, err := getServerByName(i.compute, name, true)
 	if err != nil {
 		return "", err


@@ -551,7 +551,7 @@ func getNodeSecurityGroupIDForLB(compute *gophercloud.ServiceClient, nodes []*v1
 	for _, node := range nodes {
 		nodeName := types.NodeName(node.Name)
-		srv, err := getServerByName(compute, nodeName)
+		srv, err := getServerByName(compute, nodeName, true)
 		if err != nil {
 			return nodeSecurityGroupIDs.List(), err
 		}


@@ -288,7 +288,7 @@ func (r *Routes) DeleteRoute(clusterName string, route *cloudprovider.Route) err
 }
 
 func getPortIDByIP(compute *gophercloud.ServiceClient, targetNode types.NodeName, ipAddress string) (string, error) {
-	srv, err := getServerByName(compute, targetNode)
+	srv, err := getServerByName(compute, targetNode, true)
 	if err != nil {
 		return "", err
 	}


@@ -17,6 +17,7 @@ limitations under the License.
 package openstack
 
 import (
+	"errors"
 	"fmt"
 	"io/ioutil"
 	"path"
@@ -26,6 +27,7 @@ import (
 	"k8s.io/apimachinery/pkg/api/resource"
 	k8s_volume "k8s.io/kubernetes/pkg/volume"
+	volumeutil "k8s.io/kubernetes/pkg/volume/util"
 
 	"github.com/gophercloud/gophercloud"
 	volumeexpand "github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions"
@@ -317,8 +319,33 @@ func (os *OpenStack) AttachDisk(instanceID, volumeID string) (string, error) {
 		if instanceID == volume.AttachedServerId {
 			glog.V(4).Infof("Disk %s is already attached to instance %s", volumeID, instanceID)
 			return volume.ID, nil
+		} else {
+			nodeName, err := os.GetNodeNameByID(volume.AttachedServerId)
+			attachErr := fmt.Sprintf("disk %s path %s is attached to a different instance (%s)", volumeID, volume.AttachedDevice, volume.AttachedServerId)
+			if err != nil {
+				glog.Error(attachErr)
+				return "", errors.New(attachErr)
+			}
+			// using volume.AttachedDevice may cause problems because cinder does not report the device path correctly; see issue #33128
+			devicePath := volume.AttachedDevice
+			danglingErr := volumeutil.NewDanglingError(attachErr, nodeName, devicePath)
+			glog.V(4).Infof("volume %s is already attached to node %s path %s", volumeID, nodeName, devicePath)
+			// special case: if the node has been deleted from the cluster but still exists in OpenStack,
+			// check whether the cinder volume can be detached; the node counts as deleted when its state is not ACTIVE
+			srv, err := getServerByName(cClient, nodeName, false)
+			if err != nil {
+				return "", err
+			}
+			if srv.Status != "ACTIVE" {
+				err = os.DetachDisk(volume.AttachedServerId, volumeID)
+				if err != nil {
+					glog.Error(err)
+					return "", err
+				}
+				glog.V(4).Infof("detached volume %s node state was %s", volumeID, srv.Status)
+			}
+			return "", danglingErr
 		}
-		return "", fmt.Errorf("disk %s is attached to a different instance (%s)", volumeID, volume.AttachedServerId)
 	}
 
 	startTime := time.Now()
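On the consuming side, a caller can distinguish the new dangling result from a hard failure. A sketch assuming pkg/volume/util exports the error as *DanglingAttachError with CurrentNode and DevicePath fields; only NewDanglingError's call shape appears in this commit, so treat the type and field names as assumptions:

package main

import (
    "fmt"

    volumeutil "k8s.io/kubernetes/pkg/volume/util"
)

// handleAttach wraps an AttachDisk-style call; attach is a stand-in for
// a closure over os.AttachDisk(instanceID, volumeID).
func handleAttach(attach func() (string, error)) error {
    devicePath, err := attach()
    if err != nil {
        // Assumed exported type and fields; see the lead-in above.
        if dangling, ok := err.(*volumeutil.DanglingAttachError); ok {
            fmt.Printf("volume held by node %s at %s; reconcile rather than fail\n",
                dangling.CurrentNode, dangling.DevicePath)
            return nil
        }
        return err
    }
    fmt.Println("attached at", devicePath)
    return nil
}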