mirror of https://github.com/k3s-io/k3s
Fixes the regression of GCEPD not provisioning correctly on alpha clusters.
parent 283d35a481
commit 02352460f6

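The patch below removes the blanket AlphaFeatureGCEDisk checks from the GCE disk service manager. Previously, enabling that feature gate (required for regional PD) pushed every PD operation through the alpha compute API, as the deleted branches show; after the change, callers state explicitly whether the alpha API is wanted via a new useAlphaAPI parameter on the URI helpers, and regional attachment is requested through a new regional parameter threaded from the volume plugin down to GCECloud.AttachDisk. A minimal sketch of the selection pattern the diff converges on (this function and its names are illustrative, not part of the patch):

func chooseEndpoint(v1Endpoint, alphaEndpoint string, useAlphaAPI bool) string {
	// An explicit caller decision replaces the old feature-gate lookup,
	// manager.gce.AlphaFeatureGate.Enabled(AlphaFeatureGCEDisk).
	if useAlphaAPI {
		return alphaEndpoint
	}
	return v1Endpoint
}
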
@@ -111,6 +111,8 @@ type gceServiceManager struct {
 	gce *GCECloud
 }
 
+var _ diskServiceManager = &gceServiceManager{}
+
 func (manager *gceServiceManager) CreateDiskOnCloudProvider(
 	name string,
 	sizeGb int64,

@@ -118,23 +120,11 @@ func (manager *gceServiceManager) CreateDiskOnCloudProvider(
 	diskType string,
 	zone string) (gceObject, error) {
 	diskTypeURI, err := manager.getDiskTypeURI(
-		manager.gce.region /* diskRegion */, singleZone{zone}, diskType)
+		manager.gce.region /* diskRegion */, singleZone{zone}, diskType, false /* useAlphaAPI */)
 	if err != nil {
 		return nil, err
 	}
 
-	if manager.gce.AlphaFeatureGate.Enabled(AlphaFeatureGCEDisk) {
-		diskToCreateAlpha := &computealpha.Disk{
-			Name:        name,
-			SizeGb:      sizeGb,
-			Description: tagsStr,
-			Type:        diskTypeURI,
-		}
-
-		return manager.gce.serviceAlpha.Disks.Insert(
-			manager.gce.projectID, zone, diskToCreateAlpha).Do()
-	}
-
 	diskToCreateV1 := &compute.Disk{
 		Name:        name,
 		SizeGb:      sizeGb,

@@ -151,17 +141,19 @@ func (manager *gceServiceManager) CreateRegionalDiskOnCloudProvider(
 	tagsStr string,
 	diskType string,
 	replicaZones sets.String) (gceObject, error) {
-	diskTypeURI, err := manager.getDiskTypeURI(
-		manager.gce.region /* diskRegion */, multiZone{replicaZones}, diskType)
-	if err != nil {
-		return nil, err
-	}
-	fullyQualifiedReplicaZones := []string{}
-	for _, replicaZone := range replicaZones.UnsortedList() {
-		fullyQualifiedReplicaZones = append(
-			fullyQualifiedReplicaZones, manager.getReplicaZoneURI(replicaZone))
-	}
+
 	if manager.gce.AlphaFeatureGate.Enabled(AlphaFeatureGCEDisk) {
+		diskTypeURI, err := manager.getDiskTypeURI(
+			manager.gce.region /* diskRegion */, multiZone{replicaZones}, diskType, true /* useAlphaAPI */)
+		if err != nil {
+			return nil, err
+		}
+		fullyQualifiedReplicaZones := []string{}
+		for _, replicaZone := range replicaZones.UnsortedList() {
+			fullyQualifiedReplicaZones = append(
+				fullyQualifiedReplicaZones, manager.getReplicaZoneURI(replicaZone, true))
+		}
+
 		diskToCreateAlpha := &computealpha.Disk{
 			Name:         name,
 			SizeGb:       sizeGb,

@@ -186,24 +178,12 @@ func (manager *gceServiceManager) AttachDiskOnCloudProvider(
 		return nil, err
 	}
 
-	if manager.gce.AlphaFeatureGate.Enabled(AlphaFeatureGCEDisk) {
-		attachedDiskAlpha := &computealpha.AttachedDisk{
-			DeviceName: disk.Name,
-			Kind:       disk.Kind,
-			Mode:       readWrite,
-			Source:     source,
-			Type:       diskTypePersistent,
-		}
-		return manager.gce.serviceAlpha.Instances.AttachDisk(
-			manager.gce.projectID, instanceZone, instanceName, attachedDiskAlpha).Do()
-	}
-
 	attachedDiskV1 := &compute.AttachedDisk{
 		DeviceName: disk.Name,
 		Kind:       disk.Kind,
 		Mode:       readWrite,
 		Source:     source,
-		Type:       disk.Type,
+		Type:       diskTypePersistent,
 	}
 	return manager.gce.service.Instances.AttachDisk(
 		manager.gce.projectID, instanceZone, instanceName, attachedDiskV1).Do()

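The one-line change at the end of this hunk is a substantive behavior fix, not just gate removal: in the GCE compute API, AttachedDisk.Type describes how a disk is attached rather than which kind of PD it is, so the v1 branch's old Type: disk.Type sent a disk-type value where an attachment kind was expected; the deleted alpha branch was already passing diskTypePersistent. A hedged sketch of the distinction (the constant name matches the diff; the literal values are assumptions based on the compute API):

const (
	diskTypePersistent = "PERSISTENT" // attachment kind used for all PDs (assumed value)
	diskTypeScratch    = "SCRATCH"    // attachment kind for local scratch disks (assumed value)
)
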
@@ -213,11 +193,6 @@ func (manager *gceServiceManager) DetachDiskOnCloudProvider(
 	instanceZone string,
 	instanceName string,
 	devicePath string) (gceObject, error) {
-	if manager.gce.AlphaFeatureGate.Enabled(AlphaFeatureGCEDisk) {
-		manager.gce.serviceAlpha.Instances.DetachDisk(
-			manager.gce.projectID, instanceZone, instanceName, devicePath).Do()
-	}
-
 	return manager.gce.service.Instances.DetachDisk(
 		manager.gce.projectID, instanceZone, instanceName, devicePath).Do()
 }

@@ -233,45 +208,6 @@ func (manager *gceServiceManager) GetDiskFromCloudProvider(
 		return nil, fmt.Errorf("Can not fetch disk. Zone is specified (%q). But disk name is empty.", zone)
 	}
 
-	if manager.gce.AlphaFeatureGate.Enabled(AlphaFeatureGCEDisk) {
-		diskAlpha, err := manager.gce.serviceAlpha.Disks.Get(
-			manager.gce.projectID, zone, diskName).Do()
-		if err != nil {
-			return nil, err
-		}
-
-		var zoneInfo zoneType
-		if len(diskAlpha.ReplicaZones) > 1 {
-			zones := sets.NewString()
-			for _, zoneURI := range diskAlpha.ReplicaZones {
-				zones.Insert(lastComponent(zoneURI))
-			}
-			zoneInfo = multiZone{zones}
-		} else {
-			zoneInfo = singleZone{lastComponent(diskAlpha.Zone)}
-			if diskAlpha.Zone == "" {
-				zoneInfo = singleZone{lastComponent(zone)}
-			}
-		}
-
-		region := strings.TrimSpace(lastComponent(diskAlpha.Region))
-		if region == "" {
-			region, err = manager.getRegionFromZone(zoneInfo)
-			if err != nil {
-				return nil, fmt.Errorf("failed to extract region from zone for %q/%q err=%v", zone, diskName, err)
-			}
-		}
-
-		return &GCEDisk{
-			ZoneInfo: zoneInfo,
-			Region:   region,
-			Name:     diskAlpha.Name,
-			Kind:     diskAlpha.Kind,
-			Type:     diskAlpha.Type,
-			SizeGb:   diskAlpha.SizeGb,
-		}, nil
-	}
-
 	diskStable, err := manager.gce.service.Disks.Get(
 		manager.gce.projectID, zone, diskName).Do()
 	if err != nil {

@@ -329,12 +265,6 @@ func (manager *gceServiceManager) GetRegionalDiskFromCloudProvider(
 func (manager *gceServiceManager) DeleteDiskOnCloudProvider(
 	zone string,
 	diskName string) (gceObject, error) {
-
-	if manager.gce.AlphaFeatureGate.Enabled(AlphaFeatureGCEDisk) {
-		return manager.gce.serviceAlpha.Disks.Delete(
-			manager.gce.projectID, zone, diskName).Do()
-	}
-
 	return manager.gce.service.Disks.Delete(
 		manager.gce.projectID, zone, diskName).Do()
 }

@@ -361,9 +291,6 @@ func (manager *gceServiceManager) WaitForRegionalOp(
 
 func (manager *gceServiceManager) getDiskSourceURI(disk *GCEDisk) (string, error) {
 	getProjectsAPIEndpoint := manager.getProjectsAPIEndpoint()
-	if manager.gce.AlphaFeatureGate.Enabled(AlphaFeatureGCEDisk) {
-		getProjectsAPIEndpoint = manager.getProjectsAPIEndpointAlpha()
-	}
 
 	switch zoneInfo := disk.ZoneInfo.(type) {
 	case singleZone:

@@ -397,10 +324,13 @@ func (manager *gceServiceManager) getDiskSourceURI(disk *GCEDisk) (string, error
 }
 
 func (manager *gceServiceManager) getDiskTypeURI(
-	diskRegion string, diskZoneInfo zoneType, diskType string) (string, error) {
-	getProjectsAPIEndpoint := manager.getProjectsAPIEndpoint()
-	if manager.gce.AlphaFeatureGate.Enabled(AlphaFeatureGCEDisk) {
+	diskRegion string, diskZoneInfo zoneType, diskType string, useAlphaAPI bool) (string, error) {
+
+	var getProjectsAPIEndpoint string
+	if useAlphaAPI {
 		getProjectsAPIEndpoint = manager.getProjectsAPIEndpointAlpha()
+	} else {
+		getProjectsAPIEndpoint = manager.getProjectsAPIEndpoint()
 	}
 
 	switch zoneInfo := diskZoneInfo.(type) {

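With useAlphaAPI threaded through, one helper now serves both endpoints: the zonal create path earlier in the diff passes false, while the alpha-gated regional path passes true. A self-contained sketch of the URI assembly, assuming the v1 projects endpoint prefix and a single-zone template like the one the surrounding code uses:

package main

import "fmt"

// diskTypeURI mimics what getDiskTypeURI produces for a single zone once
// the endpoint has been chosen; project/zone/type values are examples.
func diskTypeURI(endpoint, project, zone, diskType string) string {
	return endpoint + fmt.Sprintf("%s/zones/%s/diskTypes/%s", project, zone, diskType)
}

func main() {
	fmt.Println(diskTypeURI(
		"https://www.googleapis.com/compute/v1/projects/",
		"my-project", "us-central1-b", "pd-standard"))
}
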
@@ -430,10 +360,12 @@ func (manager *gceServiceManager) getDiskTypeURI(
 	}
 }
 
-func (manager *gceServiceManager) getReplicaZoneURI(zone string) string {
-	getProjectsAPIEndpoint := manager.getProjectsAPIEndpoint()
-	if manager.gce.AlphaFeatureGate.Enabled(AlphaFeatureGCEDisk) {
+func (manager *gceServiceManager) getReplicaZoneURI(zone string, useAlphaAPI bool) string {
+	var getProjectsAPIEndpoint string
+	if useAlphaAPI {
 		getProjectsAPIEndpoint = manager.getProjectsAPIEndpointAlpha()
+	} else {
+		getProjectsAPIEndpoint = manager.getProjectsAPIEndpoint()
 	}
 
 	return getProjectsAPIEndpoint + fmt.Sprintf(

@@ -477,13 +409,6 @@ func (manager *gceServiceManager) getRegionFromZone(zoneInfo zoneType) (string,
 }
 
 func (manager *gceServiceManager) ResizeDiskOnCloudProvider(disk *GCEDisk, sizeGb int64, zone string) (gceObject, error) {
-	if manager.gce.AlphaFeatureGate.Enabled(AlphaFeatureGCEDisk) {
-		resizeServiceRequest := &computealpha.DisksResizeRequest{
-			SizeGb: sizeGb,
-		}
-		return manager.gce.serviceAlpha.Disks.Resize(manager.gce.projectID, zone, disk.Name, resizeServiceRequest).Do()
-
-	}
 	resizeServiceRequest := &compute.DisksResizeRequest{
 		SizeGb: sizeGb,
 	}

@@ -504,7 +429,7 @@ func (manager *gceServiceManager) RegionalResizeDiskOnCloudProvider(disk *GCEDis
 type Disks interface {
 	// AttachDisk attaches given disk to the node with the specified NodeName.
 	// Current instance is used when instanceID is empty string.
-	AttachDisk(diskName string, nodeName types.NodeName, readOnly bool) error
+	AttachDisk(diskName string, nodeName types.NodeName, readOnly bool, regional bool) error
 
 	// DetachDisk detaches given disk to the node with the specified NodeName.
 	// Current instance is used when nodeName is empty string.

@@ -594,7 +519,7 @@ func (gce *GCECloud) GetLabelsForVolume(pv *v1.PersistentVolume) (map[string]str
 	return labels, nil
 }
 
-func (gce *GCECloud) AttachDisk(diskName string, nodeName types.NodeName, readOnly bool) error {
+func (gce *GCECloud) AttachDisk(diskName string, nodeName types.NodeName, readOnly bool, regional bool) error {
 	instanceName := mapNodeNameToInstanceName(nodeName)
 	instance, err := gce.getInstanceByName(instanceName)
 	if err != nil {

@@ -604,7 +529,7 @@ func (gce *GCECloud) AttachDisk(diskName string, nodeName types.NodeName, readOn
 	// Try fetching as regional PD
 	var disk *GCEDisk
 	var mc *metricContext
-	if gce.AlphaFeatureGate.Enabled(AlphaFeatureGCEDisk) {
+	if regional {
 		disk, err = gce.getRegionalDiskByName(diskName)
 		if err != nil {
 			glog.V(5).Infof("Could not find regional PD named %q to Attach. Will look for a zonal PD", diskName)

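AttachDisk now branches on the caller-supplied regional flag instead of the feature gate and, per the glog message above, falls back to a zonal lookup when no regional PD by that name exists. A self-contained sketch of that lookup order, with stubs standing in for the real getRegionalDiskByName and getDiskByName methods:

package main

import (
	"errors"
	"fmt"
)

type gceDisk struct{ name string }

// Stubs for illustration only.
func getRegionalDiskByName(name string) (*gceDisk, error) { return nil, errors.New("not found") }
func getDiskByName(name string) (*gceDisk, error)         { return &gceDisk{name: name}, nil }

func lookupDisk(regional bool, name string) (*gceDisk, error) {
	if regional {
		if d, err := getRegionalDiskByName(name); err == nil {
			return d, nil
		}
		// No regional PD by that name; fall back to a zonal lookup,
		// mirroring the glog.V(5) message in the hunk above.
	}
	return getDiskByName(name)
}

func main() {
	d, err := lookupDisk(true, "disk")
	fmt.Println(d.name, err) // prints "disk <nil>": fell back to the zonal stub
}
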
@@ -18,6 +18,7 @@ go_library(
     deps = [
         "//pkg/cloudprovider:go_default_library",
         "//pkg/cloudprovider/providers/gce:go_default_library",
+        "//pkg/kubelet/apis:go_default_library",
         "//pkg/util/mount:go_default_library",
         "//pkg/util/strings:go_default_library",
         "//pkg/volume:go_default_library",

@@ -42,6 +43,7 @@ go_test(
     embed = [":go_default_library"],
     importpath = "k8s.io/kubernetes/pkg/volume/gce_pd",
     deps = [
+        "//pkg/kubelet/apis:go_default_library",
         "//pkg/util/mount:go_default_library",
         "//pkg/volume:go_default_library",
         "//pkg/volume/testing:go_default_library",

@@ -88,7 +88,7 @@ func (attacher *gcePersistentDiskAttacher) Attach(spec *volume.Spec, nodeName ty
 		// Volume is already attached to node.
 		glog.Infof("Attach operation is successful. PD %q is already attached to node %q.", pdName, nodeName)
 	} else {
-		if err := attacher.gceDisks.AttachDisk(pdName, nodeName, readOnly); err != nil {
+		if err := attacher.gceDisks.AttachDisk(pdName, nodeName, readOnly, isRegionalPD(spec)); err != nil {
 			glog.Errorf("Error attaching PD %q to node %q: %+v", pdName, nodeName, err)
 			return "", err
 		}

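At the call site, the attacher now derives the regional argument from the volume spec itself via isRegionalPD(spec), added in the final hunk of this commit, so the attach path no longer consults any feature-gate state.
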
@@ -29,6 +29,8 @@ import (
 
 	"github.com/golang/glog"
 	"k8s.io/apimachinery/pkg/types"
+	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
+	"strings"
 )
 
 func TestGetDeviceName_Volume(t *testing.T) {

@@ -48,7 +50,7 @@ func TestGetDeviceName_Volume(t *testing.T) {
 func TestGetDeviceName_PersistentVolume(t *testing.T) {
 	plugin := newPlugin()
 	name := "my-pd-pv"
-	spec := createPVSpec(name, true)
+	spec := createPVSpec(name, true, nil)
 
 	deviceName, err := plugin.GetVolumeName(spec)
 	if err != nil {

@@ -74,10 +76,39 @@ type testcase struct {
 	expectedReturn error
 }
 
+func TestAttachDetachRegional(t *testing.T) {
+	diskName := "disk"
+	nodeName := types.NodeName("instance")
+	readOnly := false
+	regional := true
+	spec := createPVSpec(diskName, readOnly, []string{"zone1", "zone2"})
+	// Successful Attach call
+	testcase := testcase{
+		name:           "Attach_Regional_Positive",
+		diskIsAttached: diskIsAttachedCall{diskName, nodeName, false, nil},
+		attach:         attachCall{diskName, nodeName, readOnly, regional, nil},
+		test: func(testcase *testcase) error {
+			attacher := newAttacher(testcase)
+			devicePath, err := attacher.Attach(spec, nodeName)
+			if devicePath != "/dev/disk/by-id/google-disk" {
+				return fmt.Errorf("devicePath incorrect. Expected<\"/dev/disk/by-id/google-disk\"> Actual: <%q>", devicePath)
+			}
+			return err
+		},
+	}
+
+	err := testcase.test(&testcase)
+	if err != testcase.expectedReturn {
+		t.Errorf("%s failed: expected err=%q, got %q", testcase.name, testcase.expectedReturn.Error(), err.Error())
+	}
+	t.Logf("Test %q succeeded", testcase.name)
+}
+
 func TestAttachDetach(t *testing.T) {
 	diskName := "disk"
 	nodeName := types.NodeName("instance")
 	readOnly := false
+	regional := false
 	spec := createVolSpec(diskName, readOnly)
 	attachError := errors.New("Fake attach error")
 	detachError := errors.New("Fake detach error")

@@ -87,7 +118,7 @@ func TestAttachDetach(t *testing.T) {
 		{
 			name:           "Attach_Positive",
 			diskIsAttached: diskIsAttachedCall{diskName, nodeName, false, nil},
-			attach:         attachCall{diskName, nodeName, readOnly, nil},
+			attach:         attachCall{diskName, nodeName, readOnly, regional, nil},
 			test: func(testcase *testcase) error {
 				attacher := newAttacher(testcase)
 				devicePath, err := attacher.Attach(spec, nodeName)

@@ -116,7 +147,7 @@ func TestAttachDetach(t *testing.T) {
 		{
 			name:           "Attach_Positive_CheckFails",
 			diskIsAttached: diskIsAttachedCall{diskName, nodeName, false, diskCheckError},
-			attach:         attachCall{diskName, nodeName, readOnly, nil},
+			attach:         attachCall{diskName, nodeName, readOnly, regional, nil},
 			test: func(testcase *testcase) error {
 				attacher := newAttacher(testcase)
 				devicePath, err := attacher.Attach(spec, nodeName)

@@ -131,7 +162,7 @@ func TestAttachDetach(t *testing.T) {
 		{
 			name:           "Attach_Negative",
 			diskIsAttached: diskIsAttachedCall{diskName, nodeName, false, diskCheckError},
-			attach:         attachCall{diskName, nodeName, readOnly, attachError},
+			attach:         attachCall{diskName, nodeName, readOnly, regional, attachError},
 			test: func(testcase *testcase) error {
 				attacher := newAttacher(testcase)
 				devicePath, err := attacher.Attach(spec, nodeName)

@@ -238,8 +269,8 @@ func createVolSpec(name string, readOnly bool) *volume.Spec {
 	}
 }
 
-func createPVSpec(name string, readOnly bool) *volume.Spec {
-	return &volume.Spec{
+func createPVSpec(name string, readOnly bool, zones []string) *volume.Spec {
+	spec := &volume.Spec{
 		PersistentVolume: &v1.PersistentVolume{
 			Spec: v1.PersistentVolumeSpec{
 				PersistentVolumeSource: v1.PersistentVolumeSource{

@@ -251,6 +282,15 @@ func createPVSpec(name string, readOnly bool) *volume.Spec {
 			},
 		},
 	}
+
+	if zones != nil {
+		zonesLabel := strings.Join(zones, kubeletapis.LabelMultiZoneDelimiter)
+		spec.PersistentVolume.ObjectMeta.Labels = map[string]string{
+			kubeletapis.LabelZoneFailureDomain: zonesLabel,
+		}
+	}
+
+	return spec
 }
 
 // Fake GCE implementation

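createPVSpec can now stamp the test PV with the multi-zone failure-domain label, which is exactly what the new isRegionalPD helper at the end of this commit keys off. A self-contained round-trip sketch, assuming the kubeletapis values of the era (LabelZoneFailureDomain = "failure-domain.beta.kubernetes.io/zone", LabelMultiZoneDelimiter = "__"):

package main

import (
	"fmt"
	"strings"
)

// Assumed to match kubeletapis.LabelMultiZoneDelimiter at this commit.
const labelMultiZoneDelimiter = "__"

func main() {
	label := strings.Join([]string{"zone1", "zone2"}, labelMultiZoneDelimiter)
	fmt.Println(label)                                                  // zone1__zone2
	fmt.Println(len(strings.Split(label, labelMultiZoneDelimiter)) > 1) // true, i.e. regional
}
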
@@ -259,6 +299,7 @@ type attachCall struct {
 	diskName string
 	nodeName types.NodeName
 	readOnly bool
+	regional bool
 	ret      error
 }
 

@@ -275,7 +316,7 @@ type diskIsAttachedCall struct {
 	ret error
 }
 
-func (testcase *testcase) AttachDisk(diskName string, nodeName types.NodeName, readOnly bool) error {
+func (testcase *testcase) AttachDisk(diskName string, nodeName types.NodeName, readOnly bool, regional bool) error {
 	expected := &testcase.attach
 
 	if expected.diskName == "" && expected.nodeName == "" {

@@ -300,6 +341,11 @@ func (testcase *testcase) AttachDisk(diskName string, nodeName types.NodeName, r
 		return errors.New("Unexpected AttachDisk call: wrong readOnly")
 	}
 
+	if expected.regional != regional {
+		testcase.t.Errorf("Unexpected AttachDisk call: expected regional %v, got %v", expected.regional, regional)
+		return errors.New("Unexpected AttachDisk call: wrong regional")
+	}
+
 	glog.V(4).Infof("AttachDisk call: %s, %s, %v, returning %v", diskName, nodeName, readOnly, expected.ret)
 
 	return expected.ret

@@ -28,6 +28,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/kubernetes/pkg/cloudprovider"
 	gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
+	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
 	"k8s.io/kubernetes/pkg/volume"
 	volumeutil "k8s.io/kubernetes/pkg/volume/util"
 	"k8s.io/utils/exec"

@@ -356,3 +357,13 @@ func udevadmChangeToDrive(drivePath string) error {
 	}
 	return nil
 }
+
+// Checks whether the given GCE PD volume spec is associated with a regional PD.
+func isRegionalPD(spec *volume.Spec) bool {
+	if spec.PersistentVolume != nil {
+		zonesLabel := spec.PersistentVolume.Labels[kubeletapis.LabelZoneFailureDomain]
+		zones := strings.Split(zonesLabel, kubeletapis.LabelMultiZoneDelimiter)
+		return len(zones) > 1
+	}
+	return false
+}

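Note the zero-value behavior that makes this guard sufficient: on a PV without the failure-domain label the map lookup yields "", and strings.Split("", delimiter) returns a one-element slice, so unlabeled and single-zone PVs report false; the PersistentVolume nil check covers in-line volumes, which carry no labels at all.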