mirror of https://github.com/k3s-io/k3s

Revert "Refactor GCE wrapper library to allow execution from E2E test suite"

parent 6fd986065b
commit 8a922e22be
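This revert removes the exported GCE helpers that the refactor had added for the E2E suite and restores the metadata-backed fields and method signatures. A summary of the caller-visible surface, taken verbatim from the hunks below:

	// Removed by this revert:
	func CreateGCECloud(projectID, zone, networkURL string, tokenSource oauth2.TokenSource, useMetadataServer bool) (*GCECloud, error)
	func (gce *GCECloud) CreateDisk(name string, sizeGb int64) error
	func (gce *GCECloud) DeleteDisk(diskToDelete string) error
	func (gce *GCECloud) DiskIsAttached(diskName, instanceID string) (bool, error)
	func (gce *GCECloud) AttachDisk(diskName, instanceID string, readOnly bool) error
	func (gce *GCECloud) DetachDisk(devicePath, instanceID string) error

	// Restored by this revert:
	func (gce *GCECloud) AttachDisk(diskName string, readOnly bool) error
	func (gce *GCECloud) DetachDisk(devicePath string) error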
@@ -62,12 +62,13 @@ const (
 // GCECloud is an implementation of Interface, TCPLoadBalancer and Instances for Google Compute Engine.
 type GCECloud struct {
-	service           *compute.Service
-	containerService  *container.Service
-	projectID         string
-	zone              string
-	networkURL        string
-	useMetadataServer bool
+	service          *compute.Service
+	containerService *container.Service
+	projectID        string
+	zone             string
+	instanceID       string
+	externalID       string
+	networkURL       string
 }
 
 type Config struct {
@@ -100,7 +101,7 @@ func getProjectAndZone() (string, string, error) {
 	return projectID, zone, nil
 }
 
-func getInstanceIDViaMetadata() (string, error) {
+func getInstanceID() (string, error) {
 	result, err := metadata.Get("instance/hostname")
 	if err != nil {
 		return "", err
@@ -112,7 +113,7 @@ func getInstanceIDViaMetadata() (string, error) {
 	return parts[0], nil
 }
 
-func getCurrentExternalIDViaMetadata() (string, error) {
+func getCurrentExternalID() (string, error) {
 	externalID, err := metadata.Get("instance/id")
 	if err != nil {
 		return "", fmt.Errorf("couldn't get external ID: %v", err)
@@ -120,7 +121,7 @@ func getCurrentExternalIDViaMetadata() (string, error) {
 	return externalID, nil
 }
 
-func getNetworkNameViaMetadata() (string, error) {
+func getNetworkName() (string, error) {
 	result, err := metadata.Get("instance/network-interfaces/0/network")
 	if err != nil {
 		return "", err
@@ -132,32 +133,28 @@ func getNetworkNameViaMetadata() (string, error) {
 	return parts[3], nil
 }
 
-func getNetworkNameViaAPICall(svc *compute.Service, projectID string) (string, error) {
-	networkList, err := svc.Networks.List(projectID).Do()
-	if err != nil {
-		return "", err
-	}
-
-	if networkList == nil || len(networkList.Items) <= 0 {
-		return "", fmt.Errorf("GCE Network List call returned no networks for project %q.", projectID)
-	}
-
-	return networkList.Items[0].Name, nil
-}
-
 // newGCECloud creates a new instance of GCECloud.
 func newGCECloud(config io.Reader) (*GCECloud, error) {
 	projectID, zone, err := getProjectAndZone()
 	if err != nil {
 		return nil, err
 	}
-
-	networkName, err := getNetworkNameViaMetadata()
+	// TODO: if we want to use this on a machine that doesn't have the http://metadata server
+	// e.g. on a user's machine (not VM) somewhere, we need to have an alternative for
+	// instance id lookup.
+	instanceID, err := getInstanceID()
+	if err != nil {
+		return nil, err
+	}
+	externalID, err := getCurrentExternalID()
+	if err != nil {
+		return nil, err
+	}
+	networkName, err := getNetworkName()
 	if err != nil {
 		return nil, err
 	}
 	networkURL := gceNetworkURL(projectID, networkName)
 
 	tokenSource := google.ComputeTokenSource("")
 	if config != nil {
 		var cfg Config
@@ -179,51 +176,23 @@ func newGCECloud(config io.Reader) (*GCECloud, error) {
 			tokenSource = newAltTokenSource(cfg.Global.TokenURL, cfg.Global.TokenBody)
 		}
 	}
 
-	return CreateGCECloud(projectID, zone, networkURL, tokenSource, true /* useMetadataServer */)
-}
-
-// Creates a GCECloud object using the specified parameters.
-// If no networkUrl is specified, loads networkName via rest call.
-// If no tokenSource is specified, uses oauth2.DefaultTokenSource.
-func CreateGCECloud(projectID, zone, networkURL string, tokenSource oauth2.TokenSource, useMetadataServer bool) (*GCECloud, error) {
-	if tokenSource == nil {
-		var err error
-		tokenSource, err = google.DefaultTokenSource(
-			oauth2.NoContext,
-			compute.CloudPlatformScope,
-			compute.ComputeScope)
-		if err != nil {
-			return nil, err
-		}
-	}
-
 	client := oauth2.NewClient(oauth2.NoContext, tokenSource)
 	svc, err := compute.New(client)
 	if err != nil {
 		return nil, err
 	}
 
 	containerSvc, err := container.New(client)
 	if err != nil {
 		return nil, err
 	}
 
-	if networkURL == "" {
-		networkName, err := getNetworkNameViaAPICall(svc, projectID)
-		if err != nil {
-			return nil, err
-		}
-		networkURL = gceNetworkURL(projectID, networkName)
-	}
-
 	return &GCECloud{
-		service:           svc,
-		containerService:  containerSvc,
-		projectID:         projectID,
-		zone:              zone,
-		networkURL:        networkURL,
-		useMetadataServer: useMetadataServer,
+		service:          svc,
+		containerService: containerSvc,
+		projectID:        projectID,
+		zone:             zone,
+		instanceID:       instanceID,
+		externalID:       externalID,
+		networkURL:       networkURL,
 	}, nil
 }
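With CreateGCECloud gone, the only way to build a GCECloud is again the unexported newGCECloud, wired in through the cloud-provider registry. A minimal sketch of that wiring (the init/registration code is not part of this diff; the ProviderName constant and the exact closure shape are assumptions):

	func init() {
		cloudprovider.RegisterCloudProvider(ProviderName, func(config io.Reader) (cloudprovider.Interface, error) {
			// newGCECloud now derives instanceID and externalID from the metadata server at construction time.
			return newGCECloud(config)
		})
	}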
@@ -1399,31 +1368,16 @@ func (gce *GCECloud) NodeAddresses(_ string) ([]api.NodeAddress, error) {
 	}, nil
 }
 
-// isCurrentInstance uses metadata server to check if specified instanceID matches current machine's instanceID
-func (gce *GCECloud) isCurrentInstance(instanceID string) bool {
-	currentInstanceID, err := getInstanceIDViaMetadata()
-	if err != nil {
-		// Log and swallow error
-		glog.Errorf("Failed to fetch instanceID via Metadata: %v", err)
-		return false
-	}
-
-	return currentInstanceID == canonicalizeInstanceName(instanceID)
+func (gce *GCECloud) isCurrentInstance(instance string) bool {
+	return gce.instanceID == canonicalizeInstanceName(instance)
 }
 
 // ExternalID returns the cloud provider ID of the specified instance (deprecated).
 func (gce *GCECloud) ExternalID(instance string) (string, error) {
-	if gce.useMetadataServer {
-		// Use metadata, if possible, to fetch ID. See issue #12000
-		if gce.isCurrentInstance(instance) {
-			externalInstanceID, err := getCurrentExternalIDViaMetadata()
-			if err == nil {
-				return externalInstanceID, nil
-			}
-		}
+	// if we are asking about the current instance, just go to metadata
+	if gce.isCurrentInstance(instance) {
+		return gce.externalID, nil
 	}
-
-	// Fallback to GCE API call if metadata server fails to retrieve ID
 	inst, err := gce.getInstanceByName(instance)
 	if err != nil {
 		return "", err
@@ -1540,29 +1494,7 @@ func (gce *GCECloud) GetZone() (cloudprovider.Zone, error) {
 	}, nil
 }
 
-func (gce *GCECloud) CreateDisk(name string, sizeGb int64) error {
-	diskToCreate := &compute.Disk{
-		Name:   name,
-		SizeGb: sizeGb,
-	}
-	createOp, err := gce.service.Disks.Insert(gce.projectID, gce.zone, diskToCreate).Do()
-	if err != nil {
-		return err
-	}
-
-	return gce.waitForZoneOp(createOp)
-}
-
-func (gce *GCECloud) DeleteDisk(diskToDelete string) error {
-	deleteOp, err := gce.service.Disks.Delete(gce.projectID, gce.zone, diskToDelete).Do()
-	if err != nil {
-		return err
-	}
-
-	return gce.waitForZoneOp(deleteOp)
-}
-
-func (gce *GCECloud) AttachDisk(diskName, instanceID string, readOnly bool) error {
+func (gce *GCECloud) AttachDisk(diskName string, readOnly bool) error {
 	disk, err := gce.getDisk(diskName)
 	if err != nil {
 		return err
@@ -1573,7 +1505,7 @@ func (gce *GCECloud) AttachDisk(diskName, instanceID string, readOnly bool) erro
 	}
 	attachedDisk := gce.convertDiskToAttachedDisk(disk, readWrite)
 
-	attachOp, err := gce.service.Instances.AttachDisk(gce.projectID, gce.zone, instanceID, attachedDisk).Do()
+	attachOp, err := gce.service.Instances.AttachDisk(gce.projectID, gce.zone, gce.instanceID, attachedDisk).Do()
 	if err != nil {
 		return err
 	}
@@ -1581,8 +1513,8 @@ func (gce *GCECloud) AttachDisk(diskName, instanceID string, readOnly bool) erro
 	return gce.waitForZoneOp(attachOp)
 }
 
-func (gce *GCECloud) DetachDisk(devicePath, instanceID string) error {
-	detachOp, err := gce.service.Instances.DetachDisk(gce.projectID, gce.zone, instanceID, devicePath).Do()
+func (gce *GCECloud) DetachDisk(devicePath string) error {
+	detachOp, err := gce.service.Instances.DetachDisk(gce.projectID, gce.zone, gce.instanceID, devicePath).Do()
 	if err != nil {
 		return err
 	}
@@ -1590,22 +1522,6 @@ func (gce *GCECloud) DetachDisk(devicePath, instanceID string) error {
 	return gce.waitForZoneOp(detachOp)
 }
 
-func (gce *GCECloud) DiskIsAttached(diskName, instanceID string) (bool, error) {
-	instance, err := gce.service.Instances.Get(gce.projectID, gce.zone, instanceID).Do()
-	if err != nil {
-		return false, err
-	}
-
-	for _, disk := range instance.Disks {
-		if disk.DeviceName == diskName {
-			// Disk is still attached to node
-			return true, nil
-		}
-	}
-
-	return false, nil
-}
-
 func (gce *GCECloud) getDisk(diskName string) (*compute.Disk, error) {
 	return gce.service.Disks.Get(gce.projectID, gce.zone, diskName).Do()
 }
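The restored AttachDisk/DetachDisk no longer take an instance ID; they always act on the instance recorded in gce.instanceID when the GCECloud was built. A hypothetical caller using only the restored signatures above (the disk name "my-pd" is made up for illustration):

	if err := gceCloud.AttachDisk("my-pd", false /* readOnly */); err != nil {
		glog.Errorf("Error attaching PD: %v", err)
	}
	if err := gceCloud.DetachDisk("my-pd"); err != nil {
		glog.Errorf("Error detaching PD: %v", err)
	}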
@@ -294,7 +294,3 @@ func (f *PersistentVolumeRecycler) GetMounter() mount.Interface {
 func (f *PersistentVolumeRecycler) GetWriter() ioutil.Writer {
 	return nil
 }
-
-func (f *PersistentVolumeRecycler) GetHostName() string {
-	return ""
-}
@@ -93,11 +93,6 @@ func (vh *volumeHost) GetWriter() io.Writer {
 	return vh.kubelet.writer
 }
 
-// Returns the hostname of the host kubelet is running on
-func (vh *volumeHost) GetHostName() string {
-	return vh.kubelet.hostname
-}
-
 func (kl *Kubelet) newVolumeBuilderFromPlugins(spec *volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Builder, error) {
 	plugin, err := kl.volumePluginMgr.FindPluginBySpec(spec)
 	if err != nil {
@@ -132,7 +132,7 @@ func attachDiskAndVerify(b *gcePersistentDiskBuilder, sdBeforeSet sets.String) (
 			glog.Warningf("Retrying attach for GCE PD %q (retry count=%v).", b.pdName, numRetries)
 		}
 
-		if err := gceCloud.AttachDisk(b.pdName, b.plugin.host.GetHostName(), b.readOnly); err != nil {
+		if err := gceCloud.AttachDisk(b.pdName, b.readOnly); err != nil {
 			glog.Errorf("Error attaching PD %q: %v", b.pdName, err)
 			time.Sleep(errorSleepDuration)
 			continue
@@ -206,7 +206,7 @@ func detachDiskAndVerify(c *gcePersistentDiskCleaner) {
 			glog.Warningf("Retrying detach for GCE PD %q (retry count=%v).", c.pdName, numRetries)
 		}
 
-		if err := gceCloud.DetachDisk(c.pdName, c.plugin.host.GetHostName()); err != nil {
+		if err := gceCloud.DetachDisk(c.pdName); err != nil {
 			glog.Errorf("Error detaching PD %q: %v", c.pdName, err)
 			time.Sleep(errorSleepDuration)
 			continue
@@ -156,9 +156,6 @@ type VolumeHost interface {
 
 	// Get writer interface for writing data to disk.
 	GetWriter() io.Writer
-
-	// Returns the hostname of the host kubelet is running on
-	GetHostName() string
 }
 
 // VolumePluginMgr tracks registered plugins.
@@ -92,11 +92,6 @@ func (f *fakeVolumeHost) NewWrapperCleaner(spec *Spec, podUID types.UID) (Cleane
 	return plug.NewCleaner(spec.Name(), podUID)
 }
 
-// Returns the hostname of the host kubelet is running on
-func (f *fakeVolumeHost) GetHostName() string {
-	return "fakeHostName"
-}
-
 func ProbeVolumePlugins(config VolumeConfig) []VolumePlugin {
 	if _, ok := config.OtherAttributes["fake-property"]; ok {
 		return []VolumePlugin{
@@ -33,7 +33,6 @@ import (
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
 	"k8s.io/kubernetes/pkg/cloudprovider"
-	gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
 	"k8s.io/kubernetes/pkg/util"
 )
 
@@ -102,16 +101,6 @@ func TestE2E(t *testing.T) {
 		glog.Info("The --provider flag is not set. Treating as a conformance test. Some tests may not be run.")
 	}
 
-	if testContext.Provider == "gce" || testContext.Provider == "gke" {
-		var err error
-		Logf("Fetching cloud provider for %q\r\n", testContext.Provider)
-		cloudConfig.Provider, err = gcecloud.CreateGCECloud(testContext.CloudConfig.ProjectID, testContext.CloudConfig.Zone, "" /* networkUrl */, nil /* tokenSource */, false /* useMetadataServer */)
-		if err != nil {
-			glog.Fatal("Error building GCE provider: ", err)
-		}
-
-	}
-
 	if testContext.Provider == "aws" {
 		awsConfig := "[Global]\n"
 		if cloudConfig.Zone == "" {
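With the gce/gke branch above removed, the E2E framework no longer builds a GCE provider object, so the PD helpers in test/e2e/pd.go (next file) go back to shelling out to gcloud. The command shape below is copied from the restored createPD; zone, project ID and pdName come from the test context:

	exec.Command("gcloud", "compute", "--quiet",
		"--project="+testContext.CloudConfig.ProjectID,
		"disks", "create", "--zone="+zone, "--size=10GB", pdName).Run()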
test/e2e/pd.go (150 changed lines)
@@ -18,8 +18,8 @@ package e2e
 
 import (
 	"fmt"
-	"google.golang.org/api/googleapi"
 	mathrand "math/rand"
+	"os/exec"
 	"strings"
 	"time"
 
@@ -31,7 +31,6 @@ import (
 	"k8s.io/kubernetes/pkg/api/unversioned"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
 	awscloud "k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
-	gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
 	"k8s.io/kubernetes/pkg/fields"
 	"k8s.io/kubernetes/pkg/labels"
 	"k8s.io/kubernetes/pkg/util"
@@ -70,7 +69,7 @@ var _ = Describe("Pod Disks", func() {
 		SkipUnlessProviderIs("gce", "gke", "aws")
 
 		By("creating PD")
-		diskName, err := createPDWithRetry()
+		diskName, err := createPD()
 		expectNoError(err, "Error creating PD")
 
 		host0Pod := testPDPod([]string{diskName}, host0Name, false /* readOnly */, 1 /* numContainers */)
@@ -78,12 +77,14 @@ var _ = Describe("Pod Disks", func() {
 		containerName := "mycontainer"
 
 		defer func() {
-			By("cleaning up PD-RW test environment")
 			// Teardown pods, PD. Ignore errors.
 			// Teardown should do nothing unless test failed.
+			By("cleaning up PD-RW test environment")
 			podClient.Delete(host0Pod.Name, api.NewDeleteOptions(0))
 			podClient.Delete(host1Pod.Name, api.NewDeleteOptions(0))
-			detachAndDeletePDs(diskName, []string{host0Name, host1Name})
+			detachPD(host0Name, diskName)
+			detachPD(host1Name, diskName)
+			deletePDWithRetry(diskName)
 		}()
 
 		By("submitting host0Pod to kubernetes")
@@ -116,6 +117,9 @@ var _ = Describe("Pod Disks", func() {
 		By("deleting host1Pod")
 		expectNoError(podClient.Delete(host1Pod.Name, api.NewDeleteOptions(0)), "Failed to delete host1Pod")
 
+		By(fmt.Sprintf("deleting PD %q", diskName))
+		deletePDWithRetry(diskName)
+
 		return
 	})
 
@@ -123,7 +127,7 @@ var _ = Describe("Pod Disks", func() {
 		SkipUnlessProviderIs("gce", "gke")
 
 		By("creating PD")
-		diskName, err := createPDWithRetry()
+		diskName, err := createPD()
 		expectNoError(err, "Error creating PD")
 
 		rwPod := testPDPod([]string{diskName}, host0Name, false /* readOnly */, 1 /* numContainers */)
@@ -137,7 +141,10 @@ var _ = Describe("Pod Disks", func() {
 			podClient.Delete(rwPod.Name, api.NewDeleteOptions(0))
 			podClient.Delete(host0ROPod.Name, api.NewDeleteOptions(0))
 			podClient.Delete(host1ROPod.Name, api.NewDeleteOptions(0))
-			detachAndDeletePDs(diskName, []string{host0Name, host1Name})
+
+			detachPD(host0Name, diskName)
+			detachPD(host1Name, diskName)
+			deletePDWithRetry(diskName)
 		}()
 
 		By("submitting rwPod to ensure PD is formatted")
@@ -164,13 +171,18 @@ var _ = Describe("Pod Disks", func() {
 		By("deleting host1ROPod")
 		expectNoError(podClient.Delete(host1ROPod.Name, api.NewDeleteOptions(0)), "Failed to delete host1ROPod")
 
+		By(fmt.Sprintf("deleting PD %q", diskName))
+		deletePDWithRetry(diskName)
+
 		expectNoError(err, "Error deleting PD")
 	})
 
 	It("should schedule a pod w/ a RW PD shared between multiple containers, write to PD, delete pod, verify contents, and repeat in rapid succession", func() {
 		SkipUnlessProviderIs("gce", "gke", "aws")
 
 		By("creating PD")
-		diskName, err := createPDWithRetry()
+		diskName, err := createPD()
 		expectNoError(err, "Error creating PD")
 		numContainers := 4
 
@@ -181,7 +193,8 @@ var _ = Describe("Pod Disks", func() {
 			// Teardown pods, PD. Ignore errors.
 			// Teardown should do nothing unless test failed.
 			podClient.Delete(host0Pod.Name, api.NewDeleteOptions(0))
-			detachAndDeletePDs(diskName, []string{host0Name})
+			detachPD(host0Name, diskName)
+			deletePDWithRetry(diskName)
 		}()
 
 		fileAndContentToVerify := make(map[string]string)
@@ -212,16 +225,21 @@ var _ = Describe("Pod Disks", func() {
 			By("deleting host0Pod")
 			expectNoError(podClient.Delete(host0Pod.Name, api.NewDeleteOptions(0)), "Failed to delete host0Pod")
 		}
 
+		By(fmt.Sprintf("deleting PD %q", diskName))
+		deletePDWithRetry(diskName)
+
 		return
 	})
 
 	It("should schedule a pod w/two RW PDs both mounted to one container, write to PD, verify contents, delete pod, recreate pod, verify contents, and repeat in rapid succession", func() {
 		SkipUnlessProviderIs("gce", "gke", "aws")
 
 		By("creating PD1")
-		disk1Name, err := createPDWithRetry()
+		disk1Name, err := createPD()
 		expectNoError(err, "Error creating PD1")
 		By("creating PD2")
-		disk2Name, err := createPDWithRetry()
+		disk2Name, err := createPD()
 		expectNoError(err, "Error creating PD2")
 
 		host0Pod := testPDPod([]string{disk1Name, disk2Name}, host0Name, false /* readOnly */, 1 /* numContainers */)
@@ -231,8 +249,10 @@ var _ = Describe("Pod Disks", func() {
 			// Teardown pods, PD. Ignore errors.
 			// Teardown should do nothing unless test failed.
 			podClient.Delete(host0Pod.Name, api.NewDeleteOptions(0))
-			detachAndDeletePDs(disk1Name, []string{host0Name})
-			detachAndDeletePDs(disk2Name, []string{host0Name})
+			detachPD(host0Name, disk1Name)
+			detachPD(host0Name, disk2Name)
+			deletePDWithRetry(disk1Name)
+			deletePDWithRetry(disk2Name)
 		}()
 
 		containerName := "mycontainer"
@@ -266,23 +286,16 @@ var _ = Describe("Pod Disks", func() {
 			By("deleting host0Pod")
 			expectNoError(podClient.Delete(host0Pod.Name, api.NewDeleteOptions(0)), "Failed to delete host0Pod")
 		}
 
+		By(fmt.Sprintf("deleting PD1 %q", disk1Name))
+		deletePDWithRetry(disk1Name)
+		By(fmt.Sprintf("deleting PD2 %q", disk2Name))
+		deletePDWithRetry(disk2Name)
+
 		return
 	})
 })
 
-func createPDWithRetry() (string, error) {
-	newDiskName := ""
-	var err error
-	for start := time.Now(); time.Since(start) < 180*time.Second; time.Sleep(5 * time.Second) {
-		if newDiskName, err = createPD(); err != nil {
-			Logf("Couldn't create a new PD. Sleeping 5 seconds (%v)", err)
-			continue
-		}
-		Logf("Successfully created a new PD: %q.", newDiskName)
-		break
-	}
-	return newDiskName, err
-}
-
 func deletePDWithRetry(diskName string) {
 	var err error
 	for start := time.Now(); time.Since(start) < 180*time.Second; time.Sleep(5 * time.Second) {
@@ -290,7 +303,7 @@ func deletePDWithRetry(diskName string) {
 			Logf("Couldn't delete PD %q. Sleeping 5 seconds (%v)", diskName, err)
 			continue
 		}
-		Logf("Successfully deleted PD %q.", diskName)
+		Logf("Deleted PD %v", diskName)
 		break
 	}
 	expectNoError(err, "Error deleting PD")
@@ -312,12 +325,9 @@ func createPD() (string, error) {
 	if testContext.Provider == "gce" || testContext.Provider == "gke" {
 		pdName := fmt.Sprintf("%s-%s", testContext.prefix, string(util.NewUUID()))
 
-		gceCloud, err := getGCECloud()
-		if err != nil {
-			return "", err
-		}
-
-		err = gceCloud.CreateDisk(pdName, 10 /* sizeGb */)
+		zone := testContext.CloudConfig.Zone
+		// TODO: make this hit the compute API directly instead of shelling out to gcloud.
+		err := exec.Command("gcloud", "compute", "--quiet", "--project="+testContext.CloudConfig.ProjectID, "disks", "create", "--zone="+zone, "--size=10GB", pdName).Run()
 		if err != nil {
 			return "", err
 		}
@@ -335,20 +345,19 @@ func createPD() (string, error) {
 
 func deletePD(pdName string) error {
 	if testContext.Provider == "gce" || testContext.Provider == "gke" {
-		gceCloud, err := getGCECloud()
-		if err != nil {
-			return err
-		}
-
-		err = gceCloud.DeleteDisk(pdName)
+		zone := testContext.CloudConfig.Zone
+
+		// TODO: make this hit the compute API directly.
+		cmd := exec.Command("gcloud", "compute", "--quiet", "--project="+testContext.CloudConfig.ProjectID, "disks", "delete", "--zone="+zone, pdName)
+		data, err := cmd.CombinedOutput()
 		if err != nil {
-			if gerr, ok := err.(*googleapi.Error); ok && len(gerr.Errors) > 0 && gerr.Errors[0].Reason == "notFound" {
-				// PD already exists, ignore error.
+			dataStr := string(data)
+			if strings.Contains(dataStr, "was not found") {
 				Logf("PD deletion implicitly succeeded because PD %q does not exist.", pdName)
 				return nil
 			}
 
-			Logf("Error deleting PD %q: %v", pdName, err)
+			Logf("Error deleting PD: %s (%v)", dataStr, err)
 		}
 		return err
 	} else {
@@ -364,23 +373,10 @@ func detachPD(hostName, pdName string) error {
 	if testContext.Provider == "gce" || testContext.Provider == "gke" {
 		instanceName := strings.Split(hostName, ".")[0]
 
-		gceCloud, err := getGCECloud()
-		if err != nil {
-			return err
-		}
-
-		err = gceCloud.DetachDisk(pdName, instanceName)
-		if err != nil {
-			if gerr, ok := err.(*googleapi.Error); ok && strings.Contains(gerr.Message, "Invalid value for field 'disk'") {
-				// PD already detached, ignore error.
-				return nil
-			}
-
-			Logf("Error detaching PD %q: %v", pdName, err)
-		}
-
-		return err
+		zone := testContext.CloudConfig.Zone
+
+		// TODO: make this hit the compute API directly.
+		return exec.Command("gcloud", "compute", "--quiet", "--project="+testContext.CloudConfig.ProjectID, "detach-disk", "--zone="+zone, "--disk="+pdName, instanceName).Run()
 	} else {
 		volumes, ok := testContext.CloudConfig.Provider.(awscloud.Volumes)
 		if !ok {
@@ -461,19 +457,19 @@ func testPDPod(diskNames []string, targetHost string, readOnly bool, numContaine
 // Waits for specified PD to to detach from specified hostName
 func waitForPDDetach(diskName, hostName string) error {
 	if testContext.Provider == "gce" || testContext.Provider == "gke" {
-		gceCloud, err := getGCECloud()
-		if err != nil {
-			return err
-		}
-
 		for start := time.Now(); time.Since(start) < gcePDDetachTimeout; time.Sleep(gcePDDetachPollTime) {
-			diskAttached, err := gceCloud.DiskIsAttached(diskName, hostName)
+			zone := testContext.CloudConfig.Zone
+
+			cmd := exec.Command("gcloud", "compute", "--project="+testContext.CloudConfig.ProjectID, "instances", "describe", "--zone="+zone, hostName)
+			data, err := cmd.CombinedOutput()
 			if err != nil {
-				Logf("Error waiting for PD %q to detach from node %q. 'DiskIsAttached(...)' failed with %v", diskName, hostName, err)
+				Logf("Error waiting for PD %q to detach from node %q. 'gcloud compute instances describe' failed with %s (%v)", diskName, hostName, string(data), err)
 				return err
 			}
 
-			if !diskAttached {
+			dataStr := strings.ToLower(string(data))
+			diskName = strings.ToLower(diskName)
+			if !strings.Contains(string(dataStr), diskName) {
 				// Specified disk does not appear to be attached to specified node
 				Logf("GCE PD %q appears to have successfully detached from %q.", diskName, hostName)
 				return nil
@@ -487,23 +483,3 @@ func waitForPDDetach(diskName, hostName string) error {
 
 	return nil
 }
-
-func getGCECloud() (*gcecloud.GCECloud, error) {
-	gceCloud, ok := testContext.CloudConfig.Provider.(*gcecloud.GCECloud)
-
-	if !ok {
-		return nil, fmt.Errorf("failed to convert CloudConfig.Provider to GCECloud: %#v", testContext.CloudConfig.Provider)
-	}
-
-	return gceCloud, nil
-}
-
-func detachAndDeletePDs(diskName string, hosts []string) {
-	for _, host := range hosts {
-		detachPD(host, diskName)
-		By(fmt.Sprintf("Waiting for PD %q to detach from %q", diskName, host))
-		waitForPDDetach(diskName, host)
-	}
-	By(fmt.Sprintf("Deleting PD %q", diskName))
-	deletePDWithRetry(diskName)
-}
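With getGCECloud and detachAndDeletePDs removed, each test cleans up inline in its defer block. The restored pattern, copied from the hunks above (host0Pod, host0Name and diskName as used there):

	defer func() {
		By("cleaning up PD-RW test environment")
		// Teardown pods, PD. Ignore errors.
		// Teardown should do nothing unless test failed.
		podClient.Delete(host0Pod.Name, api.NewDeleteOptions(0))
		detachPD(host0Name, diskName)
		deletePDWithRetry(diskName)
	}()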