mirror of https://github.com/k3s-io/k3s
refactor gce-pd to separate builder and cleaner
parent 788012ae7b
commit 27524d6b36
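For orientation, a condensed sketch of the change, assembled from the hunks below (all identifiers appear in the diff itself): the fields needed only while setting up a mount move into a builder type, teardown gets its own cleaner type, and both embed the shared gcePersistentDisk state.

    // Setup-only state lives on the builder.
    type gcePersistentDiskBuilder struct {
        *gcePersistentDisk               // shared: pdName, partition, manager, mounter, plugin
        fsType      string               // filesystem type, optional
        readOnly    bool                 // whether the disk is attached read-only
        diskMounter mount.Interface      // mounts the actual block device
    }

    // Teardown only needs the shared state.
    type gcePersistentDiskCleaner struct {
        *gcePersistentDisk
    }

    var _ volume.Builder = &gcePersistentDiskBuilder{}
    var _ volume.Cleaner = &gcePersistentDiskCleaner{}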
@@ -85,18 +85,19 @@ func (plugin *gcePersistentDiskPlugin) newBuilderInternal(spec *volume.Spec, pod
     }
     readOnly := gce.ReadOnly

-    return &gcePersistentDisk{
-        podUID:      podUID,
-        volName:     spec.Name,
-        pdName:      pdName,
+    return &gcePersistentDiskBuilder{
+        gcePersistentDisk: &gcePersistentDisk{
+            podUID:    podUID,
+            volName:   spec.Name,
+            pdName:    pdName,
+            partition: partition,
+            mounter:   mounter,
+            manager:   manager,
+            plugin:    plugin,
+        },
         fsType:      fsType,
-        partition:   partition,
         readOnly:    readOnly,
-        manager:     manager,
-        mounter:     mounter,
-        diskMounter: &gceSafeFormatAndMount{mounter, exec.New()},
-        plugin:      plugin,
-    }, nil
+        diskMounter: &gceSafeFormatAndMount{mounter, exec.New()}}, nil
 }

 func (plugin *gcePersistentDiskPlugin) NewCleaner(volName string, podUID types.UID, mounter mount.Interface) (volume.Cleaner, error) {
@@ -105,22 +106,21 @@ func (plugin *gcePersistentDiskPlugin) NewCleaner(volName string, podUID types.U
 }

 func (plugin *gcePersistentDiskPlugin) newCleanerInternal(volName string, podUID types.UID, manager pdManager, mounter mount.Interface) (volume.Cleaner, error) {
-    return &gcePersistentDisk{
-        podUID:      podUID,
-        volName:     volName,
-        manager:     manager,
-        mounter:     mounter,
-        diskMounter: &gceSafeFormatAndMount{mounter, exec.New()},
-        plugin:      plugin,
-    }, nil
+    return &gcePersistentDiskCleaner{&gcePersistentDisk{
+        podUID:  podUID,
+        volName: volName,
+        manager: manager,
+        mounter: mounter,
+        plugin:  plugin,
+    }}, nil
 }

 // Abstract interface to PD operations.
 type pdManager interface {
     // Attaches the disk to the kubelet's host machine.
-    AttachAndMountDisk(pd *gcePersistentDisk, globalPDPath string) error
+    AttachAndMountDisk(b *gcePersistentDiskBuilder, globalPDPath string) error
     // Detaches the disk from the kubelet's host machine.
-    DetachDisk(pd *gcePersistentDisk) error
+    DetachDisk(c *gcePersistentDiskCleaner) error
 }

 // gcePersistentDisk volumes are disk resources provided by Google Compute Engine
@@ -130,37 +130,43 @@ type gcePersistentDisk struct {
     podUID types.UID
     // Unique identifier of the PD, used to find the disk resource in the provider.
     pdName string
-    // Filesystem type, optional.
-    fsType string
     // Specifies the partition to mount
     partition string
-    // Specifies whether the disk will be attached as read-only.
-    readOnly bool
     // Utility interface that provides API calls to the provider to attach/detach disks.
     manager pdManager
     // Mounter interface that provides system calls to mount the global path to the pod local path.
     mounter mount.Interface
-    // diskMounter provides the interface that is used to mount the actual block device.
-    diskMounter mount.Interface
-    plugin      *gcePersistentDiskPlugin
+    plugin *gcePersistentDiskPlugin
 }

 func detachDiskLogError(pd *gcePersistentDisk) {
-    err := pd.manager.DetachDisk(pd)
+    err := pd.manager.DetachDisk(&gcePersistentDiskCleaner{pd})
     if err != nil {
         glog.Warningf("Failed to detach disk: %v (%v)", pd, err)
     }
 }

+type gcePersistentDiskBuilder struct {
+    *gcePersistentDisk
+    // Filesystem type, optional.
+    fsType string
+    // Specifies whether the disk will be attached as read-only.
+    readOnly bool
+    // diskMounter provides the interface that is used to mount the actual block device.
+    diskMounter mount.Interface
+}
+
+var _ volume.Builder = &gcePersistentDiskBuilder{}
+
 // SetUp attaches the disk and bind mounts to the volume path.
-func (pd *gcePersistentDisk) SetUp() error {
-    return pd.SetUpAt(pd.GetPath())
+func (b *gcePersistentDiskBuilder) SetUp() error {
+    return b.SetUpAt(b.GetPath())
 }

 // SetUpAt attaches the disk and bind mounts to the volume path.
-func (pd *gcePersistentDisk) SetUpAt(dir string) error {
+func (b *gcePersistentDiskBuilder) SetUpAt(dir string) error {
     // TODO: handle failed mounts here.
-    mountpoint, err := pd.mounter.IsMountPoint(dir)
+    mountpoint, err := b.mounter.IsMountPoint(dir)
     glog.V(4).Infof("PersistentDisk set up: %s %v %v", dir, mountpoint, err)
     if err != nil && !os.IsNotExist(err) {
         return err
@@ -169,35 +175,35 @@ func (pd *gcePersistentDisk) SetUpAt(dir string) error {
         return nil
     }

-    globalPDPath := makeGlobalPDName(pd.plugin.host, pd.pdName)
-    if err := pd.manager.AttachAndMountDisk(pd, globalPDPath); err != nil {
+    globalPDPath := makeGlobalPDName(b.plugin.host, b.pdName)
+    if err := b.manager.AttachAndMountDisk(b, globalPDPath); err != nil {
         return err
     }

     if err := os.MkdirAll(dir, 0750); err != nil {
         // TODO: we should really eject the attach/detach out into its own control loop.
-        detachDiskLogError(pd)
+        detachDiskLogError(b.gcePersistentDisk)
         return err
     }

     // Perform a bind mount to the full path to allow duplicate mounts of the same PD.
     options := []string{"bind"}
-    if pd.readOnly {
+    if b.readOnly {
         options = append(options, "ro")
     }
-    err = pd.mounter.Mount(globalPDPath, dir, "", options)
+    err = b.mounter.Mount(globalPDPath, dir, "", options)
     if err != nil {
-        mountpoint, mntErr := pd.mounter.IsMountPoint(dir)
+        mountpoint, mntErr := b.mounter.IsMountPoint(dir)
         if mntErr != nil {
             glog.Errorf("isMountpoint check failed: %v", mntErr)
             return err
         }
         if mountpoint {
-            if mntErr = pd.mounter.Unmount(dir); mntErr != nil {
+            if mntErr = b.mounter.Unmount(dir); mntErr != nil {
                 glog.Errorf("Failed to unmount: %v", mntErr)
                 return err
             }
-            mountpoint, mntErr := pd.mounter.IsMountPoint(dir)
+            mountpoint, mntErr := b.mounter.IsMountPoint(dir)
             if mntErr != nil {
                 glog.Errorf("isMountpoint check failed: %v", mntErr)
                 return err
@@ -210,7 +216,7 @@ func (pd *gcePersistentDisk) SetUpAt(dir string) error {
         }
         os.Remove(dir)
         // TODO: we should really eject the attach/detach out into its own control loop.
-        detachDiskLogError(pd)
+        detachDiskLogError(b.gcePersistentDisk)
         return err
     }

@@ -226,16 +232,22 @@ func (pd *gcePersistentDisk) GetPath() string {
     return pd.plugin.host.GetPodVolumeDir(pd.podUID, util.EscapeQualifiedNameForDisk(name), pd.volName)
 }

+type gcePersistentDiskCleaner struct {
+    *gcePersistentDisk
+}
+
+var _ volume.Cleaner = &gcePersistentDiskCleaner{}
+
 // Unmounts the bind mount, and detaches the disk only if the PD
 // resource was the last reference to that disk on the kubelet.
-func (pd *gcePersistentDisk) TearDown() error {
-    return pd.TearDownAt(pd.GetPath())
+func (c *gcePersistentDiskCleaner) TearDown() error {
+    return c.TearDownAt(c.GetPath())
 }

 // Unmounts the bind mount, and detaches the disk only if the PD
 // resource was the last reference to that disk on the kubelet.
-func (pd *gcePersistentDisk) TearDownAt(dir string) error {
-    mountpoint, err := pd.mounter.IsMountPoint(dir)
+func (c *gcePersistentDiskCleaner) TearDownAt(dir string) error {
+    mountpoint, err := c.mounter.IsMountPoint(dir)
     if err != nil {
         return err
     }
@@ -243,24 +255,24 @@ func (pd *gcePersistentDisk) TearDownAt(dir string) error {
         return os.Remove(dir)
     }

-    refs, err := mount.GetMountRefs(pd.mounter, dir)
+    refs, err := mount.GetMountRefs(c.mounter, dir)
     if err != nil {
         return err
     }
     // Unmount the bind-mount inside this pod
-    if err := pd.mounter.Unmount(dir); err != nil {
+    if err := c.mounter.Unmount(dir); err != nil {
         return err
     }
     // If len(refs) is 1, then all bind mounts have been removed, and the
     // remaining reference is the global mount. It is safe to detach.
     if len(refs) == 1 {
-        // pd.pdName is not initially set for volume-cleaners, so set it here.
-        pd.pdName = path.Base(refs[0])
-        if err := pd.manager.DetachDisk(pd); err != nil {
+        // c.pdName is not initially set for volume-cleaners, so set it here.
+        c.pdName = path.Base(refs[0])
+        if err := c.manager.DetachDisk(c); err != nil {
             return err
         }
     }
-    mountpoint, mntErr := pd.mounter.IsMountPoint(dir)
+    mountpoint, mntErr := c.mounter.IsMountPoint(dir)
     if mntErr != nil {
         glog.Errorf("isMountpoint check failed: %v", mntErr)
         return err
@@ -74,8 +74,8 @@ type fakePDManager struct {

 // TODO(jonesdl) To fully test this, we could create a loopback device
 // and mount that instead.
-func (fake *fakePDManager) AttachAndMountDisk(pd *gcePersistentDisk, globalPDPath string) error {
-    globalPath := makeGlobalPDName(pd.plugin.host, pd.pdName)
+func (fake *fakePDManager) AttachAndMountDisk(b *gcePersistentDiskBuilder, globalPDPath string) error {
+    globalPath := makeGlobalPDName(b.plugin.host, b.pdName)
     err := os.MkdirAll(globalPath, 0750)
     if err != nil {
         return err
@@ -83,12 +83,12 @@ func (fake *fakePDManager) AttachAndMountDisk(pd *gcePersistentDisk, globalPDPat
     fake.attachCalled = true
     // Simulate the global mount so that the fakeMounter returns the
     // expected number of mounts for the attached disk.
-    pd.mounter.Mount(globalPath, globalPath, pd.fsType, nil)
+    b.mounter.Mount(globalPath, globalPath, b.fsType, nil)
     return nil
 }

-func (fake *fakePDManager) DetachDisk(pd *gcePersistentDisk) error {
-    globalPath := makeGlobalPDName(pd.plugin.host, pd.pdName)
+func (fake *fakePDManager) DetachDisk(c *gcePersistentDiskCleaner) error {
+    globalPath := makeGlobalPDName(c.plugin.host, c.pdName)
     err := os.RemoveAll(globalPath)
     if err != nil {
         return err
@@ -53,11 +53,11 @@ type GCEDiskUtil struct{}

 // Attaches a disk specified by a volume.GCEPersistentDisk to the current kubelet.
 // Mounts the disk to it's global path.
-func (diskUtil *GCEDiskUtil) AttachAndMountDisk(pd *gcePersistentDisk, globalPDPath string) error {
-    glog.V(5).Infof("AttachAndMountDisk(pd, %q) where pd is %#v\r\n", globalPDPath, pd)
+func (diskUtil *GCEDiskUtil) AttachAndMountDisk(b *gcePersistentDiskBuilder, globalPDPath string) error {
+    glog.V(5).Infof("AttachAndMountDisk(b, %q) where b is %#v\r\n", globalPDPath, b)

     // Block execution until any pending detach goroutines for this pd have completed
-    detachCleanupManager.Send(pd.pdName, true)
+    detachCleanupManager.Send(b.pdName, true)

     sdBefore, err := filepath.Glob(diskSDPattern)
     if err != nil {
@@ -65,13 +65,13 @@ func (diskUtil *GCEDiskUtil) AttachAndMountDisk(pd *gcePersistentDisk, globalPDP
     }
     sdBeforeSet := util.NewStringSet(sdBefore...)

-    devicePath, err := attachDiskAndVerify(pd, sdBeforeSet)
+    devicePath, err := attachDiskAndVerify(b, sdBeforeSet)
     if err != nil {
         return err
     }

     // Only mount the PD globally once.
-    mountpoint, err := pd.mounter.IsMountPoint(globalPDPath)
+    mountpoint, err := b.mounter.IsMountPoint(globalPDPath)
     if err != nil {
         if os.IsNotExist(err) {
             if err := os.MkdirAll(globalPDPath, 0750); err != nil {
@@ -83,11 +83,11 @@ func (diskUtil *GCEDiskUtil) AttachAndMountDisk(pd *gcePersistentDisk, globalPDP
         }
     }
     options := []string{}
-    if pd.readOnly {
+    if b.readOnly {
         options = append(options, "ro")
     }
     if !mountpoint {
-        err = pd.diskMounter.Mount(devicePath, globalPDPath, pd.fsType, options)
+        err = b.diskMounter.Mount(devicePath, globalPDPath, b.fsType, options)
         if err != nil {
             os.Remove(globalPDPath)
             return err
@@ -97,32 +97,32 @@ func (diskUtil *GCEDiskUtil) AttachAndMountDisk(pd *gcePersistentDisk, globalPDP
 }

 // Unmounts the device and detaches the disk from the kubelet's host machine.
-func (util *GCEDiskUtil) DetachDisk(pd *gcePersistentDisk) error {
+func (util *GCEDiskUtil) DetachDisk(c *gcePersistentDiskCleaner) error {
     // Unmount the global PD mount, which should be the only one.
-    globalPDPath := makeGlobalPDName(pd.plugin.host, pd.pdName)
-    glog.V(5).Infof("DetachDisk(pd) where pd is %#v and the globalPDPath is %q\r\n", pd, globalPDPath)
+    globalPDPath := makeGlobalPDName(c.plugin.host, c.pdName)
+    glog.V(5).Infof("DetachDisk(c) where c is %#v and the globalPDPath is %q\r\n", c, globalPDPath)

-    if err := pd.mounter.Unmount(globalPDPath); err != nil {
+    if err := c.mounter.Unmount(globalPDPath); err != nil {
         return err
     }
     if err := os.Remove(globalPDPath); err != nil {
         return err
     }

-    if detachCleanupManager.Exists(pd.pdName) {
-        glog.Warningf("Terminating new DetachDisk call for GCE PD %q. A previous detach call for this PD is still pending.", pd.pdName)
+    if detachCleanupManager.Exists(c.pdName) {
+        glog.Warningf("Terminating new DetachDisk call for GCE PD %q. A previous detach call for this PD is still pending.", c.pdName)
         return nil

     }

     // Detach disk, retry if needed.
-    go detachDiskAndVerify(pd)
+    go detachDiskAndVerify(c)
     return nil
 }

 // Attaches the specified persistent disk device to node, verifies that it is attached, and retries if it fails.
-func attachDiskAndVerify(pd *gcePersistentDisk, sdBeforeSet util.StringSet) (string, error) {
-    devicePaths := getDiskByIdPaths(pd)
+func attachDiskAndVerify(b *gcePersistentDiskBuilder, sdBeforeSet util.StringSet) (string, error) {
+    devicePaths := getDiskByIdPaths(b.gcePersistentDisk)
     var gce cloudprovider.Interface
     for numRetries := 0; numRetries < maxRetries; numRetries++ {
         if gce == nil {
@@ -130,7 +130,7 @@ func attachDiskAndVerify(pd *gcePersistentDisk, sdBeforeSet util.StringSet) (str
             gce, err = cloudprovider.GetCloudProvider("gce", nil)
             if err != nil || gce == nil {
                 // Retry on error. See issue #11321
-                glog.Errorf("Error getting GCECloudProvider while attaching PD %q: %v", pd.pdName, err)
+                glog.Errorf("Error getting GCECloudProvider while attaching PD %q: %v", b.pdName, err)
                 gce = nil
                 time.Sleep(errorSleepDuration)
                 continue
@@ -138,13 +138,13 @@ func attachDiskAndVerify(pd *gcePersistentDisk, sdBeforeSet util.StringSet) (str
         }

         if numRetries > 0 {
-            glog.Warningf("Timed out waiting for GCE PD %q to attach. Retrying attach.", pd.pdName)
+            glog.Warningf("Timed out waiting for GCE PD %q to attach. Retrying attach.", b.pdName)
         }

-        if err := gce.(*gce_cloud.GCECloud).AttachDisk(pd.pdName, pd.readOnly); err != nil {
+        if err := gce.(*gce_cloud.GCECloud).AttachDisk(b.pdName, b.readOnly); err != nil {
             // Retry on error. See issue #11321. Continue and verify if disk is attached, because a
             // previous attach operation may still succeed.
-            glog.Errorf("Error attaching PD %q: %v", pd.pdName, err)
+            glog.Errorf("Error attaching PD %q: %v", b.pdName, err)
         }

         for numChecks := 0; numChecks < maxChecks; numChecks++ {
@@ -159,51 +159,51 @@ func attachDiskAndVerify(pd *gcePersistentDisk, sdBeforeSet util.StringSet) (str
                     glog.Errorf("Error checking if path exists: %v", err)
                 } else if pathExists {
                     // A device path has succesfully been created for the PD
-                    glog.Infof("Succesfully attached GCE PD %q.", pd.pdName)
+                    glog.Infof("Succesfully attached GCE PD %q.", b.pdName)
                     return path, nil
                 }
             }

             // Sleep then check again
-            glog.V(3).Infof("Waiting for GCE PD %q to attach.", pd.pdName)
+            glog.V(3).Infof("Waiting for GCE PD %q to attach.", b.pdName)
             time.Sleep(checkSleepDuration)
         }
     }

-    return "", fmt.Errorf("Could not attach GCE PD %q. Timeout waiting for mount paths to be created.", pd.pdName)
+    return "", fmt.Errorf("Could not attach GCE PD %q. Timeout waiting for mount paths to be created.", b.pdName)
 }

 // Detaches the specified persistent disk device from node, verifies that it is detached, and retries if it fails.
 // This function is intended to be called asynchronously as a go routine.
 // It starts the detachCleanupManager with the specified pdName so that callers can wait for completion.
-func detachDiskAndVerify(pd *gcePersistentDisk) {
-    glog.V(5).Infof("detachDiskAndVerify for pd %q.", pd.pdName)
+func detachDiskAndVerify(c *gcePersistentDiskCleaner) {
+    glog.V(5).Infof("detachDiskAndVerify for pd %q.", c.pdName)
     defer util.HandleCrash()

     // Start operation, so that other threads can wait on this detach operation.
     // Set bufferSize to 0 so senders are blocked on send until we recieve.
-    ch, err := detachCleanupManager.Start(pd.pdName, 0 /* bufferSize */)
+    ch, err := detachCleanupManager.Start(c.pdName, 0 /* bufferSize */)
     if err != nil {
-        glog.Errorf("Error adding %q to detachCleanupManager: %v", pd.pdName, err)
+        glog.Errorf("Error adding %q to detachCleanupManager: %v", c.pdName, err)
         return
     }

-    defer detachCleanupManager.Close(pd.pdName)
+    defer detachCleanupManager.Close(c.pdName)

     defer func() {
         // Unblock any callers that have been waiting for this detach routine to complete.
         for {
             select {
             case <-ch:
-                glog.V(5).Infof("detachDiskAndVerify for pd %q clearing chan.", pd.pdName)
+                glog.V(5).Infof("detachDiskAndVerify for pd %q clearing chan.", c.pdName)
             default:
-                glog.V(5).Infof("detachDiskAndVerify for pd %q done clearing chans.", pd.pdName)
+                glog.V(5).Infof("detachDiskAndVerify for pd %q done clearing chans.", c.pdName)
                 return
             }
         }
     }()

-    devicePaths := getDiskByIdPaths(pd)
+    devicePaths := getDiskByIdPaths(c.gcePersistentDisk)
     var gce cloudprovider.Interface
     for numRetries := 0; numRetries < maxRetries; numRetries++ {
         if gce == nil {
@@ -211,7 +211,7 @@ func detachDiskAndVerify(pd *gcePersistentDisk) {
             gce, err = cloudprovider.GetCloudProvider("gce", nil)
             if err != nil || gce == nil {
                 // Retry on error. See issue #11321
-                glog.Errorf("Error getting GCECloudProvider while detaching PD %q: %v", pd.pdName, err)
+                glog.Errorf("Error getting GCECloudProvider while detaching PD %q: %v", c.pdName, err)
                 gce = nil
                 time.Sleep(errorSleepDuration)
                 continue
@@ -219,13 +219,13 @@ func detachDiskAndVerify(pd *gcePersistentDisk) {
         }

         if numRetries > 0 {
-            glog.Warningf("Timed out waiting for GCE PD %q to detach. Retrying detach.", pd.pdName)
+            glog.Warningf("Timed out waiting for GCE PD %q to detach. Retrying detach.", c.pdName)
         }

-        if err := gce.(*gce_cloud.GCECloud).DetachDisk(pd.pdName); err != nil {
+        if err := gce.(*gce_cloud.GCECloud).DetachDisk(c.pdName); err != nil {
             // Retry on error. See issue #11321. Continue and verify if disk is detached, because a
             // previous detach operation may still succeed.
-            glog.Errorf("Error detaching PD %q: %v", pd.pdName, err)
+            glog.Errorf("Error detaching PD %q: %v", c.pdName, err)
         }

         for numChecks := 0; numChecks < maxChecks; numChecks++ {
@@ -244,18 +244,18 @@ func detachDiskAndVerify(pd *gcePersistentDisk) {
             }
             if allPathsRemoved {
                 // All paths to the PD have been succefully removed
-                glog.Infof("Succesfully detached GCE PD %q.", pd.pdName)
+                glog.Infof("Succesfully detached GCE PD %q.", c.pdName)
                 return
             }

             // Sleep then check again
-            glog.V(3).Infof("Waiting for GCE PD %q to detach.", pd.pdName)
+            glog.V(3).Infof("Waiting for GCE PD %q to detach.", c.pdName)
             time.Sleep(checkSleepDuration)
         }

     }

-    glog.Errorf("Failed to detach GCE PD %q. One or more mount paths was not removed.", pd.pdName)
+    glog.Errorf("Failed to detach GCE PD %q. One or more mount paths was not removed.", c.pdName)
 }

 // Returns list of all /dev/disk/by-id/* paths for given PD.