Check claimRef UID when processing a recycled PV, take 2

Reorder the code so it can no longer produce a case where an error other than "not found"
is combined with a non-nil claim.

Add test case.
Andy Goldstein 2016-03-28 15:25:17 -04:00
parent e01feae75a
commit 5ce2e6d576
2 changed files with 111 additions and 7 deletions
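
For orientation before the diff, here is a minimal standalone sketch of the ordering the commit message describes, using simplified stand-in types and a hypothetical classifyPendingVolume helper (none of this is the binder's actual code): real lookup errors are returned before the claim is examined, so an error other than "not found" can never be combined with a non-nil claim, and a UID mismatch is treated exactly like a missing claim.

package main

import "fmt"

// Simplified stand-ins for the Kubernetes types used in the diff below; this
// sketch only illustrates the ordering, it is not the controller's real code.
type objectReference struct{ Namespace, Name, UID string }
type volumeSpec struct{ ClaimRef *objectReference }
type claim struct{ UID string }

var errNotFound = fmt.Errorf("not found")

// classifyPendingVolume (hypothetical helper) mirrors the four states the
// updated comment enumerates. Checking for real errors first means a non-nil
// claim can never be paired with an unexpected lookup error.
func classifyPendingVolume(spec volumeSpec, c *claim, getErr error) (string, error) {
	if spec.ClaimRef == nil {
		return "available: neither recycled nor prebound", nil // state 4
	}
	switch {
	case getErr != nil && getErr != errNotFound:
		return "", getErr // fail before ever looking at the claim
	case getErr == errNotFound, c != nil && c.UID != spec.ClaimRef.UID:
		return "recently recycled: remove bind, make available", nil // states 2 and 3
	default:
		return "prebound to claim: make available for binding", nil // state 1
	}
}

func main() {
	spec := volumeSpec{ClaimRef: &objectReference{Namespace: "ns", Name: "any", UID: "uid1"}}
	// A claim with the same name but a different UID is treated as recycled.
	state, _ := classifyPendingVolume(spec, &claim{UID: "uid2"}, nil)
	fmt.Println(state)
}

The diff below applies the same ordering inside syncVolume with a switch statement.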


@@ -230,15 +230,19 @@ func syncVolume(volumeIndex *persistentVolumeOrderedIndex, binderClient binderCl
 	switch currentPhase {
 	case api.VolumePending:
-		// 3 possible states:
-		//     1. ClaimRef != nil and Claim exists:  Prebound to claim. Make volume available for binding (it will match PVC).
-		//     2. ClaimRef != nil and Claim !exists: Recently recycled. Remove bind. Make volume available for new claim.
-		//     3. ClaimRef == nil: Neither recycled nor prebound. Make volume available for binding.
+		// 4 possible states:
+		//     1. ClaimRef != nil, Claim exists, Claim UID == ClaimRef UID: Prebound to claim. Make volume available for binding (it will match PVC).
+		//     2. ClaimRef != nil, Claim exists, Claim UID != ClaimRef UID: Recently recycled. Remove bind. Make volume available for new claim.
+		//     3. ClaimRef != nil, Claim !exists: Recently recycled. Remove bind. Make volume available for new claim.
+		//     4. ClaimRef == nil: Neither recycled nor prebound. Make volume available for binding.
 		nextPhase = api.VolumeAvailable
 		if volume.Spec.ClaimRef != nil {
 			claim, err := binderClient.GetPersistentVolumeClaim(volume.Spec.ClaimRef.Namespace, volume.Spec.ClaimRef.Name)
-			if errors.IsNotFound(err) || (claim != nil && claim.UID != volume.Spec.ClaimRef.UID) {
+			switch {
+			case err != nil && !errors.IsNotFound(err):
+				return fmt.Errorf("Error getting PersistentVolumeClaim[%s/%s]: %v", volume.Spec.ClaimRef.Namespace, volume.Spec.ClaimRef.Name, err)
+			case errors.IsNotFound(err) || (claim != nil && claim.UID != volume.Spec.ClaimRef.UID):
 				if volume.Spec.PersistentVolumeReclaimPolicy == api.PersistentVolumeReclaimRecycle {
 					// Pending volumes that have a ClaimRef where the claim is missing were recently recycled.
 					// The Recycler set the phase to VolumePending to start the volume at the beginning of this lifecycle.
@@ -265,8 +269,6 @@ func syncVolume(volumeIndex *persistentVolumeOrderedIndex, binderClient binderCl
 					// Mark the volume as Released, it will be deleted.
 					nextPhase = api.VolumeReleased
 				}
-			} else if err != nil {
-				return fmt.Errorf("Error getting PersistentVolumeClaim[%s/%s]: %v", volume.Spec.ClaimRef.Namespace, volume.Spec.ClaimRef.Name, err)
 			}
 		// Dynamically provisioned claims remain Pending until its volume is completely provisioned.


@@ -31,6 +31,7 @@ import (
 	"k8s.io/kubernetes/pkg/client/cache"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
 	"k8s.io/kubernetes/pkg/client/testing/core"
+	"k8s.io/kubernetes/pkg/types"
 	utiltesting "k8s.io/kubernetes/pkg/util/testing"
 	"k8s.io/kubernetes/pkg/volume"
 	"k8s.io/kubernetes/pkg/volume/host_path"
@@ -566,6 +567,107 @@ func TestCasting(t *testing.T) {
 	binder.updateClaim(pvc, pvc)
 }
+
+func TestRecycledPersistentVolumeUID(t *testing.T) {
+	tmpDir, err := utiltesting.MkTmpdir("claimbinder-test")
+	if err != nil {
+		t.Fatalf("error creating temp dir: %v", err)
+	}
+	defer os.RemoveAll(tmpDir)
+
+	codec := api.Codecs.UniversalDecoder()
+	o := core.NewObjects(api.Scheme, codec)
+	if err := core.AddObjectsFromPath("../../../docs/user-guide/persistent-volumes/claims/claim-01.yaml", o, codec); err != nil {
+		t.Fatal(err)
+	}
+	if err := core.AddObjectsFromPath("../../../docs/user-guide/persistent-volumes/volumes/local-01.yaml", o, codec); err != nil {
+		t.Fatal(err)
+	}
+
+	clientset := &fake.Clientset{}
+	clientset.AddReactor("*", "*", core.ObjectReaction(o, registered.RESTMapper()))
+
+	pv, err := clientset.Core().PersistentVolumes().Get("any")
+	if err != nil {
+		t.Errorf("Unexpected error getting PV from client: %v", err)
+	}
+	pv.Spec.PersistentVolumeReclaimPolicy = api.PersistentVolumeReclaimRecycle
+	pv.ObjectMeta.SelfLink = testapi.Default.SelfLink("pv", "")
+
+	// the default value of the PV is Pending. if processed at least once, its status in etcd is Available.
+	// There was a bug where only Pending volumes were being indexed and made ready for claims.
+	// Test that !Pending gets correctly added
+	pv.Status.Phase = api.VolumeAvailable
+
+	claim, err := clientset.Core().PersistentVolumeClaims("ns").Get("any")
+	if err != nil {
+		t.Errorf("Unexpected error getting PVC from client: %v", err)
+	}
+	claim.ObjectMeta.SelfLink = testapi.Default.SelfLink("pvc", "")
+	claim.ObjectMeta.UID = types.UID("uid1")
+
+	volumeIndex := NewPersistentVolumeOrderedIndex()
+	mockClient := &mockBinderClient{
+		volume: pv,
+		claim:  claim,
+	}
+
+	plugMgr := volume.VolumePluginMgr{}
+	plugMgr.InitPlugins(host_path.ProbeRecyclableVolumePlugins(newMockRecycler, volume.VolumeConfig{}), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
+
+	recycler := &PersistentVolumeRecycler{
+		kubeClient: clientset,
+		client:     mockClient,
+		pluginMgr:  plugMgr,
+	}
+
+	// adds the volume to the index, making the volume available
+	syncVolume(volumeIndex, mockClient, pv)
+	if mockClient.volume.Status.Phase != api.VolumeAvailable {
+		t.Errorf("Expected phase %s but got %s", api.VolumeAvailable, mockClient.volume.Status.Phase)
+	}
+
+	// add the claim to fake API server
+	mockClient.UpdatePersistentVolumeClaim(claim)
+	// an initial sync for a claim will bind it to an unbound volume
+	syncClaim(volumeIndex, mockClient, claim)
+
+	// pretend the user deleted their claim. periodic resync picks it up.
+	mockClient.claim = nil
+	syncVolume(volumeIndex, mockClient, mockClient.volume)
+	if mockClient.volume.Status.Phase != api.VolumeReleased {
+		t.Errorf("Expected phase %s but got %s", api.VolumeReleased, mockClient.volume.Status.Phase)
+	}
+
+	// released volumes with a PersistentVolumeReclaimPolicy (recycle/delete) can have further processing
+	err = recycler.reclaimVolume(mockClient.volume)
+	if err != nil {
+		t.Errorf("Unexpected error reclaiming volume: %+v", err)
+	}
+	if mockClient.volume.Status.Phase != api.VolumePending {
+		t.Errorf("Expected phase %s but got %s", api.VolumePending, mockClient.volume.Status.Phase)
+	}
+
+	// after the recycling changes the phase to Pending, the binder picks up again
+	// to remove any vestiges of binding and make the volume Available again
+	//
+	// explicitly set the claim's UID to a different value to ensure that a new claim with the same
+	// name as the one the PV was previously bound to still yields an available volume
+	claim.ObjectMeta.UID = types.UID("uid2")
+	mockClient.claim = claim
+	syncVolume(volumeIndex, mockClient, mockClient.volume)
+	if mockClient.volume.Status.Phase != api.VolumeAvailable {
+		t.Errorf("Expected phase %s but got %s", api.VolumeAvailable, mockClient.volume.Status.Phase)
+	}
+	if mockClient.volume.Spec.ClaimRef != nil {
+		t.Errorf("Expected nil ClaimRef: %+v", mockClient.volume.Spec.ClaimRef)
+	}
+}
+
 type mockBinderClient struct {
 	volume *api.PersistentVolume
 	claim  *api.PersistentVolumeClaim