mirror of https://github.com/k3s-io/k3s

verify UID when releasing and binding volumes

parent bd7255a27e
commit 1ad2df6b61
@@ -281,13 +281,19 @@ func syncVolume(volumeIndex *persistentVolumeOrderedIndex, binderClient binderClient, volume *api.PersistentVolume) (err error) {
 		if volume.Spec.ClaimRef == nil {
 			return fmt.Errorf("PersistentVolume[%s] expected to be bound but found nil claimRef: %+v", volume.Name, volume)
 		} else {
-			_, err := binderClient.GetPersistentVolumeClaim(volume.Spec.ClaimRef.Namespace, volume.Spec.ClaimRef.Name)
+			claim, err := binderClient.GetPersistentVolumeClaim(volume.Spec.ClaimRef.Namespace, volume.Spec.ClaimRef.Name)
+
+			// A volume is Released when its bound claim cannot be found in the API server.
+			// A claim by the same name can be found if deleted and recreated before this controller can release
+			// the volume from the original claim, so a UID check is necessary.
 			if err != nil {
 				if errors.IsNotFound(err) {
 					nextPhase = api.VolumeReleased
 				} else {
 					return err
 				}
-			}
+			} else if claim != nil && claim.UID != volume.Spec.ClaimRef.UID {
+				nextPhase = api.VolumeReleased
+			}
 		}
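The hunk above keys the release decision on the claim's UID rather than on name and namespace alone, covering the case where a claim is deleted and recreated under the same name. A minimal sketch of the same three-way decision, using hypothetical simplified stand-in types rather than the real Kubernetes API (the helper name, the types, and errNotFound are all assumptions for illustration):

// release_check_sketch.go — a minimal sketch of the decision above;
// the real controller uses the Kubernetes API types and errors.IsNotFound.
package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("claim not found")

type claimRef struct{ Name, Namespace, UID string }
type claim struct{ UID string }

// nextPhaseForBoundVolume mirrors the three outcomes in the hunk:
// release when the claim is gone, surface any other lookup error,
// and release when a same-name claim carries a different UID.
func nextPhaseForBoundVolume(c *claim, lookupErr error, ref claimRef) (string, error) {
	if lookupErr != nil {
		if errors.Is(lookupErr, errNotFound) {
			return "Released", nil // claim deleted outright
		}
		return "", lookupErr // transient API error: retry later
	}
	if c != nil && c.UID != ref.UID {
		return "Released", nil // claim was deleted and recreated
	}
	return "Bound", nil
}

func main() {
	ref := claimRef{Name: "c1", Namespace: "foo", UID: "12345"}
	phase, _ := nextPhaseForBoundVolume(&claim{UID: "45678"}, nil, ref)
	fmt.Println(phase) // Released
}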
@@ -143,6 +143,68 @@ func TestClaimRace(t *testing.T) {
 	}
 }
 
+func TestNewClaimWithSameNameAsOldClaim(t *testing.T) {
+	c1 := &api.PersistentVolumeClaim{
+		ObjectMeta: api.ObjectMeta{
+			Name:      "c1",
+			Namespace: "foo",
+			UID:       "12345",
+		},
+		Spec: api.PersistentVolumeClaimSpec{
+			AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
+			Resources: api.ResourceRequirements{
+				Requests: api.ResourceList{
+					api.ResourceName(api.ResourceStorage): resource.MustParse("3Gi"),
+				},
+			},
+		},
+		Status: api.PersistentVolumeClaimStatus{
+			Phase: api.ClaimBound,
+		},
+	}
+	c1.ObjectMeta.SelfLink = testapi.Default.SelfLink("pvc", "")
+
+	v := &api.PersistentVolume{
+		ObjectMeta: api.ObjectMeta{
+			Name: "foo",
+		},
+		Spec: api.PersistentVolumeSpec{
+			ClaimRef: &api.ObjectReference{
+				Name:      c1.Name,
+				Namespace: c1.Namespace,
+				UID:       "45678",
+			},
+			AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
+			Capacity: api.ResourceList{
+				api.ResourceName(api.ResourceStorage): resource.MustParse("10Gi"),
+			},
+			PersistentVolumeSource: api.PersistentVolumeSource{
+				HostPath: &api.HostPathVolumeSource{
+					Path: "/tmp/data01",
+				},
+			},
+		},
+		Status: api.PersistentVolumeStatus{
+			Phase: api.VolumeBound,
+		},
+	}
+
+	volumeIndex := NewPersistentVolumeOrderedIndex()
+	mockClient := &mockBinderClient{
+		claim:  c1,
+		volume: v,
+	}
+
+	plugMgr := volume.VolumePluginMgr{}
+	plugMgr.InitPlugins(host_path.ProbeRecyclableVolumePlugins(newMockRecycler, volume.VolumeConfig{}), volume.NewFakeVolumeHost("/tmp/fake", nil, nil))
+
+	syncVolume(volumeIndex, mockClient, v)
+	if mockClient.volume.Status.Phase != api.VolumeReleased {
+		t.Errorf("Expected phase %s but got %s", api.VolumeReleased, mockClient.volume.Status.Phase)
+	}
+
+}
+
 func TestClaimSyncAfterVolumeProvisioning(t *testing.T) {
 	// Tests that binder.syncVolume will also syncClaim if the PV has completed
 	// provisioning but the claim is still Pending. We want to advance to Bound
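The new test encodes the race directly in its fixture: claim c1 exists with UID "12345", while the volume's ClaimRef still pins UID "45678" from the original binding, so syncVolume must move the volume to Released even though a claim with the bound name is present. A compressed, hypothetical walkthrough of that timeline (simplified stand-in types; the real test drives syncVolume through a mock binder client):

// recreation_walkthrough.go — hypothetical condensation of the race
// the test exercises; types are simplified stand-ins.
package main

import "fmt"

type boundVolume struct {
	claimRefUID string // UID recorded when the volume was bound
	phase       string
}

func main() {
	v := boundVolume{claimRefUID: "45678", phase: "Bound"}

	// The original claim is deleted and a new claim with the same
	// name/namespace appears before the controller can react.
	currentClaimUID := "12345"

	// On the next sync, the UID comparison exposes the recreation,
	// which is exactly what the test asserts.
	if currentClaimUID != v.claimRefUID {
		v.phase = "Released"
	}
	fmt.Println(v.phase) // Released
}

With standard Go tooling, the test can be run in isolation via go test -run TestNewClaimWithSameNameAsOldClaim in the binder's package directory.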
@@ -117,7 +117,7 @@ func (pvIndex *persistentVolumeOrderedIndex) findByClaim(claim *api.PersistentVolumeClaim) (*api.PersistentVolume, error) {
 			continue
 		}
 
-		if claim.Name == volume.Spec.ClaimRef.Name && claim.Namespace == volume.Spec.ClaimRef.Namespace {
+		if claim.Name == volume.Spec.ClaimRef.Name && claim.Namespace == volume.Spec.ClaimRef.Namespace && claim.UID == volume.Spec.ClaimRef.UID {
 			// exact match! No search required.
 			return volume, nil
 		}
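With the UID added to findByClaim's match predicate, a pre-bound volume is handed back only to the exact claim object named in its ClaimRef; a recreated claim with the same name and namespace no longer qualifies as an exact match. A standalone sketch of the stricter predicate (the helper name and types are hypothetical; the real code walks an ordered index of volumes):

// exact_match_sketch.go — standalone sketch of the stricter predicate;
// objectRef and isExactMatch are illustrative stand-ins.
package main

import "fmt"

type objectRef struct{ Name, Namespace, UID string }

// isExactMatch reports whether a claim is the very object a volume's
// ClaimRef points at: name, namespace, and UID must all agree.
func isExactMatch(claim, claimRef objectRef) bool {
	return claim.Name == claimRef.Name &&
		claim.Namespace == claimRef.Namespace &&
		claim.UID == claimRef.UID
}

func main() {
	volumeRef := objectRef{Name: "c1", Namespace: "foo", UID: "45678"}
	recreated := objectRef{Name: "c1", Namespace: "foo", UID: "12345"}
	fmt.Println(isExactMatch(recreated, volumeRef)) // false: not the original claim
}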