/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package persistentvolume
|
|
|
|
|
|
|
|
import (
|
|
|
|
"testing"
|
|
|
|
"time"
|
|
|
|
|
|
|
|
"github.com/golang/glog"
|
2016-11-18 20:50:17 +00:00
|
|
|
"k8s.io/kubernetes/pkg/api/v1"
|
volume controller: Add cache with the latest version of PVs and PVCs
When the controller binds a PV to PVC, it saves both objects to etcd.
However, there is still an old version of these objects in the controller
Informer cache. So, when a new PVC comes, the PV is still seen as available
and may get bound to the new PVC. This will be blocked by etcd, still, it
creates unnecessary traffic that slows everything down.
Also, we save bound PV/PVC as two transactions - we save PV/PVC.Spec first
and then .Status. The controller gets "PV/PVC.Spec updated" event from etcd
and tries to fix the Status, as it seems to the controller it's outdated.
This write again fails - there already is a correct version in etcd.
We can't influence the Informer cache, it is read-only to the controller.
To prevent these useless writes to etcd, this patch introduces second cache
in the controller, which holds latest and greatest version on PVs and PVCs.
It gets updated with events from etcd *and* after etcd confirms successful
save of PV/PVC modified by the controller.
The cache stores only *pointers* to PVs/PVCs, so in ideal case it shares the
actual object data with the informer cache. They will diverge only when
the controller modifies something and the informer cache did not get update
events yet.
2016-05-19 11:31:19 +00:00
|
|
|
"k8s.io/kubernetes/pkg/client/cache"
|
2016-12-14 01:18:17 +00:00
|
|
|
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
|
2016-09-14 18:35:38 +00:00
|
|
|
fcache "k8s.io/kubernetes/pkg/client/testing/cache"
|
2016-05-17 12:55:15 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
// Test the real controller methods (add/update/delete claim/volume) with
// a fake API server.
// There is no controller API to 'initiate syncAll now', therefore these tests
// can't reliably simulate periodic sync of volumes/claims - it would be
// either very timing-sensitive or slow to wait for real periodic sync.
func TestControllerSync(t *testing.T) {
	tests := []controllerTest{
		// [Unit test set 5] - controller tests.
		// We test the controller as if
		// it was connected to real API server, i.e. we call add/update/delete
		// Claim/Volume methods. Also, all changes to volumes and claims are
		// sent to add/update/delete Claim/Volume as real controller would do.
		{
			// addClaim gets a new claim. Check it's bound to a volume.
			"5-2 - complete bind",
			newVolumeArray("volume5-2", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain),
			newVolumeArray("volume5-2", "1Gi", "uid5-2", "claim5-2", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, annBoundByController),
			noclaims, /* added in testAddClaim5_2 */
			newClaimArray("claim5-2", "uid5-2", "1Gi", "volume5-2", v1.ClaimBound, annBoundByController, annBindCompleted),
			noevents, noerrors,
			// Custom test function that generates an add event
			func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error {
				claim := newClaim("claim5-2", "uid5-2", "1Gi", "", v1.ClaimPending)
				reactor.addClaimEvent(claim)
				return nil
			},
		},
		{
			// deleteClaim with a bound claim makes bound volume released.
			"5-3 - delete claim",
			newVolumeArray("volume5-3", "10Gi", "uid5-3", "claim5-3", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, annBoundByController),
			newVolumeArray("volume5-3", "10Gi", "uid5-3", "claim5-3", v1.VolumeReleased, v1.PersistentVolumeReclaimRetain, annBoundByController),
			newClaimArray("claim5-3", "uid5-3", "1Gi", "volume5-3", v1.ClaimBound, annBoundByController, annBindCompleted),
			noclaims,
			noevents, noerrors,
			// Custom test function that generates a delete event
			func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error {
				obj := ctrl.claims.List()[0]
				claim := obj.(*v1.PersistentVolumeClaim)
				reactor.deleteClaimEvent(claim)
				return nil
			},
		},
		{
			// deleteVolume with a bound volume. Check the claim is Lost.
			"5-4 - delete volume",
			newVolumeArray("volume5-4", "1Gi", "uid5-4", "claim5-4", v1.VolumeBound, v1.PersistentVolumeReclaimRetain),
			novolumes,
			newClaimArray("claim5-4", "uid5-4", "1Gi", "volume5-4", v1.ClaimBound, annBoundByController, annBindCompleted),
			newClaimArray("claim5-4", "uid5-4", "1Gi", "volume5-4", v1.ClaimLost, annBoundByController, annBindCompleted),
			[]string{"Warning ClaimLost"}, noerrors,
			// Custom test function that generates a delete event
			func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error {
				obj := ctrl.volumes.store.List()[0]
				volume := obj.(*v1.PersistentVolume)
				reactor.deleteVolumeEvent(volume)
				return nil
			},
		},
		{
			// addVolume with provisioned volume from Kubernetes 1.2. No "action"
			// is expected - it should stay bound.
			"5-5 - add bound volume from 1.2",
			novolumes,
			[]*v1.PersistentVolume{addVolumeAnnotation(newVolume("volume5-5", "1Gi", "uid5-5", "claim5-5", v1.VolumeBound, v1.PersistentVolumeReclaimDelete), pvProvisioningRequiredAnnotationKey, pvProvisioningCompletedAnnotationValue)},
			newClaimArray("claim5-5", "uid5-5", "1Gi", "", v1.ClaimPending),
			newClaimArray("claim5-5", "uid5-5", "1Gi", "volume5-5", v1.ClaimBound, annBindCompleted, annBoundByController),
			noevents, noerrors,
			// Custom test function that generates an add event
			func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error {
				volume := newVolume("volume5-5", "1Gi", "uid5-5", "claim5-5", v1.VolumeBound, v1.PersistentVolumeReclaimDelete)
				volume = addVolumeAnnotation(volume, pvProvisioningRequiredAnnotationKey, pvProvisioningCompletedAnnotationValue)
				reactor.addVolumeEvent(volume)
				return nil
			},
		},
		{
			// updateVolume with provisioned volume from Kubernetes 1.2. No
			// "action" is expected - it should stay bound.
			"5-6 - update bound volume from 1.2",
			[]*v1.PersistentVolume{addVolumeAnnotation(newVolume("volume5-6", "1Gi", "uid5-6", "claim5-6", v1.VolumeBound, v1.PersistentVolumeReclaimDelete), pvProvisioningRequiredAnnotationKey, pvProvisioningCompletedAnnotationValue)},
			[]*v1.PersistentVolume{addVolumeAnnotation(newVolume("volume5-6", "1Gi", "uid5-6", "claim5-6", v1.VolumeBound, v1.PersistentVolumeReclaimDelete), pvProvisioningRequiredAnnotationKey, pvProvisioningCompletedAnnotationValue)},
			newClaimArray("claim5-6", "uid5-6", "1Gi", "volume5-6", v1.ClaimBound),
			newClaimArray("claim5-6", "uid5-6", "1Gi", "volume5-6", v1.ClaimBound, annBindCompleted),
			noevents, noerrors,
			// Custom test function that generates an update event
			func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error {
				volume := newVolume("volume5-6", "1Gi", "uid5-6", "claim5-6", v1.VolumeBound, v1.PersistentVolumeReclaimDelete)
				volume = addVolumeAnnotation(volume, pvProvisioningRequiredAnnotationKey, pvProvisioningCompletedAnnotationValue)
				reactor.modifyVolumeEvent(volume)
				return nil
			},
		},
		{
			// addVolume with unprovisioned volume from Kubernetes 1.2. The
			// volume should be deleted.
			"5-7 - add unprovisioned volume from 1.2",
			novolumes,
			novolumes,
			newClaimArray("claim5-7", "uid5-7", "1Gi", "", v1.ClaimPending),
			newClaimArray("claim5-7", "uid5-7", "1Gi", "", v1.ClaimPending),
			noevents, noerrors,
			// Custom test function that generates an add event
			func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error {
				volume := newVolume("volume5-7", "1Gi", "uid5-7", "claim5-7", v1.VolumeBound, v1.PersistentVolumeReclaimDelete)
				volume = addVolumeAnnotation(volume, pvProvisioningRequiredAnnotationKey, "yes")
				reactor.addVolumeEvent(volume)
				return nil
			},
		},
		{
			// updateVolume with unprovisioned volume from Kubernetes 1.2. The
			// volume should be deleted.
			"5-8 - update bound volume from 1.2",
			novolumes,
			novolumes,
			newClaimArray("claim5-8", "uid5-8", "1Gi", "", v1.ClaimPending),
			newClaimArray("claim5-8", "uid5-8", "1Gi", "", v1.ClaimPending),
			noevents, noerrors,
			// Custom test function that generates an update event
			func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error {
				volume := newVolume("volume5-8", "1Gi", "uid5-8", "claim5-8", v1.VolumeBound, v1.PersistentVolumeReclaimDelete)
				volume = addVolumeAnnotation(volume, pvProvisioningRequiredAnnotationKey, "yes")
				reactor.modifyVolumeEvent(volume)
				return nil
			},
		},
	}

	for _, test := range tests {
		glog.V(4).Infof("starting test %q", test.name)

		// Initialize the controller
		client := &fake.Clientset{}
		volumeSource := fcache.NewFakePVControllerSource()
		claimSource := fcache.NewFakePVCControllerSource()
		ctrl := newTestController(client, volumeSource, claimSource, nil, true)
		reactor := newVolumeReactor(client, ctrl, volumeSource, claimSource, test.errors)
		// Seed both the fake watch sources and the reactor's view of the
		// "API server" with the test's initial objects.
		for _, claim := range test.initialClaims {
			claimSource.Add(claim)
			reactor.claims[claim.Name] = claim
		}
		for _, volume := range test.initialVolumes {
			volumeSource.Add(volume)
			reactor.volumes[volume.Name] = volume
		}

		// Start the controller
		stopCh := make(chan struct{})
		ctrl.Run(stopCh)

		// Wait for the controller to pass initial sync and fill its caches.
		// Polling is used because the informers expose no "fully populated"
		// signal beyond HasSynced plus the cache key counts checked here.
		for !ctrl.volumeController.HasSynced() ||
			!ctrl.claimController.HasSynced() ||
			len(ctrl.claims.ListKeys()) < len(test.initialClaims) ||
			len(ctrl.volumes.store.ListKeys()) < len(test.initialVolumes) {

			time.Sleep(10 * time.Millisecond)
		}
		glog.V(4).Infof("controller synced, starting test")

		// Call the tested function
		err := test.test(ctrl, reactor, test)
		if err != nil {
			t.Errorf("Test %q initial test call failed: %v", test.name, err)
		}
		// Simulate a periodic resync, just in case some events arrived in a
		// wrong order.
		ctrl.claims.Resync()
		ctrl.volumes.store.Resync()

		// Wait until the reactor reaches the test's expected end state.
		err = reactor.waitTest(test)
		if err != nil {
			t.Errorf("Failed to run test %s: %v", test.name, err)
		}
		close(stopCh)

		evaluateTestResults(ctrl, reactor, test, t)
	}
}
|
volume controller: Add cache with the latest version of PVs and PVCs
When the controller binds a PV to PVC, it saves both objects to etcd.
However, there is still an old version of these objects in the controller
Informer cache. So, when a new PVC comes, the PV is still seen as available
and may get bound to the new PVC. This will be blocked by etcd, still, it
creates unnecessary traffic that slows everything down.
Also, we save bound PV/PVC as two transactions - we save PV/PVC.Spec first
and then .Status. The controller gets "PV/PVC.Spec updated" event from etcd
and tries to fix the Status, as it seems to the controller it's outdated.
This write again fails - there already is a correct version in etcd.
We can't influence the Informer cache, it is read-only to the controller.
To prevent these useless writes to etcd, this patch introduces second cache
in the controller, which holds latest and greatest version on PVs and PVCs.
It gets updated with events from etcd *and* after etcd confirms successful
save of PV/PVC modified by the controller.
The cache stores only *pointers* to PVs/PVCs, so in ideal case it shares the
actual object data with the informer cache. They will diverge only when
the controller modifies something and the informer cache did not get update
events yet.
2016-05-19 11:31:19 +00:00
|
|
|
|
|
|
|
func storeVersion(t *testing.T, prefix string, c cache.Store, version string, expectedReturn bool) {
|
2016-11-18 20:50:17 +00:00
|
|
|
pv := newVolume("pvName", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimDelete)
|
volume controller: Add cache with the latest version of PVs and PVCs
When the controller binds a PV to PVC, it saves both objects to etcd.
However, there is still an old version of these objects in the controller
Informer cache. So, when a new PVC comes, the PV is still seen as available
and may get bound to the new PVC. This will be blocked by etcd, still, it
creates unnecessary traffic that slows everything down.
Also, we save bound PV/PVC as two transactions - we save PV/PVC.Spec first
and then .Status. The controller gets "PV/PVC.Spec updated" event from etcd
and tries to fix the Status, as it seems to the controller it's outdated.
This write again fails - there already is a correct version in etcd.
We can't influence the Informer cache, it is read-only to the controller.
To prevent these useless writes to etcd, this patch introduces second cache
in the controller, which holds latest and greatest version on PVs and PVCs.
It gets updated with events from etcd *and* after etcd confirms successful
save of PV/PVC modified by the controller.
The cache stores only *pointers* to PVs/PVCs, so in ideal case it shares the
actual object data with the informer cache. They will diverge only when
the controller modifies something and the informer cache did not get update
events yet.
2016-05-19 11:31:19 +00:00
|
|
|
pv.ResourceVersion = version
|
|
|
|
ret, err := storeObjectUpdate(c, pv, "volume")
|
|
|
|
if err != nil {
|
|
|
|
t.Errorf("%s: expected storeObjectUpdate to succeed, got: %v", prefix, err)
|
|
|
|
}
|
|
|
|
if expectedReturn != ret {
|
|
|
|
t.Errorf("%s: expected storeObjectUpdate to return %v, got: %v", prefix, expectedReturn, ret)
|
|
|
|
}
|
|
|
|
|
|
|
|
// find the stored version
|
|
|
|
|
|
|
|
pvObj, found, err := c.GetByKey("pvName")
|
|
|
|
if err != nil {
|
|
|
|
t.Errorf("expected volume 'pvName' in the cache, got error instead: %v", err)
|
|
|
|
}
|
|
|
|
if !found {
|
|
|
|
t.Errorf("expected volume 'pvName' in the cache but it was not found")
|
|
|
|
}
|
2016-11-18 20:50:17 +00:00
|
|
|
pv, ok := pvObj.(*v1.PersistentVolume)
|
volume controller: Add cache with the latest version of PVs and PVCs
When the controller binds a PV to PVC, it saves both objects to etcd.
However, there is still an old version of these objects in the controller
Informer cache. So, when a new PVC comes, the PV is still seen as available
and may get bound to the new PVC. This will be blocked by etcd, still, it
creates unnecessary traffic that slows everything down.
Also, we save bound PV/PVC as two transactions - we save PV/PVC.Spec first
and then .Status. The controller gets "PV/PVC.Spec updated" event from etcd
and tries to fix the Status, as it seems to the controller it's outdated.
This write again fails - there already is a correct version in etcd.
We can't influence the Informer cache, it is read-only to the controller.
To prevent these useless writes to etcd, this patch introduces second cache
in the controller, which holds latest and greatest version on PVs and PVCs.
It gets updated with events from etcd *and* after etcd confirms successful
save of PV/PVC modified by the controller.
The cache stores only *pointers* to PVs/PVCs, so in ideal case it shares the
actual object data with the informer cache. They will diverge only when
the controller modifies something and the informer cache did not get update
events yet.
2016-05-19 11:31:19 +00:00
|
|
|
if !ok {
|
2016-06-14 12:04:38 +00:00
|
|
|
t.Errorf("expected volume in the cache, got different object instead: %#v", pvObj)
|
volume controller: Add cache with the latest version of PVs and PVCs
When the controller binds a PV to PVC, it saves both objects to etcd.
However, there is still an old version of these objects in the controller
Informer cache. So, when a new PVC comes, the PV is still seen as available
and may get bound to the new PVC. This will be blocked by etcd, still, it
creates unnecessary traffic that slows everything down.
Also, we save bound PV/PVC as two transactions - we save PV/PVC.Spec first
and then .Status. The controller gets "PV/PVC.Spec updated" event from etcd
and tries to fix the Status, as it seems to the controller it's outdated.
This write again fails - there already is a correct version in etcd.
We can't influence the Informer cache, it is read-only to the controller.
To prevent these useless writes to etcd, this patch introduces second cache
in the controller, which holds latest and greatest version on PVs and PVCs.
It gets updated with events from etcd *and* after etcd confirms successful
save of PV/PVC modified by the controller.
The cache stores only *pointers* to PVs/PVCs, so in ideal case it shares the
actual object data with the informer cache. They will diverge only when
the controller modifies something and the informer cache did not get update
events yet.
2016-05-19 11:31:19 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if ret {
|
|
|
|
if pv.ResourceVersion != version {
|
|
|
|
t.Errorf("expected volume with version %s in the cache, got %s instead", version, pv.ResourceVersion)
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if pv.ResourceVersion == version {
|
|
|
|
t.Errorf("expected volume with version other than %s in the cache, got %s instead", version, pv.ResourceVersion)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// TestControllerCache tests func storeObjectUpdate()
|
|
|
|
func TestControllerCache(t *testing.T) {
|
|
|
|
// Cache under test
|
2016-09-14 18:35:38 +00:00
|
|
|
c := cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc)
|
volume controller: Add cache with the latest version of PVs and PVCs
When the controller binds a PV to PVC, it saves both objects to etcd.
However, there is still an old version of these objects in the controller
Informer cache. So, when a new PVC comes, the PV is still seen as available
and may get bound to the new PVC. This will be blocked by etcd, still, it
creates unnecessary traffic that slows everything down.
Also, we save bound PV/PVC as two transactions - we save PV/PVC.Spec first
and then .Status. The controller gets "PV/PVC.Spec updated" event from etcd
and tries to fix the Status, as it seems to the controller it's outdated.
This write again fails - there already is a correct version in etcd.
We can't influence the Informer cache, it is read-only to the controller.
To prevent these useless writes to etcd, this patch introduces second cache
in the controller, which holds latest and greatest version on PVs and PVCs.
It gets updated with events from etcd *and* after etcd confirms successful
save of PV/PVC modified by the controller.
The cache stores only *pointers* to PVs/PVCs, so in ideal case it shares the
actual object data with the informer cache. They will diverge only when
the controller modifies something and the informer cache did not get update
events yet.
2016-05-19 11:31:19 +00:00
|
|
|
|
|
|
|
// Store new PV
|
|
|
|
storeVersion(t, "Step1", c, "1", true)
|
|
|
|
// Store the same PV
|
|
|
|
storeVersion(t, "Step2", c, "1", true)
|
|
|
|
// Store newer PV
|
|
|
|
storeVersion(t, "Step3", c, "2", true)
|
|
|
|
// Store older PV - simulating old "PV updated" event or periodic sync with
|
|
|
|
// old data
|
|
|
|
storeVersion(t, "Step4", c, "1", false)
|
|
|
|
// Store newer PV - test integer parsing ("2" > "10" as string,
|
|
|
|
// while 2 < 10 as integers)
|
|
|
|
storeVersion(t, "Step5", c, "10", true)
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestControllerCacheParsingError(t *testing.T) {
|
2016-09-14 18:35:38 +00:00
|
|
|
c := cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc)
|
volume controller: Add cache with the latest version of PVs and PVCs
When the controller binds a PV to PVC, it saves both objects to etcd.
However, there is still an old version of these objects in the controller
Informer cache. So, when a new PVC comes, the PV is still seen as available
and may get bound to the new PVC. This will be blocked by etcd, still, it
creates unnecessary traffic that slows everything down.
Also, we save bound PV/PVC as two transactions - we save PV/PVC.Spec first
and then .Status. The controller gets "PV/PVC.Spec updated" event from etcd
and tries to fix the Status, as it seems to the controller it's outdated.
This write again fails - there already is a correct version in etcd.
We can't influence the Informer cache, it is read-only to the controller.
To prevent these useless writes to etcd, this patch introduces second cache
in the controller, which holds latest and greatest version on PVs and PVCs.
It gets updated with events from etcd *and* after etcd confirms successful
save of PV/PVC modified by the controller.
The cache stores only *pointers* to PVs/PVCs, so in ideal case it shares the
actual object data with the informer cache. They will diverge only when
the controller modifies something and the informer cache did not get update
events yet.
2016-05-19 11:31:19 +00:00
|
|
|
// There must be something in the cache to compare with
|
|
|
|
storeVersion(t, "Step1", c, "1", true)
|
|
|
|
|
2016-11-18 20:50:17 +00:00
|
|
|
pv := newVolume("pvName", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimDelete)
|
volume controller: Add cache with the latest version of PVs and PVCs
When the controller binds a PV to PVC, it saves both objects to etcd.
However, there is still an old version of these objects in the controller
Informer cache. So, when a new PVC comes, the PV is still seen as available
and may get bound to the new PVC. This will be blocked by etcd, still, it
creates unnecessary traffic that slows everything down.
Also, we save bound PV/PVC as two transactions - we save PV/PVC.Spec first
and then .Status. The controller gets "PV/PVC.Spec updated" event from etcd
and tries to fix the Status, as it seems to the controller it's outdated.
This write again fails - there already is a correct version in etcd.
We can't influence the Informer cache, it is read-only to the controller.
To prevent these useless writes to etcd, this patch introduces second cache
in the controller, which holds latest and greatest version on PVs and PVCs.
It gets updated with events from etcd *and* after etcd confirms successful
save of PV/PVC modified by the controller.
The cache stores only *pointers* to PVs/PVCs, so in ideal case it shares the
actual object data with the informer cache. They will diverge only when
the controller modifies something and the informer cache did not get update
events yet.
2016-05-19 11:31:19 +00:00
|
|
|
pv.ResourceVersion = "xxx"
|
|
|
|
_, err := storeObjectUpdate(c, pv, "volume")
|
|
|
|
if err == nil {
|
|
|
|
t.Errorf("Expected parsing error, got nil instead")
|
|
|
|
}
|
|
|
|
}
|
2016-06-03 12:26:06 +00:00
|
|
|
|
2016-11-18 20:50:17 +00:00
|
|
|
func addVolumeAnnotation(volume *v1.PersistentVolume, annName, annValue string) *v1.PersistentVolume {
|
2016-06-03 12:26:06 +00:00
|
|
|
if volume.Annotations == nil {
|
|
|
|
volume.Annotations = make(map[string]string)
|
|
|
|
}
|
|
|
|
volume.Annotations[annName] = annValue
|
|
|
|
return volume
|
|
|
|
}
|