mirror of https://github.com/k3s-io/k3s
Restore alpha behavior
parent d8a95a3785
commit bb5d562f37

@@ -419,9 +419,14 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
        glog.Infof("Not starting %s apis", groupVersion)
    }

    alphaProvisioner, err := NewAlphaVolumeProvisioner(cloud, s.VolumeConfiguration)
    if err != nil {
        glog.Fatalf("A backward-compatible provisioner could not be created: %v, but one was expected. Provisioning will not work. This functionality is considered an early Alpha version.", err)
    }
    volumeController := persistentvolumecontroller.NewPersistentVolumeController(
        clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "persistent-volume-binder")),
        s.PVClaimBinderSyncPeriod.Duration,
        alphaProvisioner,
        ProbeControllerVolumePlugins(cloud, s.VolumeConfiguration),
        cloud,
        s.ClusterName,

@@ -114,6 +114,30 @@ func ProbeControllerVolumePlugins(cloud cloudprovider.Interface, config componen
    return allPlugins
}

// NewAlphaVolumeProvisioner returns a volume provisioner to use when running in
// a cloud or development environment. The alpha implementation of provisioning
// allows 1 implied provisioner per cloud and is here only for compatibility
// with Kubernetes 1.3
// TODO: remove in Kubernetes 1.5
func NewAlphaVolumeProvisioner(cloud cloudprovider.Interface, config componentconfig.VolumeConfiguration) (volume.ProvisionableVolumePlugin, error) {
    switch {
    case cloud == nil && config.EnableHostPathProvisioning:
        return getProvisionablePluginFromVolumePlugins(host_path.ProbeVolumePlugins(
            volume.VolumeConfig{
                ProvisioningEnabled: true,
            }))
    case cloud != nil && aws.ProviderName == cloud.ProviderName():
        return getProvisionablePluginFromVolumePlugins(aws_ebs.ProbeVolumePlugins())
    case cloud != nil && gce.ProviderName == cloud.ProviderName():
        return getProvisionablePluginFromVolumePlugins(gce_pd.ProbeVolumePlugins())
    case cloud != nil && openstack.ProviderName == cloud.ProviderName():
        return getProvisionablePluginFromVolumePlugins(cinder.ProbeVolumePlugins())
    case cloud != nil && vsphere.ProviderName == cloud.ProviderName():
        return getProvisionablePluginFromVolumePlugins(vsphere_volume.ProbeVolumePlugins())
    }
    return nil, nil
}

func getProvisionablePluginFromVolumePlugins(plugins []volume.VolumePlugin) (volume.ProvisionableVolumePlugin, error) {
    for _, plugin := range plugins {
        if provisonablePlugin, ok := plugin.(volume.ProvisionableVolumePlugin); ok {

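The switch above boils down to a fixed cloud-to-plugin mapping with no StorageClass involved, which is the point of the alpha path. A rough standalone sketch of that rule follows; the helper is hypothetical and not part of this patch, and the strings are the usual in-tree provisioner names:

    package main

    import "fmt"

    // alphaProvisionerFor mirrors the "one implied provisioner per cloud" rule of
    // NewAlphaVolumeProvisioner: the cloud provider name alone selects the plugin.
    func alphaProvisionerFor(providerName string) (string, error) {
        switch providerName {
        case "aws":
            return "kubernetes.io/aws-ebs", nil
        case "gce":
            return "kubernetes.io/gce-pd", nil
        case "openstack":
            return "kubernetes.io/cinder", nil
        case "vsphere":
            return "kubernetes.io/vsphere-volume", nil
        }
        return "", fmt.Errorf("no alpha provisioner for provider %q", providerName)
    }

    func main() {
        plugin, err := alphaProvisionerFor("gce")
        fmt.Println(plugin, err) // kubernetes.io/gce-pd <nil>
    }
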
@@ -285,9 +285,14 @@ func (s *CMServer) Run(_ []string) error {
        }
    }

    alphaProvisioner, err := kubecontrollermanager.NewAlphaVolumeProvisioner(cloud, s.VolumeConfiguration)
    if err != nil {
        glog.Fatalf("A backward-compatible provisioner could not be created: %v, but one was expected. Provisioning will not work. This functionality is considered an early Alpha version.", err)
    }
    volumeController := persistentvolumecontroller.NewPersistentVolumeController(
        clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "persistent-volume-binder")),
        s.PVClaimBinderSyncPeriod.Duration,
        alphaProvisioner,
        kubecontrollermanager.ProbeControllerVolumePlugins(cloud, s.VolumeConfiguration),
        cloud,
        s.ClusterName,

@@ -22,6 +22,7 @@ import (
    "time"

    "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/api/unversioned"
    "k8s.io/kubernetes/pkg/apis/extensions"
    "k8s.io/kubernetes/pkg/client/cache"
    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"

@@ -121,7 +122,7 @@ const annClass = "volume.beta.kubernetes.io/storage-class"

// alphaAnnClass annotation represents the previous alpha storage class
// annotation. It's no longer used and held here for posterity.
const alphaAnnClass = "volume.alpha.kubernetes.io/storage-class"
const annAlphaClass = "volume.alpha.kubernetes.io/storage-class"

// This annotation is added to a PV that has been dynamically provisioned by
// Kubernetes. Its value is the name of the volume plugin that created the volume.

@@ -186,6 +187,10 @@ type PersistentVolumeController struct {

    createProvisionedPVRetryCount int
    createProvisionedPVInterval   time.Duration

    // Provisioner for annAlphaClass.
    // TODO: remove in 1.5
    alphaProvisioner vol.ProvisionableVolumePlugin
}

// syncClaim is the main controller method to decide what to do with a claim.

@@ -211,6 +216,7 @@ func (ctrl *PersistentVolumeController) syncUnboundClaim(claim *api.PersistentVo
    // OBSERVATION: pvc is "Pending"
    if claim.Spec.VolumeName == "" {
        // User did not care which PV they get.

        // [Unit test set 1]
        volume, err := ctrl.volumes.findBestMatchForClaim(claim)
        if err != nil {

@@ -221,7 +227,8 @@ func (ctrl *PersistentVolumeController) syncUnboundClaim(claim *api.PersistentVo
            glog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: no volume found", claimToClaimKey(claim))
            // No PV could be found
            // OBSERVATION: pvc is "Pending", will retry
            if getClaimClass(claim) != "" {
            // TODO: remove Alpha check in 1.5
            if getClaimClass(claim) != "" || hasAnnotation(claim.ObjectMeta, annAlphaClass) {
                if err = ctrl.provisionClaim(claim); err != nil {
                    return err
                }

@@ -1198,6 +1205,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claimObj interfa
        glog.Errorf("Cannot convert provisionClaimOperation argument to claim, got %#v", claimObj)
        return
    }

    claimClass := getClaimClass(claim)
    glog.V(4).Infof("provisionClaimOperation [%s] started, class: %q", claimToClaimKey(claim), claimClass)

@@ -1276,7 +1284,12 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claimObj interfa
    // Add annBoundByController (used in deleting the volume)
    setAnnotation(&volume.ObjectMeta, annBoundByController, "yes")
    setAnnotation(&volume.ObjectMeta, annDynamicallyProvisioned, plugin.GetPluginName())
    setAnnotation(&volume.ObjectMeta, annClass, claimClass)
    // For Alpha provisioning behavior, do not add annClass for volumes created
    // by annAlphaClass
    // TODO: remove this check in 1.5, annClass will always be non-empty there.
    if claimClass != "" {
        setAnnotation(&volume.ObjectMeta, annClass, claimClass)
    }

    // Try to create the PV object several times
    for i := 0; i < ctrl.createProvisionedPVRetryCount; i++ {

@@ -1357,6 +1370,20 @@ func (ctrl *PersistentVolumeController) scheduleOperation(operationName string,
}

func (ctrl *PersistentVolumeController) findProvisionablePlugin(claim *api.PersistentVolumeClaim) (vol.ProvisionableVolumePlugin, *extensions.StorageClass, error) {
    // TODO: remove this alpha behavior in 1.5
    alpha := hasAnnotation(claim.ObjectMeta, annAlphaClass)
    beta := hasAnnotation(claim.ObjectMeta, annClass)
    if alpha && beta {
        // Both Alpha and Beta annotations are set. Do beta.
        alpha = false
        msg := fmt.Sprintf("both %q and %q annotations are present, using %q", annAlphaClass, annClass, annClass)
        ctrl.eventRecorder.Event(claim, api.EventTypeNormal, "ProvisioningIgnoreAlpha", msg)
    }
    if alpha {
        // Fall back to fixed list of provisioner plugins
        return ctrl.findAlphaProvisionablePlugin()
    }

    // provisionClaim(), which leads here, is never called with claimClass == "",
    // so we can save some checks.
    claimClass := getClaimClass(claim)

@@ -1379,3 +1406,25 @@ func (ctrl *PersistentVolumeController) findProvisionablePlugin(claim *api.Persi
    }
    return plugin, class, nil
}

// findAlphaProvisionablePlugin returns a volume plugin compatible with
// Kubernetes 1.3.
// TODO: remove in Kubernetes 1.5
func (ctrl *PersistentVolumeController) findAlphaProvisionablePlugin() (vol.ProvisionableVolumePlugin, *extensions.StorageClass, error) {
    if ctrl.alphaProvisioner == nil {
        return nil, nil, fmt.Errorf("cannot find volume plugin for alpha provisioning")
    }

    // Return a dummy StorageClass instance with no parameters
    storageClass := &extensions.StorageClass{
        TypeMeta: unversioned.TypeMeta{
            Kind: "StorageClass",
        },
        ObjectMeta: api.ObjectMeta{
            Name: "",
        },
        Provisioner: ctrl.alphaProvisioner.GetPluginName(),
    }
    glog.V(4).Infof("using alpha provisioner %s", ctrl.alphaProvisioner.GetPluginName())
    return ctrl.alphaProvisioner, storageClass, nil
}

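Taken together, findProvisionablePlugin and findAlphaProvisionablePlugin give a simple precedence rule: the beta annotation wins when both are present, and the alpha annotation alone falls back to the single compiled-in provisioner. A minimal sketch of that decision using plain maps rather than the controller's types (the helper name is invented for illustration):

    package main

    import "fmt"

    const (
        annAlphaClass = "volume.alpha.kubernetes.io/storage-class"
        annClass      = "volume.beta.kubernetes.io/storage-class"
    )

    // useAlphaProvisioning reports whether a claim with these annotations should
    // take the alpha path: only when the alpha annotation is set and the beta
    // annotation is not (beta always wins when both are present).
    func useAlphaProvisioning(annotations map[string]string) bool {
        _, alpha := annotations[annAlphaClass]
        _, beta := annotations[annClass]
        return alpha && !beta
    }

    func main() {
        fmt.Println(useAlphaProvisioning(map[string]string{annAlphaClass: ""}))                   // true
        fmt.Println(useAlphaProvisioning(map[string]string{annClass: "fast"}))                    // false
        fmt.Println(useAlphaProvisioning(map[string]string{annAlphaClass: "", annClass: "fast"})) // false, beta wins
    }
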
@@ -48,6 +48,7 @@ import (
func NewPersistentVolumeController(
    kubeClient clientset.Interface,
    syncPeriod time.Duration,
    alphaProvisioner vol.ProvisionableVolumePlugin,
    volumePlugins []vol.VolumePlugin,
    cloud cloudprovider.Interface,
    clusterName string,

@@ -73,9 +74,15 @@ func NewPersistentVolumeController(
        clusterName:                   clusterName,
        createProvisionedPVRetryCount: createProvisionedPVRetryCount,
        createProvisionedPVInterval:   createProvisionedPVInterval,
        alphaProvisioner:              alphaProvisioner,
    }

    controller.volumePluginMgr.InitPlugins(volumePlugins, controller)
    if controller.alphaProvisioner != nil {
        if err := controller.alphaProvisioner.Init(controller); err != nil {
            glog.Errorf("PersistentVolumeController: error initializing alpha provisioner plugin: %v", err)
        }
    }

    if volumeSource == nil {
        volumeSource = &cache.ListWatch{

@@ -597,6 +597,7 @@ func newTestController(kubeClient clientset.Interface, volumeSource, claimSource
    ctrl := NewPersistentVolumeController(
        kubeClient,
        5*time.Second,        // sync period
        nil,                  // alpha provisioner
        []vol.VolumePlugin{}, // recyclers
        nil,                  // cloud
        "",

@@ -820,7 +821,9 @@ func wrapTestWithPluginCalls(expectedRecycleCalls, expectedDeleteCalls []error,
            provisionCalls: expectedProvisionCalls,
        }
        ctrl.volumePluginMgr.InitPlugins([]vol.VolumePlugin{plugin}, ctrl)

        if expectedProvisionCalls != nil {
            ctrl.alphaProvisioner = plugin
        }
        return toWrap(ctrl, reactor, test)
    }
}

@@ -124,6 +124,14 @@ func (pvIndex *persistentVolumeOrderedIndex) findByClaim(claim *api.PersistentVo
            return volume, nil
        }

        // In Alpha dynamic provisioning, we do not want to match claims
        // with existing PVs; findByClaim must find only PVs that are
        // pre-bound to the claim (by dynamic provisioning). TODO: remove in
        // 1.5
        if hasAnnotation(claim.ObjectMeta, annAlphaClass) {
            continue
        }

        // filter out:
        // - volumes bound to another claim
        // - volumes whose labels don't match the claim's selector, if specified

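The continue above means an alpha-annotated claim never binds to a pre-existing volume; it only accepts a PV that was provisioned and pre-bound for it, which is also what the 14-2 test below exercises. A rough, self-contained sketch of that filter with simplified types (not the real ordered index):

    package main

    import "fmt"

    type pv struct {
        name         string
        boundToClaim string // UID of the claim the PV is pre-bound to; "" if unbound
    }

    // candidatesFor keeps only PVs pre-bound to the claim when the claim carries
    // the alpha annotation; otherwise any unbound PV stays a candidate.
    func candidatesFor(claimUID string, claimIsAlpha bool, volumes []pv) []pv {
        var out []pv
        for _, v := range volumes {
            if claimIsAlpha && v.boundToClaim != claimUID {
                continue // alpha claims never match pre-existing, unrelated PVs
            }
            if v.boundToClaim != "" && v.boundToClaim != claimUID {
                continue // bound to another claim
            }
            out = append(out, v)
        }
        return out
    }

    func main() {
        vols := []pv{
            {name: "existing-pv", boundToClaim: ""},
            {name: "provisioned-pv", boundToClaim: "uid14-2"},
        }
        fmt.Println(candidatesFor("uid14-2", true, vols))  // only provisioned-pv
        fmt.Println(candidatesFor("uid14-2", false, vols)) // both volumes
    }
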
@@ -74,6 +74,10 @@ var provision2Success = provisionCall{
    expectedParameters: class2Parameters,
}

var provisionAlphaSuccess = provisionCall{
    ret: nil,
}

// Test single call to syncVolume, expecting provisioning to happen.
// 1. Fill in the controller with initial data
// 2. Call the syncVolume *once*.

@@ -314,6 +318,36 @@ func TestProvisionSync(t *testing.T) {
    runSyncTests(t, tests, storageClasses)
}

func TestAlphaProvisionSync(t *testing.T) {
    tests := []controllerTest{
        {
            // Provision a volume with alpha annotation
            "14-1 - successful alpha provisioning",
            novolumes,
            newVolumeArray("pvc-uid14-1", "1Gi", "uid14-1", "claim14-1", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned),
            newClaimArray("claim14-1", "uid14-1", "1Gi", "", api.ClaimPending, annAlphaClass),
            // Binding will be completed in the next syncClaim
            newClaimArray("claim14-1", "uid14-1", "1Gi", "", api.ClaimPending, annAlphaClass),
            noevents, noerrors, wrapTestWithProvisionCalls([]provisionCall{provisionAlphaSuccess}, testSyncClaim),
        },
        {
            // Provision success - there is already a volume available, still
            // we provision a new one when requested.
            "14-2 - no alpha provisioning when there is a volume available",
            newVolumeArray("volume14-2", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
            []*api.PersistentVolume{
                newVolume("volume14-2", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
                newVolume("pvc-uid14-2", "1Gi", "uid14-2", "claim14-2", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned),
            },
            newClaimArray("claim14-2", "uid14-2", "1Gi", "", api.ClaimPending, annAlphaClass),
            // Binding will be completed in the next syncClaim
            newClaimArray("claim14-2", "uid14-2", "1Gi", "", api.ClaimPending, annAlphaClass),
            noevents, noerrors, wrapTestWithProvisionCalls([]provisionCall{provisionAlphaSuccess}, testSyncClaim),
        },
    }
    runSyncTests(t, tests, []*extensions.StorageClass{})
}

// Test multiple calls to syncClaim/syncVolume and periodic sync of all
// volume/claims. The test follows this pattern:
// 0. Load the controller with initial data.

@@ -38,6 +38,69 @@ const (
    expectedSize = "2Gi"
)

func testDynamicProvisioning(client *client.Client, claim *api.PersistentVolumeClaim) {
    err := framework.WaitForPersistentVolumeClaimPhase(api.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
    Expect(err).NotTo(HaveOccurred())

    By("checking the claim")
    // Get new copy of the claim
    claim, err = client.PersistentVolumeClaims(claim.Namespace).Get(claim.Name)
    Expect(err).NotTo(HaveOccurred())

    // Get the bound PV
    pv, err := client.PersistentVolumes().Get(claim.Spec.VolumeName)
    Expect(err).NotTo(HaveOccurred())

    // Check sizes
    expectedCapacity := resource.MustParse(expectedSize)
    pvCapacity := pv.Spec.Capacity[api.ResourceName(api.ResourceStorage)]
    Expect(pvCapacity.Value()).To(Equal(expectedCapacity.Value()))

    requestedCapacity := resource.MustParse(requestedSize)
    claimCapacity := claim.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)]
    Expect(claimCapacity.Value()).To(Equal(requestedCapacity.Value()))

    // Check PV properties
    Expect(pv.Spec.PersistentVolumeReclaimPolicy).To(Equal(api.PersistentVolumeReclaimDelete))
    expectedAccessModes := []api.PersistentVolumeAccessMode{api.ReadWriteOnce}
    Expect(pv.Spec.AccessModes).To(Equal(expectedAccessModes))
    Expect(pv.Spec.ClaimRef.Name).To(Equal(claim.ObjectMeta.Name))
    Expect(pv.Spec.ClaimRef.Namespace).To(Equal(claim.ObjectMeta.Namespace))

    // We start two pods:
    // - The first writes 'hello world' to /mnt/test (= the volume).
    // - The second one runs grep 'hello world' on /mnt/test.
    // If both succeed, Kubernetes actually allocated something that is
    // persistent across pods.
    By("checking the created volume is writable")
    runInPodWithVolume(client, claim.Namespace, claim.Name, "echo 'hello world' > /mnt/test/data")

    By("checking the created volume is readable and retains data")
    runInPodWithVolume(client, claim.Namespace, claim.Name, "grep 'hello world' /mnt/test/data")

    // Ugly hack: if we delete the AWS/GCE/OpenStack volume here, it will
    // probably collide with destruction of the pods above - the pods
    // still have the volume attached (kubelet is slow...) and deletion
    // of an attached volume is not allowed by AWS/GCE/OpenStack.
    // Kubernetes *will* retry deletion several times in
    // pvclaimbinder-sync-period.
    // So, technically, this sleep is not needed. On the other hand,
    // the sync period is 10 minutes and we really don't want to wait
    // 10 minutes here. There is no way to see if kubelet is
    // finished with cleaning volumes. A small sleep here actually
    // speeds up the test!
    // Three minutes should be enough to clean up the pods properly.
    // We've seen GCE PD detach take more than 1 minute.
    By("Sleeping to let kubelet destroy all pods")
    time.Sleep(3 * time.Minute)

    By("deleting the claim")
    framework.ExpectNoError(client.PersistentVolumeClaims(claim.Namespace).Delete(claim.Name))

    // Wait for the PV to get deleted too.
    framework.ExpectNoError(framework.WaitForPersistentVolumeDeleted(client, pv.Name, 5*time.Second, 20*time.Minute))
}

var _ = framework.KubeDescribe("Dynamic provisioning", func() {
    f := framework.NewDefaultFramework("volume-provisioning")

@@ -61,85 +124,39 @@ var _ = framework.KubeDescribe("Dynamic provisioning", func() {
        Expect(err).NotTo(HaveOccurred())

        By("creating a claim with a dynamic provisioning annotation")
        claim := newClaim(ns)
        claim := newClaim(ns, false)
        defer func() {
            c.PersistentVolumeClaims(ns).Delete(claim.Name)
        }()
        claim, err = c.PersistentVolumeClaims(ns).Create(claim)
        Expect(err).NotTo(HaveOccurred())

        testDynamicProvisioning(c, claim)
    })
})

framework.KubeDescribe("DynamicProvisioner Alpha", func() {
    It("should create and delete alpha persistent volumes", func() {
        framework.SkipUnlessProviderIs("openstack", "gce", "aws", "gke")

        By("creating a claim with an alpha dynamic provisioning annotation")
        claim := newClaim(ns, true)
        defer func() {
            c.PersistentVolumeClaims(ns).Delete(claim.Name)
        }()
        claim, err := c.PersistentVolumeClaims(ns).Create(claim)
        Expect(err).NotTo(HaveOccurred())

        err = framework.WaitForPersistentVolumeClaimPhase(api.ClaimBound, c, ns, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
        Expect(err).NotTo(HaveOccurred())

        By("checking the claim")
        // Get new copy of the claim
        claim, err = c.PersistentVolumeClaims(ns).Get(claim.Name)
        Expect(err).NotTo(HaveOccurred())

        // Get the bound PV
        pv, err := c.PersistentVolumes().Get(claim.Spec.VolumeName)
        Expect(err).NotTo(HaveOccurred())

        // Check sizes
        expectedCapacity := resource.MustParse(expectedSize)
        pvCapacity := pv.Spec.Capacity[api.ResourceName(api.ResourceStorage)]
        Expect(pvCapacity.Value()).To(Equal(expectedCapacity.Value()))

        requestedCapacity := resource.MustParse(requestedSize)
        claimCapacity := claim.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)]
        Expect(claimCapacity.Value()).To(Equal(requestedCapacity.Value()))

        // Check PV properties
        Expect(pv.Spec.PersistentVolumeReclaimPolicy).To(Equal(api.PersistentVolumeReclaimDelete))
        expectedAccessModes := []api.PersistentVolumeAccessMode{api.ReadWriteOnce}
        Expect(pv.Spec.AccessModes).To(Equal(expectedAccessModes))
        Expect(pv.Spec.ClaimRef.Name).To(Equal(claim.ObjectMeta.Name))
        Expect(pv.Spec.ClaimRef.Namespace).To(Equal(claim.ObjectMeta.Namespace))

        // We start two pods:
        // - The first writes 'hello world' to /mnt/test (= the volume).
        // - The second one runs grep 'hello world' on /mnt/test.
        // If both succeed, Kubernetes actually allocated something that is
        // persistent across pods.
        By("checking the created volume is writable")
        runInPodWithVolume(c, ns, claim.Name, "echo 'hello world' > /mnt/test/data")

        By("checking the created volume is readable and retains data")
        runInPodWithVolume(c, ns, claim.Name, "grep 'hello world' /mnt/test/data")

        // Ugly hack: if we delete the AWS/GCE/OpenStack volume here, it will
        // probably collide with destruction of the pods above - the pods
        // still have the volume attached (kubelet is slow...) and deletion
        // of an attached volume is not allowed by AWS/GCE/OpenStack.
        // Kubernetes *will* retry deletion several times in
        // pvclaimbinder-sync-period.
        // So, technically, this sleep is not needed. On the other hand,
        // the sync period is 10 minutes and we really don't want to wait
        // 10 minutes here. There is no way to see if kubelet is
        // finished with cleaning volumes. A small sleep here actually
        // speeds up the test!
        // Three minutes should be enough to clean up the pods properly.
        // We've seen GCE PD detach take more than 1 minute.
        By("Sleeping to let kubelet destroy all pods")
        time.Sleep(3 * time.Minute)

        By("deleting the claim")
        framework.ExpectNoError(c.PersistentVolumeClaims(ns).Delete(claim.Name))

        // Wait for the PV to get deleted too.
        framework.ExpectNoError(framework.WaitForPersistentVolumeDeleted(c, pv.Name, 5*time.Second, 20*time.Minute))
        testDynamicProvisioning(c, claim)
    })
})
})

func newClaim(ns string) *api.PersistentVolumeClaim {
    return &api.PersistentVolumeClaim{
func newClaim(ns string, alpha bool) *api.PersistentVolumeClaim {
    claim := api.PersistentVolumeClaim{
        ObjectMeta: api.ObjectMeta{
            GenerateName: "pvc-",
            Namespace:    ns,
            Annotations: map[string]string{
                "volume.beta.kubernetes.io/storage-class": "fast",
            },
        },
        Spec: api.PersistentVolumeClaimSpec{
            AccessModes: []api.PersistentVolumeAccessMode{

@@ -152,6 +169,19 @@ func newClaim(ns string) *api.PersistentVolumeClaim {
            },
        },
    }

    if alpha {
        claim.Annotations = map[string]string{
            "volume.alpha.kubernetes.io/storage-class": "",
        }
    } else {
        claim.Annotations = map[string]string{
            "volume.beta.kubernetes.io/storage-class": "fast",
        }

    }

    return &claim
}

// runInPodWithVolume runs a command in a pod with given claim mounted to /mnt directory.

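The only difference between the alpha and beta e2e claims built by newClaim is the annotation: the alpha key carries an empty value, while the beta key names a StorageClass. A minimal sketch of just that branch (hypothetical helper, values as in the test above):

    package main

    import "fmt"

    // claimAnnotations mirrors the alpha/beta branch in newClaim above.
    func claimAnnotations(alpha bool) map[string]string {
        if alpha {
            return map[string]string{"volume.alpha.kubernetes.io/storage-class": ""}
        }
        return map[string]string{"volume.beta.kubernetes.io/storage-class": "fast"}
    }

    func main() {
        fmt.Println(claimAnnotations(true))  // map[volume.alpha.kubernetes.io/storage-class:]
        fmt.Println(claimAnnotations(false)) // map[volume.beta.kubernetes.io/storage-class:fast]
    }
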
@@ -1116,7 +1116,18 @@ func createClients(ns *api.Namespace, t *testing.T, s *httptest.Server, syncPeri
    cloud := &fake_cloud.FakeCloud{}

    syncPeriod = getSyncPeriod(syncPeriod)
    ctrl := persistentvolumecontroller.NewPersistentVolumeController(binderClient, syncPeriod, plugins, cloud, "", nil, nil, nil, nil, true)
    ctrl := persistentvolumecontroller.NewPersistentVolumeController(
        binderClient,
        syncPeriod,
        nil, // alpha provisioner
        plugins,
        cloud,
        "",   // cluster name
        nil,  // volumeSource
        nil,  // claimSource
        nil,  // classSource
        nil,  // eventRecorder
        true) // enableDynamicProvisioning

    watchPV, err := testClient.PersistentVolumes().Watch(api.ListOptions{})
    if err != nil {