mirror of https://github.com/k3s-io/k3s
Addressed feedback: added an opt-in command-line flag.
parent 2fe8ed5358
commit beacd8722a
@@ -60,6 +60,7 @@ type CMServer struct {
 	ResourceQuotaSyncPeriod time.Duration
 	NamespaceSyncPeriod     time.Duration
 	PVClaimBinderSyncPeriod time.Duration
+	EnablePVCClaimBinder    bool
 	RegisterRetryCount      int
 	MachineList             util.StringList
 	SyncNodeList            bool
@@ -117,6 +118,7 @@ func (s *CMServer) AddFlags(fs *pflag.FlagSet) {
 	fs.DurationVar(&s.ResourceQuotaSyncPeriod, "resource_quota_sync_period", s.ResourceQuotaSyncPeriod, "The period for syncing quota usage status in the system")
 	fs.DurationVar(&s.NamespaceSyncPeriod, "namespace_sync_period", s.NamespaceSyncPeriod, "The period for syncing namespace life-cycle updates")
 	fs.DurationVar(&s.PVClaimBinderSyncPeriod, "pvclaimbinder_sync_period", s.PVClaimBinderSyncPeriod, "The period for syncing persistent volumes and persistent volume claims")
+	fs.BoolVar(&s.EnablePVCClaimBinder, "enable_alpha_pvclaimbinder", s.EnablePVCClaimBinder, "Optionally enable persistent volume claim binding. This feature is experimental and expected to change.")
 	fs.DurationVar(&s.PodEvictionTimeout, "pod_eviction_timeout", s.PodEvictionTimeout, "The grace peroid for deleting pods on failed nodes.")
 	fs.Float32Var(&s.DeletingPodsQps, "deleting_pods_qps", 0.1, "Number of nodes per second on which pods are deleted in case of node failure.")
 	fs.IntVar(&s.DeletingPodsBurst, "deleting_pods_burst", 10, "Number of nodes on which pods are bursty deleted in case of node failure. For more details look into RateLimiter.")
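Usage note (not part of the diff): with this registration, the claim binder is opted in by passing --enable_alpha_pvclaimbinder=true to the controller manager; unless the server's constructor sets a different default, the bool keeps Go's zero value of false, so the experimental controller stays off for existing deployments.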
@@ -235,8 +237,10 @@ func (s *CMServer) Run(_ []string) error {
 	namespaceManager := namespace.NewNamespaceManager(kubeClient, s.NamespaceSyncPeriod)
 	namespaceManager.Run()
 
-	pvclaimBinder := volumeclaimbinder.NewPersistentVolumeClaimBinder(kubeClient, s.PVClaimBinderSyncPeriod)
-	pvclaimBinder.Run()
+	if s.EnablePVCClaimBinder {
+		pvclaimBinder := volumeclaimbinder.NewPersistentVolumeClaimBinder(kubeClient, s.PVClaimBinderSyncPeriod)
+		pvclaimBinder.Run()
+	}
 
 	select {}
 	return nil
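The Run() hunk above gates the experimental controller behind the new boolean. Below is a minimal, self-contained sketch of that opt-in pattern using the standard library flag package; runClaimBinder and the 10s default sync period are illustrative only, not the repository's pflag wiring or volumeclaimbinder package.

package main

import (
	"flag"
	"log"
	"time"
)

// runClaimBinder stands in for constructing and running the experimental
// controller; it is illustrative only.
func runClaimBinder(syncPeriod time.Duration) {
	log.Printf("persistent volume claim binder started (sync period %s)", syncPeriod)
}

func main() {
	// Opt-in flag, defaulting to false so the feature stays off unless requested.
	enable := flag.Bool("enable_alpha_pvclaimbinder", false,
		"Optionally enable persistent volume claim binding. This feature is experimental and expected to change.")
	syncPeriod := flag.Duration("pvclaimbinder_sync_period", 10*time.Second,
		"The period for syncing persistent volumes and persistent volume claims")
	flag.Parse()

	if *enable {
		runClaimBinder(*syncPeriod)
	}
}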
@@ -203,11 +203,13 @@ func FuzzerFor(t *testing.T, version string, src rand.Source) *fuzz.Fuzzer {
 		},
 		func(pv *api.PersistentVolume, c fuzz.Continue) {
 			c.FuzzNoCustom(pv) // fuzz self without calling this function again
-			pv.Status.Phase = api.VolumePending
+			types := []api.PersistentVolumePhase{api.VolumePending, api.VolumeBound, api.VolumeReleased, api.VolumeAvailable}
+			pv.Status.Phase = types[c.Rand.Intn(len(types))]
 		},
 		func(pvc *api.PersistentVolumeClaim, c fuzz.Continue) {
 			c.FuzzNoCustom(pvc) // fuzz self without calling this function again
-			pvc.Status.Phase = api.ClaimPending
+			types := []api.PersistentVolumeClaimPhase{api.ClaimBound, api.ClaimPending}
+			pvc.Status.Phase = types[c.Rand.Intn(len(types))]
 		},
 		func(s *api.NamespaceSpec, c fuzz.Continue) {
 			s.Finalizers = []api.FinalizerName{api.FinalizerKubernetes}
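For illustration, here is a compact standalone version of the pattern these fuzz functions follow with github.com/google/gofuzz: fuzz the struct normally, then constrain an enum-like phase field to a fixed set of valid values. Phase and VolumeStatus are stand-in types, not the API types above.

package main

import (
	"fmt"

	fuzz "github.com/google/gofuzz"
)

// Phase and VolumeStatus are illustrative stand-ins for the API types.
type Phase string

type VolumeStatus struct {
	Message string
	Phase   Phase
}

func main() {
	valid := []Phase{"Pending", "Bound", "Released", "Available"}
	f := fuzz.New().Funcs(
		func(s *VolumeStatus, c fuzz.Continue) {
			c.FuzzNoCustom(s) // fuzz the other fields without re-entering this function
			s.Phase = valid[c.Rand.Intn(len(valid))]
		},
	)

	var s VolumeStatus
	f.Fuzz(&s)
	fmt.Printf("phase=%s message=%q\n", s.Phase, s.Message)
}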
@@ -84,6 +84,8 @@ func NewPersistentVolumeClaimBinder(kubeClient client.Interface, syncPeriod time
 		framework.ResourceEventHandlerFuncs{
 			AddFunc:    binder.addClaim,
 			UpdateFunc: binder.updateClaim,
+			// no DeleteFunc needed. a claim requires no clean-up.
+			// the missing claim itself is the release of the resource.
 		},
 	)
 
@@ -177,6 +179,8 @@ func syncVolume(volumeIndex *persistentVolumeOrderedIndex, binderClient binderCl
 	if err == nil {
 		// bound and active. Build claim status as needed.
 		if claim.Status.VolumeRef == nil {
+			// syncClaimStatus sets VolumeRef, attempts to persist claim status,
+			// and does a rollback as needed on claim.Status
 			syncClaimStatus(binderClient, volume, claim)
 		}
 	} else {
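The new comment describes a mutate, persist, and roll back on failure sequence. A minimal sketch of that idea follows; Claim, ClaimStatus, and the persist callback are illustrative stand-ins rather than the repository's syncClaimStatus helper or its client types.

package main

import (
	"errors"
	"fmt"
)

// Illustrative stand-in types.
type ClaimStatus struct {
	VolumeRef string
	Phase     string
}

type Claim struct {
	Name   string
	Status ClaimStatus
}

// bindAndPersist mutates the claim's status to reference the volume, tries to
// persist it, and restores the previous status if persisting fails.
func bindAndPersist(persist func(*Claim) error, claim *Claim, volumeRef string) error {
	previous := claim.Status // remember the old status for rollback
	claim.Status.VolumeRef = volumeRef
	claim.Status.Phase = "Bound"
	if err := persist(claim); err != nil {
		claim.Status = previous // rollback so in-memory state matches what was stored
		return err
	}
	return nil
}

func main() {
	claim := &Claim{Name: "myclaim"}
	failingPersist := func(*Claim) error { return errors.New("api server unavailable") }
	if err := bindAndPersist(failingPersist, claim, "pv-0001"); err != nil {
		fmt.Printf("persist failed: %v; status rolled back to %+v\n", err, claim.Status)
	}
}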
@@ -198,6 +202,9 @@ func syncVolume(volumeIndex *persistentVolumeOrderedIndex, binderClient binderCl
 
 	if currentPhase != nextPhase {
 		volume.Status.Phase = nextPhase
+
+		// a change in state will trigger another update through this controller.
+		// each pass through this controller evaluates current phase and decides whether or not to change to the next phase
 		volume, err := binderClient.UpdatePersistentVolumeStatus(volume)
 		if err != nil {
 			// Rollback to previous phase
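The comments added here describe a single-step reconciliation: each pass evaluates the current phase, makes at most one transition, persists it, and relies on the resulting update event to drive the next pass. A rough standalone sketch of that loop, with illustrative Phase values and an updateStatus stand-in for the binder client:

package main

import "fmt"

type Phase string

const (
	Pending   Phase = "Pending"
	Available Phase = "Available"
	Bound     Phase = "Bound"
)

// nextPhase is an illustrative transition table; the real controller derives
// the next phase from the volume's claim binding, not a fixed map.
func nextPhase(current Phase) Phase {
	switch current {
	case Pending:
		return Available
	case Available:
		return Bound
	default:
		return current
	}
}

// syncOnce advances the phase by at most one step and persists it; a change in
// the stored object would trigger another pass through the controller.
func syncOnce(current Phase, updateStatus func(Phase) error) (Phase, error) {
	next := nextPhase(current)
	if next == current {
		return current, nil // nothing to do this pass
	}
	if err := updateStatus(next); err != nil {
		return current, err // keep the previous phase on failure
	}
	return next, nil
}

func main() {
	phase := Pending
	store := func(p Phase) error { fmt.Println("persisted phase:", p); return nil }
	for i := 0; i < 3; i++ { // simulate three passes driven by update events
		phase, _ = syncOnce(phase, store)
	}
	fmt.Println("final phase:", phase)
}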
@@ -220,7 +227,6 @@ func syncClaim(volumeIndex *persistentVolumeOrderedIndex, binderClient binderCli
 	nextPhase := currentPhase
 
 	switch currentPhase {
-
 	// pending claims await a matching volume
 	case api.ClaimPending:
 		volume, err := volumeIndex.FindBestMatchForClaim(claim)