mirror of https://github.com/k3s-io/k3s

combine feature gate VolumeScheduling and DynamicProvisioningScheduling into one

parent: cd06419973
commit: b4a57f6855
@@ -3137,7 +3137,7 @@ func ValidateTopologySelectorTerm(term core.TopologySelectorTerm, fldPath *field
 	exprMap := make(map[string]sets.String)
 	exprPath := fldPath.Child("matchLabelExpressions")

-	if utilfeature.DefaultFeatureGate.Enabled(features.DynamicProvisioningScheduling) {
+	if utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) {
 		// Allow empty MatchLabelExpressions, in case this field becomes optional in the future.
 		for i, req := range term.MatchLabelExpressions {
@@ -3152,7 +3152,7 @@ func ValidateTopologySelectorTerm(term core.TopologySelectorTerm, fldPath *field
 			exprMap[req.Key] = valueSet
 		}
 	} else if len(term.MatchLabelExpressions) != 0 {
-		allErrs = append(allErrs, field.Forbidden(fldPath, "field is disabled by feature-gate DynamicProvisioningScheduling"))
+		allErrs = append(allErrs, field.Forbidden(fldPath, "field is disabled by feature-gate VolumeScheduling"))
 	}

 	return exprMap, allErrs

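The two hunks above only swap which gate guards TopologySelectorTerm validation. A minimal standalone sketch of the underlying pattern — a single feature gate both enables validation of a field and yields a Forbidden-style error when the field is set while the gate is off — with a plain map standing in for the real feature-gate machinery:

```go
package main

import "fmt"

// featureGates is a stand-in for utilfeature.DefaultFeatureGate.
var featureGates = map[string]bool{"VolumeScheduling": true}

type matchLabelExpression struct {
	Key    string
	Values []string
}

// validateTerm mirrors the shape of ValidateTopologySelectorTerm: with the
// gate on, expressions are collected; with it off, any non-empty value is an
// error that names the gate.
func validateTerm(exprs []matchLabelExpression) (map[string][]string, []error) {
	exprMap := make(map[string][]string)
	var allErrs []error

	if featureGates["VolumeScheduling"] {
		for _, req := range exprs {
			exprMap[req.Key] = req.Values
		}
	} else if len(exprs) != 0 {
		allErrs = append(allErrs, fmt.Errorf("field is disabled by feature-gate VolumeScheduling"))
	}
	return exprMap, allErrs
}

func main() {
	m, errs := validateTerm([]matchLabelExpression{{Key: "zone", Values: []string{"us-east-1a"}}})
	fmt.Println(m, errs)
}
```
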
@@ -68,16 +68,14 @@ type StorageClass struct {

 	// VolumeBindingMode indicates how PersistentVolumeClaims should be
 	// provisioned and bound. When unset, VolumeBindingImmediate is used.
-	// This field is alpha-level and is only honored by servers that enable
-	// the VolumeScheduling feature.
+	// This field is only honored by servers that enable the VolumeScheduling feature.
 	// +optional
 	VolumeBindingMode *VolumeBindingMode

 	// Restrict the node topologies where volumes can be dynamically provisioned.
 	// Each volume plugin defines its own supported topology specifications.
 	// An empty TopologySelectorTerm list means there is no topology restriction.
-	// This field is alpha-level and is only honored by servers that enable
-	// the DynamicProvisioningScheduling feature.
+	// This field is only honored by servers that enable the VolumeScheduling feature.
 	// +optional
 	AllowedTopologies []api.TopologySelectorTerm
 }

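For context, a hedged illustration of a StorageClass exercising both fields whose doc comments change here, built with current k8s.io/api types; the object name, provisioner, and label key are example values, not taken from this commit:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	mode := storagev1.VolumeBindingWaitForFirstConsumer
	sc := storagev1.StorageClass{
		ObjectMeta:  metav1.ObjectMeta{Name: "topology-aware"},
		Provisioner: "kubernetes.io/no-provisioner", // example value
		// Both fields below are gated by VolumeScheduling after this commit.
		VolumeBindingMode: &mode,
		AllowedTopologies: []corev1.TopologySelectorTerm{{
			MatchLabelExpressions: []corev1.TopologySelectorLabelRequirement{{
				Key:    "failure-domain.beta.kubernetes.io/zone",
				Values: []string{"us-east-1a", "us-east-1b"},
			}},
		}},
	}
	fmt.Printf("%s: mode=%s, %d topology term(s)\n", sc.Name, *sc.VolumeBindingMode, len(sc.AllowedTopologies))
}
```
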
@@ -26,8 +26,6 @@ import (
 func DropDisabledAlphaFields(class *storage.StorageClass) {
 	if !utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) {
 		class.VolumeBindingMode = nil
-	}
-	if !utilfeature.DefaultFeatureGate.Enabled(features.DynamicProvisioningScheduling) {
 		class.AllowedTopologies = nil
 	}
 }

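The hunk above collapses two gate checks into one, so both fields are now dropped together. A minimal sketch of the drop-disabled-fields pattern with stand-in types (a map instead of the real utilfeature API):

```go
package main

import "fmt"

var gates = map[string]bool{"VolumeScheduling": false}

type storageClass struct {
	VolumeBindingMode *string
	AllowedTopologies []string
}

// dropDisabledFields clears gated fields before persisting, so a cluster
// with VolumeScheduling off never stores either of them.
func dropDisabledFields(class *storageClass) {
	if !gates["VolumeScheduling"] {
		class.VolumeBindingMode = nil
		class.AllowedTopologies = nil
	}
}

func main() {
	mode := "WaitForFirstConsumer"
	sc := storageClass{VolumeBindingMode: &mode, AllowedTopologies: []string{"zone-a"}}
	dropDisabledFields(&sc)
	fmt.Printf("%+v\n", sc) // both fields cleared together
}
```
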
@@ -250,8 +250,8 @@ func validateAllowedTopologies(topologies []api.TopologySelectorTerm, fldPath *f
 		return allErrs
 	}

-	if !utilfeature.DefaultFeatureGate.Enabled(features.DynamicProvisioningScheduling) {
-		allErrs = append(allErrs, field.Forbidden(fldPath, "field is disabled by feature-gate DynamicProvisioningScheduling"))
+	if !utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) {
+		allErrs = append(allErrs, field.Forbidden(fldPath, "field is disabled by feature-gate VolumeScheduling"))
 	}

 	rawTopologies := make([]map[string]sets.String, len(topologies))

@@ -140,7 +140,7 @@ const annStorageProvisioner = "volume.beta.kubernetes.io/storage-provisioner"

 // This annotation is added to a PVC that has been triggered by scheduler to
 // be dynamically provisioned. Its value is the name of the selected node.
-const annSelectedNode = "volume.alpha.kubernetes.io/selected-node"
+const annSelectedNode = "volume.kubernetes.io/selected-node"

 // If the provisioner name in a storage class is set to "kubernetes.io/no-provisioner",
 // then dynamic provisioning is not supported by the storage.

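Renaming the constant changes the annotation key written to live PVC objects. A hypothetical reader — not part of this commit — that tolerates both the old alpha key and the new key during a migration window:

```go
package main

import "fmt"

const (
	annSelectedNode      = "volume.kubernetes.io/selected-node"
	annSelectedNodeAlpha = "volume.alpha.kubernetes.io/selected-node" // pre-rename key
)

// selectedNode returns the node name a scheduler recorded on the claim's
// annotations, preferring the new key over the old alpha key.
func selectedNode(annotations map[string]string) (string, bool) {
	if node, ok := annotations[annSelectedNode]; ok {
		return node, true
	}
	node, ok := annotations[annSelectedNodeAlpha]
	return node, ok
}

func main() {
	pvcAnnotations := map[string]string{annSelectedNode: "node-1"}
	node, ok := selectedNode(pvcAnnotations)
	fmt.Println(node, ok)
}
```
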
@@ -290,14 +290,12 @@ func (ctrl *PersistentVolumeController) shouldDelayBinding(claim *v1.PersistentV
 		return false, nil
 	}

-	if utilfeature.DefaultFeatureGate.Enabled(features.DynamicProvisioningScheduling) {
-		// When feature DynamicProvisioningScheduling enabled,
-		// Scheduler signal to the PV controller to start dynamic
-		// provisioning by setting the "annSelectedNode" annotation
-		// in the PVC
-		if _, ok := claim.Annotations[annSelectedNode]; ok {
-			return false, nil
-		}
+	// When feature VolumeScheduling enabled,
+	// Scheduler signal to the PV controller to start dynamic
+	// provisioning by setting the "annSelectedNode" annotation
+	// in the PVC
+	if _, ok := claim.Annotations[annSelectedNode]; ok {
+		return false, nil
 	}

 	className := v1helper.GetPersistentVolumeClaimClass(claim)

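After the un-nesting above, the selected-node check runs on every pass through the merged gate's code path. A standalone sketch of the resulting control flow, using stand-in types rather than the controller's real signature:

```go
package main

import "fmt"

const annSelectedNode = "volume.kubernetes.io/selected-node"

type claim struct {
	Annotations map[string]string
	// WaitForFirstConsumer stands in for the real StorageClass lookup.
	WaitForFirstConsumer bool
}

// shouldDelayBinding: a claim the scheduler has already placed never waits,
// regardless of its StorageClass binding mode.
func shouldDelayBinding(c claim) bool {
	if _, ok := c.Annotations[annSelectedNode]; ok {
		return false
	}
	return c.WaitForFirstConsumer
}

func main() {
	c := claim{Annotations: map[string]string{annSelectedNode: "node-1"}, WaitForFirstConsumer: true}
	fmt.Println(shouldDelayBinding(c)) // false: the annotation wins
}
```
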
@@ -1477,25 +1475,22 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.Persis
 	}

 	var selectedNode *v1.Node = nil
-	var allowedTopologies []v1.TopologySelectorTerm = nil
-	if utilfeature.DefaultFeatureGate.Enabled(features.DynamicProvisioningScheduling) {
-		if nodeName, ok := claim.Annotations[annSelectedNode]; ok {
-			selectedNode, err = ctrl.NodeLister.Get(nodeName)
-			if err != nil {
-				strerr := fmt.Sprintf("Failed to get target node: %v", err)
-				glog.V(3).Infof("unexpected error getting target node %q for claim %q: %v", nodeName, claimToClaimKey(claim), err)
-				ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, events.ProvisioningFailed, strerr)
-				return pluginName, err
-			}
+	if nodeName, ok := claim.Annotations[annSelectedNode]; ok {
+		selectedNode, err = ctrl.NodeLister.Get(nodeName)
+		if err != nil {
+			strerr := fmt.Sprintf("Failed to get target node: %v", err)
+			glog.V(3).Infof("unexpected error getting target node %q for claim %q: %v", nodeName, claimToClaimKey(claim), err)
+			ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, events.ProvisioningFailed, strerr)
+			return pluginName, err
 		}
-		allowedTopologies = storageClass.AllowedTopologies
 	}
+	allowedTopologies := storageClass.AllowedTopologies

 	opComplete := util.OperationCompleteHook(plugin.GetPluginName(), "volume_provision")
 	volume, err = provisioner.Provision(selectedNode, allowedTopologies)
 	opComplete(&err)
 	if err != nil {
-		// Other places of failure have nothing to do with DynamicProvisioningScheduling,
+		// Other places of failure have nothing to do with VolumeScheduling,
 		// so just let controller retry in the next sync. We'll only call func
 		// rescheduleProvisioning here when the underlying provisioning actually failed.
 		ctrl.rescheduleProvisioning(claim)

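With the gate gone, node lookup and topology pass-through happen unconditionally. A simplified, self-contained sketch of that flow — a map stands in for the controller's NodeLister, and a print stands in for the plugin's Provision call:

```go
package main

import (
	"errors"
	"fmt"
)

const annSelectedNode = "volume.kubernetes.io/selected-node"

type node struct{ Name string }

var nodes = map[string]node{"node-1": {Name: "node-1"}} // stand-in for NodeLister

func provision(claimAnnotations map[string]string, allowedTopologies []string) error {
	var selectedNode *node
	if nodeName, ok := claimAnnotations[annSelectedNode]; ok {
		n, found := nodes[nodeName]
		if !found {
			// Mirrors the controller: surface the lookup failure and retry later.
			return errors.New("failed to get target node " + nodeName)
		}
		selectedNode = &n
	}
	// Topologies are now passed through whether or not a node was selected.
	name := "<none>"
	if selectedNode != nil {
		name = selectedNode.Name
	}
	fmt.Printf("provisioning: node=%s topologies=%v\n", name, allowedTopologies)
	return nil
}

func main() {
	_ = provision(map[string]string{annSelectedNode: "node-1"}, []string{"zone-a"})
}
```
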
@@ -456,7 +456,7 @@ func TestAssumeUpdatePVCCache(t *testing.T) {

 	// Assume PVC
 	newPVC := pvc.DeepCopy()
-	newPVC.Annotations["volume.alpha.kubernetes.io/selected-node"] = "test-node"
+	newPVC.Annotations[annSelectedNode] = "test-node"
 	if err := cache.Assume(newPVC); err != nil {
 		t.Fatalf("failed to assume PVC: %v", err)
 	}

@@ -25,12 +25,10 @@ import (
 	"k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	coreinformers "k8s.io/client-go/informers/core/v1"
 	storageinformers "k8s.io/client-go/informers/storage/v1"
 	clientset "k8s.io/client-go/kubernetes"
 	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
-	"k8s.io/kubernetes/pkg/features"
 	volumeutil "k8s.io/kubernetes/pkg/volume/util"
 )

@@ -169,13 +167,11 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolume
 		return false, false, err
 	}

-	if utilfeature.DefaultFeatureGate.Enabled(features.DynamicProvisioningScheduling) {
-		// Try to provision for unbound volumes
-		if !unboundVolumesSatisfied {
-			unboundVolumesSatisfied, err = b.checkVolumeProvisions(pod, claimsToProvision, node)
-			if err != nil {
-				return false, false, err
-			}
+	// Try to provision for unbound volumes
+	if !unboundVolumesSatisfied {
+		unboundVolumesSatisfied, err = b.checkVolumeProvisions(pod, claimsToProvision, node)
+		if err != nil {
+			return false, false, err
 		}
 	}

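The same de-nesting applies in the scheduler's volume binder: provisioning is now always considered when unbound volumes remain unsatisfied. A simplified sketch with stand-in signatures:

```go
package main

import "fmt"

// checkVolumeProvisions is a stand-in: pretend any claim can be provisioned
// on node-1 only.
func checkVolumeProvisions(claims []string, node string) (bool, error) {
	return node == "node-1", nil
}

// findPodVolumes mirrors the post-merge flow: no gate check, just fall
// through to the provisioning feasibility check.
func findPodVolumes(unboundSatisfied bool, claimsToProvision []string, node string) (bool, error) {
	if !unboundSatisfied {
		ok, err := checkVolumeProvisions(claimsToProvision, node)
		if err != nil {
			return false, err
		}
		unboundSatisfied = ok
	}
	return unboundSatisfied, nil
}

func main() {
	ok, _ := findPodVolumes(false, []string{"pvc-1"}, "node-1")
	fmt.Println(ok) // true: provisioning can satisfy the pod on node-1
}
```
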
@@ -312,12 +312,6 @@ const (
 	// Support Pod Ready++
 	PodReadinessGates utilfeature.Feature = "PodReadinessGates"

-	// owner: @lichuqiang
-	// alpha: v1.11
-	//
-	// Extend the default scheduler to be aware of volume topology and handle PV provisioning
-	DynamicProvisioningScheduling utilfeature.Feature = "DynamicProvisioningScheduling"
-
 	// owner: @kevtaylor
 	// alpha: v1.11
 	//

@@ -415,7 +409,6 @@ var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureS
 	RunAsGroup:                    {Default: false, PreRelease: utilfeature.Alpha},
 	VolumeSubpath:                 {Default: true, PreRelease: utilfeature.GA},
 	BalanceAttachedNodeVolumes:    {Default: false, PreRelease: utilfeature.Alpha},
-	DynamicProvisioningScheduling: {Default: false, PreRelease: utilfeature.Alpha},
 	PodReadinessGates:             {Default: true, PreRelease: utilfeature.Beta},
 	VolumeSubpathEnvExpansion:     {Default: false, PreRelease: utilfeature.Alpha},
 	KubeletPluginsWatcher:         {Default: false, PreRelease: utilfeature.Alpha},

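Both hunks above delete DynamicProvisioningScheduling from the gate registry, leaving VolumeScheduling as the single switch. A minimal sketch of the registry shape with stand-in types and illustrative defaults, not the real k8s.io/apiserver feature package:

```go
package main

import "fmt"

type prerelease string

const (
	alpha prerelease = "ALPHA"
	beta  prerelease = "BETA"
)

type featureSpec struct {
	Default    bool
	PreRelease prerelease
}

// defaultFeatureGates: each gate is a named default plus a maturity stage;
// removing the DynamicProvisioningScheduling row makes its behavior ride on
// VolumeScheduling. Values below are illustrative only.
var defaultFeatureGates = map[string]featureSpec{
	"VolumeScheduling":          {Default: true, PreRelease: beta},
	"PodReadinessGates":         {Default: true, PreRelease: beta},
	"VolumeSubpathEnvExpansion": {Default: false, PreRelease: alpha},
}

func main() {
	for name, spec := range defaultFeatureGates {
		fmt.Printf("%s: default=%t stage=%s\n", name, spec.Default, spec.PreRelease)
	}
}
```
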
@@ -345,7 +345,7 @@ func SelectZonesForVolume(zoneParameterPresent, zonesParameterPresent bool, zone
 	var zoneFromNode string
 	// pick one zone from node if present
 	if node != nil {
-		// DynamicProvisioningScheduling implicit since node is not nil
+		// VolumeScheduling implicit since node is not nil
 		if zoneParameterPresent || zonesParameterPresent {
 			return nil, fmt.Errorf("zone[s] cannot be specified in StorageClass if VolumeBindingMode is set to WaitForFirstConsumer. Please specify allowedTopologies in StorageClass for constraining zones")
 		}

@@ -373,7 +373,7 @@ func SelectZonesForVolume(zoneParameterPresent, zonesParameterPresent bool, zone
 	}

 	if allowedZones.Len() > 0 {
-		// DynamicProvisioningScheduling implicit since allowedZones present
+		// VolumeScheduling implicit since allowedZones present
 		if zoneParameterPresent || zonesParameterPresent {
 			return nil, fmt.Errorf("zone[s] cannot be specified in StorageClass if allowedTopologies specified")
 		}

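These comments encode a mutual-exclusion rule: explicit zone/zones StorageClass parameters conflict with the topology-aware paths (a scheduler-selected node, or zones derived from allowedTopologies). A simplified standalone check, not the real SelectZonesForVolume signature:

```go
package main

import (
	"errors"
	"fmt"
)

// checkZoneParams rejects explicit zone parameters whenever a
// topology-aware signal is present.
func checkZoneParams(zoneParamPresent, zonesParamPresent, haveNode bool, allowedZones int) error {
	if haveNode && (zoneParamPresent || zonesParamPresent) {
		return errors.New("zone[s] cannot be specified in StorageClass if VolumeBindingMode is set to WaitForFirstConsumer")
	}
	if allowedZones > 0 && (zoneParamPresent || zonesParamPresent) {
		return errors.New("zone[s] cannot be specified in StorageClass if allowedTopologies specified")
	}
	return nil
}

func main() {
	fmt.Println(checkZoneParams(true, false, true, 0)) // conflict: node already selected
}
```
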
@@ -487,16 +487,13 @@ func ClusterRoles() []rbacv1.ClusterRole {
 	}

 	if utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) {
-		rules := []rbacv1.PolicyRule{
-			rbacv1helpers.NewRule(ReadUpdate...).Groups(legacyGroup).Resources("persistentvolumes").RuleOrDie(),
-			rbacv1helpers.NewRule(Read...).Groups(storageGroup).Resources("storageclasses").RuleOrDie(),
-		}
-		if utilfeature.DefaultFeatureGate.Enabled(features.DynamicProvisioningScheduling) {
-			rules = append(rules, rbacv1helpers.NewRule(ReadUpdate...).Groups(legacyGroup).Resources("persistentvolumeclaims").RuleOrDie())
-		}
 		roles = append(roles, rbacv1.ClusterRole{
 			ObjectMeta: metav1.ObjectMeta{Name: "system:volume-scheduler"},
-			Rules:      rules,
+			Rules: []rbacv1.PolicyRule{
+				rbacv1helpers.NewRule(ReadUpdate...).Groups(legacyGroup).Resources("persistentvolumes").RuleOrDie(),
+				rbacv1helpers.NewRule(Read...).Groups(storageGroup).Resources("storageclasses").RuleOrDie(),
+				rbacv1helpers.NewRule(ReadUpdate...).Groups(legacyGroup).Resources("persistentvolumeclaims").RuleOrDie(),
+			},
 		})
 	}

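The RBAC hunk folds the conditional persistentvolumeclaims rule into the base rule list, so the volume scheduler gets PVC read/update whenever VolumeScheduling is on. A hedged equivalent built with plain k8s.io/api/rbac/v1 types; the verb slices below approximate the repo's ReadUpdate and Read helpers:

```go
package main

import (
	"fmt"

	rbacv1 "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	readUpdate := []string{"get", "list", "watch", "update", "patch"} // approximation
	read := []string{"get", "list", "watch"}                          // approximation

	role := rbacv1.ClusterRole{
		ObjectMeta: metav1.ObjectMeta{Name: "system:volume-scheduler"},
		Rules: []rbacv1.PolicyRule{
			{Verbs: readUpdate, APIGroups: []string{""}, Resources: []string{"persistentvolumes"}},
			{Verbs: read, APIGroups: []string{"storage.k8s.io"}, Resources: []string{"storageclasses"}},
			// Previously appended only when DynamicProvisioningScheduling was on:
			{Verbs: readUpdate, APIGroups: []string{""}, Resources: []string{"persistentvolumeclaims"}},
		},
	}
	fmt.Printf("%s has %d rules\n", role.Name, len(role.Rules))
}
```
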
@@ -62,16 +62,14 @@ type StorageClass struct {

 	// VolumeBindingMode indicates how PersistentVolumeClaims should be
 	// provisioned and bound. When unset, VolumeBindingImmediate is used.
-	// This field is alpha-level and is only honored by servers that enable
-	// the VolumeScheduling feature.
+	// This field is only honored by servers that enable the VolumeScheduling feature.
 	// +optional
 	VolumeBindingMode *VolumeBindingMode `json:"volumeBindingMode,omitempty" protobuf:"bytes,7,opt,name=volumeBindingMode"`

 	// Restrict the node topologies where volumes can be dynamically provisioned.
 	// Each volume plugin defines its own supported topology specifications.
 	// An empty TopologySelectorTerm list means there is no topology restriction.
-	// This field is alpha-level and is only honored by servers that enable
-	// the DynamicProvisioningScheduling feature.
+	// This field is only honored by servers that enable the VolumeScheduling feature.
 	// +optional
 	AllowedTopologies []v1.TopologySelectorTerm `json:"allowedTopologies,omitempty" protobuf:"bytes,8,rep,name=allowedTopologies"`
 }

@@ -62,16 +62,14 @@ type StorageClass struct {

 	// VolumeBindingMode indicates how PersistentVolumeClaims should be
 	// provisioned and bound. When unset, VolumeBindingImmediate is used.
-	// This field is alpha-level and is only honored by servers that enable
-	// the VolumeScheduling feature.
+	// This field is only honored by servers that enable the VolumeScheduling feature.
 	// +optional
 	VolumeBindingMode *VolumeBindingMode `json:"volumeBindingMode,omitempty" protobuf:"bytes,7,opt,name=volumeBindingMode"`

 	// Restrict the node topologies where volumes can be dynamically provisioned.
 	// Each volume plugin defines its own supported topology specifications.
 	// An empty TopologySelectorTerm list means there is no topology restriction.
-	// This field is alpha-level and is only honored by servers that enable
-	// the DynamicProvisioningScheduling feature.
+	// This field is only honored by servers that enable the VolumeScheduling feature.
 	// +optional
 	AllowedTopologies []v1.TopologySelectorTerm `json:"allowedTopologies,omitempty" protobuf:"bytes,8,rep,name=allowedTopologies"`
 }