Merge pull request #59952 from resouer/consts-handler

Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Use consts as predicate key names in handlers

**What this PR does / why we need it**:

Per discussion in: https://github.com/kubernetes/kubernetes/pull/59335/files#r168351460
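
In short: scheduler cache-invalidation handlers that previously passed raw string literals as predicate keys now use the exported consts from the predicates package, so a mistyped key becomes a compile-time error rather than a silently ignored cache key. A minimal before/after sketch of the pattern, lifted from the diff below:

```go
// Before: a raw string literal; a typo still compiles and silently
// fails to match the registered predicate name.
invalidPredicates.Insert("NoVolumeZoneConflict")

// After: the exported const; a typo becomes an "undefined" compile error.
invalidPredicates.Insert(predicates.NoVolumeZoneConflictPred)
```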

**Which issue(s) this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close the issue(s) when PR gets merged)*:
Fixes #59951

**Special notes for your reviewer**:

**Release note**:

```release-note
Use consts as predicate names in handlers
```
Kubernetes Submit Queue · 2018-02-22 15:45:16 -08:00 · committed by GitHub
commit a195a76151
2 changed files with 23 additions and 23 deletions

**File 1 of 2**

```diff
@@ -72,8 +72,8 @@ const (
 	PodToleratesNodeNoExecuteTaintsPred = "PodToleratesNodeNoExecuteTaints"
 	// CheckNodeLabelPresencePred defines the name of predicate CheckNodeLabelPresence.
 	CheckNodeLabelPresencePred = "CheckNodeLabelPresence"
-	// checkServiceAffinityPred defines the name of predicate checkServiceAffinity.
-	checkServiceAffinityPred = "checkServiceAffinity"
+	// CheckServiceAffinityPred defines the name of predicate checkServiceAffinity.
+	CheckServiceAffinityPred = "CheckServiceAffinity"
 	// MaxEBSVolumeCountPred defines the name of predicate MaxEBSVolumeCount.
 	MaxEBSVolumeCountPred = "MaxEBSVolumeCount"
 	// MaxGCEPDVolumeCountPred defines the name of predicate MaxGCEPDVolumeCount.
@@ -128,7 +128,7 @@ var (
 		GeneralPred, HostNamePred, PodFitsHostPortsPred,
 		MatchNodeSelectorPred, PodFitsResourcesPred, NoDiskConflictPred,
 		PodToleratesNodeTaintsPred, PodToleratesNodeNoExecuteTaintsPred, CheckNodeLabelPresencePred,
-		checkServiceAffinityPred, MaxEBSVolumeCountPred, MaxGCEPDVolumeCountPred,
+		CheckServiceAffinityPred, MaxEBSVolumeCountPred, MaxGCEPDVolumeCountPred,
 		MaxAzureDiskVolumeCountPred, CheckVolumeBindingPred, NoVolumeZoneConflictPred,
 		CheckNodeMemoryPressurePred, CheckNodeDiskPressurePred, MatchInterPodAffinityPred}
 )
```
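
Note that the rename from `checkServiceAffinityPred` to `CheckServiceAffinityPred` also exports the const, which is what allows the factory file below to reference it as `predicates.CheckServiceAffinityPred`. A minimal sketch of out-of-package use; the import path is an assumption based on the scheduler's package layout around this release:

```go
package main

import (
	"fmt"

	// Assumed import path for the predicates package at this point in history.
	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
)

func main() {
	// Exported consts serve as stable, typo-proof predicate keys.
	fmt.Println(predicates.CheckServiceAffinityPred) // "CheckServiceAffinity"
}
```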

**File 2 of 2**

```diff
@@ -70,11 +70,11 @@ const (
 )
 
 var (
-	serviceAffinitySet            = sets.NewString("ServiceAffinity")
-	matchInterPodAffinitySet      = sets.NewString("MatchInterPodAffinity")
-	generalPredicatesSets         = sets.NewString("GeneralPredicates")
-	noDiskConflictSet             = sets.NewString("NoDiskConflict")
-	maxPDVolumeCountPredicateKeys = []string{"MaxGCEPDVolumeCount", "MaxAzureDiskVolumeCount", "MaxEBSVolumeCount"}
+	serviceAffinitySet            = sets.NewString(predicates.CheckServiceAffinityPred)
+	matchInterPodAffinitySet      = sets.NewString(predicates.MatchInterPodAffinityPred)
+	generalPredicatesSets         = sets.NewString(predicates.GeneralPred)
+	noDiskConflictSet             = sets.NewString(predicates.NoDiskConflictPred)
+	maxPDVolumeCountPredicateKeys = []string{predicates.MaxGCEPDVolumeCountPred, predicates.MaxAzureDiskVolumeCountPred, predicates.MaxEBSVolumeCountPred}
 )
 
 // configFactory is the default implementation of the scheduler.Configurator interface.
@@ -377,7 +377,7 @@ func (c *configFactory) invalidatePredicatesForPvUpdate(oldPV, newPV *v1.Persist
 	for k, v := range newPV.Labels {
 		// If PV update modifies the zone/region labels.
 		if isZoneRegionLabel(k) && !reflect.DeepEqual(v, oldPV.Labels[k]) {
-			invalidPredicates.Insert("NoVolumeZoneConflict")
+			invalidPredicates.Insert(predicates.NoVolumeZoneConflictPred)
 			break
 		}
 	}
@@ -434,19 +434,19 @@ func (c *configFactory) invalidatePredicatesForPv(pv *v1.PersistentVolume) {
 	// PV types which impact MaxPDVolumeCountPredicate
 	if pv.Spec.AWSElasticBlockStore != nil {
-		invalidPredicates.Insert("MaxEBSVolumeCount")
+		invalidPredicates.Insert(predicates.MaxEBSVolumeCountPred)
 	}
 	if pv.Spec.GCEPersistentDisk != nil {
-		invalidPredicates.Insert("MaxGCEPDVolumeCount")
+		invalidPredicates.Insert(predicates.MaxGCEPDVolumeCountPred)
 	}
 	if pv.Spec.AzureDisk != nil {
-		invalidPredicates.Insert("MaxAzureDiskVolumeCount")
+		invalidPredicates.Insert(predicates.MaxAzureDiskVolumeCountPred)
 	}
 
 	// If PV contains zone related label, it may impact cached NoVolumeZoneConflict
 	for k := range pv.Labels {
 		if isZoneRegionLabel(k) {
-			invalidPredicates.Insert("NoVolumeZoneConflict")
+			invalidPredicates.Insert(predicates.NoVolumeZoneConflictPred)
 			break
 		}
 	}
@@ -520,7 +520,7 @@ func (c *configFactory) invalidatePredicatesForPvc(pvc *v1.PersistentVolumeClaim
 	invalidPredicates := sets.NewString(maxPDVolumeCountPredicateKeys...)
 
 	// The bound volume's label may change
-	invalidPredicates.Insert("NoVolumeZoneConflict")
+	invalidPredicates.Insert(predicates.NoVolumeZoneConflictPred)
 
 	if utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) {
 		// Add/delete impacts the available PVs to choose from
@@ -779,19 +779,19 @@ func (c *configFactory) invalidateCachedPredicatesOnNodeUpdate(newNode *v1.Node,
 	invalidPredicates := sets.NewString()
 	if !reflect.DeepEqual(oldNode.Status.Allocatable, newNode.Status.Allocatable) {
-		invalidPredicates.Insert("GeneralPredicates") // "PodFitsResources"
+		invalidPredicates.Insert(predicates.GeneralPred) // "PodFitsResources"
 	}
 	if !reflect.DeepEqual(oldNode.GetLabels(), newNode.GetLabels()) {
-		invalidPredicates.Insert("GeneralPredicates", "ServiceAffinity") // "PodSelectorMatches"
+		invalidPredicates.Insert(predicates.GeneralPred, predicates.CheckServiceAffinityPred) // "PodSelectorMatches"
 		for k, v := range oldNode.GetLabels() {
 			// any label can be topology key of pod, we have to invalidate in all cases
 			if v != newNode.GetLabels()[k] {
-				invalidPredicates.Insert("MatchInterPodAffinity")
+				invalidPredicates.Insert(predicates.MatchInterPodAffinityPred)
 			}
 			// NoVolumeZoneConflict will only be affected by zone related label change
 			if isZoneRegionLabel(k) {
 				if v != newNode.GetLabels()[k] {
-					invalidPredicates.Insert("NoVolumeZoneConflict")
+					invalidPredicates.Insert(predicates.NoVolumeZoneConflictPred)
 				}
 			}
 		}
@@ -807,7 +807,7 @@ func (c *configFactory) invalidateCachedPredicatesOnNodeUpdate(newNode *v1.Node,
 	}
 	if !reflect.DeepEqual(oldTaints, newTaints) ||
 		!reflect.DeepEqual(oldNode.Spec.Taints, newNode.Spec.Taints) {
-		invalidPredicates.Insert("PodToleratesNodeTaints")
+		invalidPredicates.Insert(predicates.PodToleratesNodeTaintsPred)
 	}
 
 	if !reflect.DeepEqual(oldNode.Status.Conditions, newNode.Status.Conditions) {
@@ -820,19 +820,19 @@ func (c *configFactory) invalidateCachedPredicatesOnNodeUpdate(newNode *v1.Node,
 			newConditions[cond.Type] = cond.Status
 		}
 		if oldConditions[v1.NodeMemoryPressure] != newConditions[v1.NodeMemoryPressure] {
-			invalidPredicates.Insert("CheckNodeMemoryPressure")
+			invalidPredicates.Insert(predicates.CheckNodeMemoryPressurePred)
 		}
 		if oldConditions[v1.NodeDiskPressure] != newConditions[v1.NodeDiskPressure] {
-			invalidPredicates.Insert("CheckNodeDiskPressure")
+			invalidPredicates.Insert(predicates.CheckNodeDiskPressurePred)
 		}
 		if oldConditions[v1.NodeReady] != newConditions[v1.NodeReady] ||
 			oldConditions[v1.NodeOutOfDisk] != newConditions[v1.NodeOutOfDisk] ||
 			oldConditions[v1.NodeNetworkUnavailable] != newConditions[v1.NodeNetworkUnavailable] {
-			invalidPredicates.Insert("CheckNodeCondition")
+			invalidPredicates.Insert(predicates.CheckNodeConditionPred)
 		}
 	}
 	if newNode.Spec.Unschedulable != oldNode.Spec.Unschedulable {
-		invalidPredicates.Insert("CheckNodeCondition")
+		invalidPredicates.Insert(predicates.CheckNodeConditionPred)
 	}
 	c.equivalencePodCache.InvalidateCachedPredicateItem(newNode.GetName(), invalidPredicates)
 }
```
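
The payoff is in the last line above: `InvalidateCachedPredicateItem` is keyed by predicate name, so invalidation only works when the key exactly matches the name the predicate was registered under. A toy, self-contained sketch of the failure mode the consts prevent (the map and names below are hypothetical stand-ins, not the scheduler's actual cache API):

```go
package main

import "fmt"

// NoVolumeZoneConflictPred mirrors the exported predicate-name const.
const NoVolumeZoneConflictPred = "NoVolumeZoneConflict"

func main() {
	// Stand-in for a cache keyed by predicate name.
	cache := map[string]bool{NoVolumeZoneConflictPred: true}

	// With a raw literal, a typo compiles and the stale entry survives:
	delete(cache, "NoVolumeZoneConflic") // note the missing "t"
	fmt.Println(len(cache)) // 1 -- nothing was invalidated

	// With the const, the same typo would be a compile-time "undefined" error:
	delete(cache, NoVolumeZoneConflictPred)
	fmt.Println(len(cache)) // 0
}
```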