mirror of https://github.com/k3s-io/k3s
Removes the alpha feature gate for affinity annotations. The beta affinity fields in the pod spec should be used instead.
parent 8679677e87 · commit 4aea626944
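For orientation (not part of this diff): with the `scheduler.alpha.kubernetes.io/affinity` annotation and its feature gate gone, affinity is expressed only through the typed PodSpec fields, which the scheduler reads directly. Below is a minimal sketch using the v1 types that appear throughout this change; the package name, pod name, and label values are illustrative only.

// Sketch: affinity declared on the PodSpec instead of the removed alpha annotation.
package example

import (
    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func podWithNodeAffinity() *v1.Pod {
    return &v1.Pod{
        ObjectMeta: metav1.ObjectMeta{Name: "with-affinity"},
        Spec: v1.PodSpec{
            Containers: []v1.Container{{Name: "ctr", Image: "image"}},
            // Previously this constraint could also be carried as JSON under the
            // scheduler.alpha.kubernetes.io/affinity annotation; after this commit
            // only Spec.Affinity is honored.
            Affinity: &v1.Affinity{
                NodeAffinity: &v1.NodeAffinity{
                    RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
                        NodeSelectorTerms: []v1.NodeSelectorTerm{{
                            MatchExpressions: []v1.NodeSelectorRequirement{{
                                Key:      "foo",
                                Operator: v1.NodeSelectorOpIn,
                                Values:   []string{"value1", "value2"},
                            }},
                        }},
                    },
                },
            },
        },
    }
}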
@@ -72,11 +72,6 @@ const (
    // This annotation can be attached to node.
    ObjectTTLAnnotationKey string = "node.alpha.kubernetes.io/ttl"

    // AffinityAnnotationKey represents the key of affinity data (json serialized)
    // in the Annotations of a Pod.
    // TODO: remove when alpha support for affinity is removed
    AffinityAnnotationKey string = "scheduler.alpha.kubernetes.io/affinity"

    // annotation key prefix used to identify non-convertible json paths.
    NonConvertibleAnnotationPrefix = "non-convertible.kubernetes.io"

@@ -519,21 +519,6 @@ func PodAnnotationsFromSysctls(sysctls []api.Sysctl) string {
    return strings.Join(kvs, ",")
}

// GetAffinityFromPodAnnotations gets the json serialized affinity data from Pod.Annotations
// and converts it to the Affinity type in api.
// TODO: remove when alpha support for affinity is removed
func GetAffinityFromPodAnnotations(annotations map[string]string) (*api.Affinity, error) {
    if len(annotations) > 0 && annotations[api.AffinityAnnotationKey] != "" {
        var affinity api.Affinity
        err := json.Unmarshal([]byte(annotations[api.AffinityAnnotationKey]), &affinity)
        if err != nil {
            return nil, err
        }
        return &affinity, nil
    }
    return nil, nil
}

// GetPersistentVolumeClass returns StorageClassName.
func GetPersistentVolumeClass(volume *api.PersistentVolume) string {
    // Use beta annotation first

@@ -445,21 +445,6 @@ func RemoveTaint(node *v1.Node, taint *v1.Taint) (*v1.Node, bool, error) {
    return newNode, true, nil
}

// GetAffinityFromPodAnnotations gets the json serialized affinity data from Pod.Annotations
// and converts it to the Affinity type in api.
// TODO: remove when alpha support for affinity is removed
func GetAffinityFromPodAnnotations(annotations map[string]string) (*v1.Affinity, error) {
    if len(annotations) > 0 && annotations[v1.AffinityAnnotationKey] != "" {
        var affinity v1.Affinity
        err := json.Unmarshal([]byte(annotations[v1.AffinityAnnotationKey]), &affinity)
        if err != nil {
            return nil, err
        }
        return &affinity, nil
    }
    return nil, nil
}

// GetPersistentVolumeClass returns StorageClassName.
func GetPersistentVolumeClass(volume *v1.PersistentVolume) string {
    // Use beta annotation first

@@ -443,63 +443,6 @@ func TestSysctlsFromPodAnnotation(t *testing.T) {
    }
}

// TODO: remove when alpha support for affinity is removed
func TestGetAffinityFromPodAnnotations(t *testing.T) {
    testCases := []struct {
        pod       *v1.Pod
        expectErr bool
    }{
        {
            pod:       &v1.Pod{},
            expectErr: false,
        },
        {
            pod: &v1.Pod{
                ObjectMeta: metav1.ObjectMeta{
                    Annotations: map[string]string{
                        v1.AffinityAnnotationKey: `
                        {"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": {
                            "nodeSelectorTerms": [{
                                "matchExpressions": [{
                                    "key": "foo",
                                    "operator": "In",
                                    "values": ["value1", "value2"]
                                }]
                            }]
                        }}}`,
                    },
                },
            },
            expectErr: false,
        },
        {
            pod: &v1.Pod{
                ObjectMeta: metav1.ObjectMeta{
                    Annotations: map[string]string{
                        v1.AffinityAnnotationKey: `
                        {"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": {
                            "nodeSelectorTerms": [{
                                "matchExpressions": [{
                                    "key": "foo",
                        `,
                    },
                },
            },
            expectErr: true,
        },
    }

    for i, tc := range testCases {
        _, err := GetAffinityFromPodAnnotations(tc.pod.Annotations)
        if err == nil && tc.expectErr {
            t.Errorf("[%v]expected error but got none.", i)
        }
        if err != nil && !tc.expectErr {
            t.Errorf("[%v]did not expect error but got: %v", i, err)
        }
    }
}

// TODO: remove when alpha support for topology constraints is removed
func TestGetNodeAffinityFromAnnotations(t *testing.T) {
    testCases := []struct {

@@ -110,10 +110,6 @@ func ValidateDNS1123Subdomain(value string, fldPath *field.Path) field.ErrorList
func ValidatePodSpecificAnnotations(annotations map[string]string, spec *api.PodSpec, fldPath *field.Path) field.ErrorList {
    allErrs := field.ErrorList{}

    if annotations[api.AffinityAnnotationKey] != "" {
        allErrs = append(allErrs, ValidateAffinityInPodAnnotations(annotations, fldPath)...)
    }

    if value, isMirror := annotations[api.MirrorPodAnnotationKey]; isMirror {
        if len(spec.NodeName) == 0 {
            allErrs = append(allErrs, field.Invalid(fldPath.Key(api.MirrorPodAnnotationKey), value, "must set spec.nodeName if mirror pod annotation is set"))

@@ -164,23 +160,6 @@ func ValidateTolerationsInPodAnnotations(annotations map[string]string, fldPath
    return allErrs
}

// ValidateAffinityInPodAnnotations tests that the serialized Affinity in Pod.Annotations has valid data
func ValidateAffinityInPodAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList {
    allErrs := field.ErrorList{}

    affinity, err := helper.GetAffinityFromPodAnnotations(annotations)
    if err != nil {
        allErrs = append(allErrs, field.Invalid(fldPath, api.AffinityAnnotationKey, err.Error()))
        return allErrs
    }
    if affinity == nil {
        return allErrs
    }

    allErrs = append(allErrs, validateAffinity(affinity, fldPath.Child("affinity"))...)
    return allErrs
}

func ValidatePodSpecificAnnotationUpdates(newPod, oldPod *api.Pod, fldPath *field.Path) field.ErrorList {
    allErrs := field.ErrorList{}
    newAnnotations := newPod.Annotations

@@ -2430,9 +2409,7 @@ func ValidatePreferredSchedulingTerms(terms []api.PreferredSchedulingTerm, fldPa
// validatePodAffinityTerm tests that the specified podAffinityTerm fields have valid data
func validatePodAffinityTerm(podAffinityTerm api.PodAffinityTerm, allowEmptyTopologyKey bool, fldPath *field.Path) field.ErrorList {
    allErrs := field.ErrorList{}
    if !utilfeature.DefaultFeatureGate.Enabled(features.AffinityInAnnotations) && len(podAffinityTerm.TopologyKey) == 0 {
        allErrs = append(allErrs, field.Required(fldPath.Child("topologyKey"), "can not be empty"))
    }

    allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(podAffinityTerm.LabelSelector, fldPath.Child("matchExpressions"))...)
    for _, name := range podAffinityTerm.Namespaces {
        for _, msg := range ValidateNamespaceName(name, false) {

@@ -5044,7 +5044,7 @@ func TestValidatePod(t *testing.T) {
    }
}

func TestValidatePodWithDisabledAffinityInAnnotations(t *testing.T) {
func TestValidatePodWithAffinity(t *testing.T) {
    validPodSpec := func(affinity *api.Affinity) api.PodSpec {
        spec := api.PodSpec{
            Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File"}},

@@ -5057,7 -6 @@ func TestValidatePodWithDisabledAffinityInAnnotations(t *testing.T) {
        return spec
    }

    utilfeature.DefaultFeatureGate.Set("AffinityInAnnotations=False")
    errorCases := []api.Pod{
        {
            ObjectMeta: metav1.ObjectMeta{Name: "123", Namespace: "ns"},

@@ -5129,29 +5128,30 @@ func TestValidatePodWithDisabledAffinityInAnnotations(t *testing.T) {
            }),
        },
        {
            ObjectMeta: metav1.ObjectMeta{Name: "123", Namespace: "ns"},
            Spec: validPodSpec(&api.Affinity{
                PodAntiAffinity: &api.PodAntiAffinity{
                    PreferredDuringSchedulingIgnoredDuringExecution: []api.WeightedPodAffinityTerm{
                        {
                            Weight: 10,
                            PodAffinityTerm: api.PodAffinityTerm{
                                LabelSelector: &metav1.LabelSelector{
                                    MatchExpressions: []metav1.LabelSelectorRequirement{
                                        {
                                            Key: "key2",
                                            Operator: metav1.LabelSelectorOpNotIn,
                                            Values: []string{"value1", "value2"},
                                        },
        /* TODO: Re-enable if/when topologykey is required.
            ObjectMeta: metav1.ObjectMeta{Name: "123", Namespace: "ns"},
            Spec: validPodSpec(&api.Affinity{
                PodAntiAffinity: &api.PodAntiAffinity{
                    PreferredDuringSchedulingIgnoredDuringExecution: []api.WeightedPodAffinityTerm{
                        {
                            Weight: 10,
                            PodAffinityTerm: api.PodAffinityTerm{
                                LabelSelector: &metav1.LabelSelector{
                                    MatchExpressions: []metav1.LabelSelectorRequirement{
                                        {
                                            Key: "key2",
                                            Operator: metav1.LabelSelectorOpNotIn,
                                            Values: []string{"value1", "value2"},
                                        },
                                    },
                                    Namespaces: []string{"ns"},
                                    TopologyKey: "",
                                },
                                Namespaces: []string{"ns"},
                                TopologyKey: "",
                            },
                        },
                    },
                }),
            },
        }),*/
        },
    }

@@ -67,13 -6 @@ const (
    // Note: This feature is not supported for `BestEffort` pods.
    ExperimentalCriticalPodAnnotation utilfeature.Feature = "ExperimentalCriticalPodAnnotation"

    // owner: @davidopp
    // alpha: v1.6
    //
    // Determines if affinity defined in annotations should be processed
    // TODO: remove when alpha support for affinity is removed
    AffinityInAnnotations utilfeature.Feature = "AffinityInAnnotations"

    // owner: @vishh
    // alpha: v1.6
    //

@@ -137,7 -130,6 @@ var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureS
    DynamicVolumeProvisioning: {Default: true, PreRelease: utilfeature.Alpha},
    ExperimentalHostUserNamespaceDefaultingGate: {Default: false, PreRelease: utilfeature.Beta},
    ExperimentalCriticalPodAnnotation: {Default: false, PreRelease: utilfeature.Alpha},
    AffinityInAnnotations: {Default: false, PreRelease: utilfeature.Alpha},
    Accelerators: {Default: false, PreRelease: utilfeature.Alpha},
    TaintBasedEvictions: {Default: false, PreRelease: utilfeature.Alpha},
    RotateKubeletServerCertificate: {Default: false, PreRelease: utilfeature.Alpha},

@@ -59,7 -6 @@ go_test(
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
    ],
)

@@ -663,7 +663,7 @@ func podMatchesNodeLabels(pod *v1.Pod, node *v1.Node) bool {
    // 5. zero-length non-nil []NodeSelectorRequirement matches no nodes also, just for simplicity
    // 6. non-nil empty NodeSelectorRequirement is not allowed
    nodeAffinityMatches := true
    affinity := schedulercache.ReconcileAffinity(pod)
    affinity := pod.Spec.Affinity
    if affinity != nil && affinity.NodeAffinity != nil {
        nodeAffinity := affinity.NodeAffinity
        // if no required NodeAffinity requirements, will do no-op, means select all nodes.

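The same one-line substitution recurs across the scheduler hunks below: call sites stop going through the (now deleted) schedulercache.ReconcileAffinity helper and read the typed field directly. A minimal sketch of that pattern, not taken verbatim from the tree (assumes the usual v1 "k8s.io/api/core/v1" import):

// Sketch of the call-site change repeated throughout this commit.
func affinityForScheduling(pod *v1.Pod) *v1.Affinity {
    // Before: affinity := schedulercache.ReconcileAffinity(pod), which could
    // merge in affinity parsed from the alpha annotation when the gate was on.
    // After: only the typed PodSpec field is consulted.
    return pod.Spec.Affinity
}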
@@ -985,7 +985,7 @@ func (c *PodAffinityChecker) InterPodAffinityMatches(pod *v1.Pod, meta interface
    }

    // Now check if <pod> requirements will be satisfied on this node.
    affinity := schedulercache.ReconcileAffinity(pod)
    affinity := pod.Spec.Affinity
    if affinity == nil || (affinity.PodAffinity == nil && affinity.PodAntiAffinity == nil) {
        return true, nil, nil
    }

@@ -1089,7 +1089,7 @@ func getMatchingAntiAffinityTerms(pod *v1.Pod, nodeInfoMap map[string]*scheduler
    }
    var nodeResult []matchingPodAntiAffinityTerm
    for _, existingPod := range nodeInfo.PodsWithAffinity() {
        affinity := schedulercache.ReconcileAffinity(existingPod)
        affinity := existingPod.Spec.Affinity
        if affinity == nil {
            continue
        }

@@ -1116,7 +1116,7 @@ func getMatchingAntiAffinityTerms(pod *v1.Pod, nodeInfoMap map[string]*scheduler
func (c *PodAffinityChecker) getMatchingAntiAffinityTerms(pod *v1.Pod, allPods []*v1.Pod) ([]matchingPodAntiAffinityTerm, error) {
    var result []matchingPodAntiAffinityTerm
    for _, existingPod := range allPods {
        affinity := schedulercache.ReconcileAffinity(existingPod)
        affinity := existingPod.Spec.Affinity
        if affinity != nil && affinity.PodAntiAffinity != nil {
            existingPodNode, err := c.info.GetNodeInfo(existingPod.Spec.NodeName)
            if err != nil {

(One file's diff is suppressed here because it is too large.)

@@ -71,7 -6 @@ go_test(
        "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
    ],
)

@@ -117,7 +117,7 @@ func (p *podAffinityPriorityMap) processTerms(terms []v1.WeightedPodAffinityTerm
// Symmetry need to be considered for preferredDuringSchedulingIgnoredDuringExecution from podAffinity & podAntiAffinity,
// symmetry need to be considered for hard requirements from podAffinity
func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) {
    affinity := schedulercache.ReconcileAffinity(pod)
    affinity := pod.Spec.Affinity
    hasAffinityConstraints := affinity != nil && affinity.PodAffinity != nil
    hasAntiAffinityConstraints := affinity != nil && affinity.PodAntiAffinity != nil

@@ -138,7 +138,7 @@ func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *v1.Pod, node
            if err != nil {
                return err
            }
            existingPodAffinity := schedulercache.ReconcileAffinity(existingPod)
            existingPodAffinity := existingPod.Spec.Affinity
            existingHasAffinityConstraints := existingPodAffinity != nil && existingPodAffinity.PodAffinity != nil
            existingHasAntiAffinityConstraints := existingPodAffinity != nil && existingPodAffinity.PodAntiAffinity != nil

@@ -23,7 -6 @@ import (

    "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    utilfeature "k8s.io/apiserver/pkg/util/feature"
    schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
    "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
    schedulertesting "k8s.io/kubernetes/plugin/pkg/scheduler/testing"

@ -614,567 +613,3 @@ func TestHardPodAffinitySymmetricWeight(t *testing.T) {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: remove when alpha support for affinity is removed
|
||||
func TestInterPodAffinityAnnotationsPriority(t *testing.T) {
|
||||
utilfeature.DefaultFeatureGate.Set("AffinityInAnnotations=true")
|
||||
labelRgChina := map[string]string{
|
||||
"region": "China",
|
||||
}
|
||||
labelRgIndia := map[string]string{
|
||||
"region": "India",
|
||||
}
|
||||
labelAzAz1 := map[string]string{
|
||||
"az": "az1",
|
||||
}
|
||||
labelAzAz2 := map[string]string{
|
||||
"az": "az2",
|
||||
}
|
||||
labelRgChinaAzAz1 := map[string]string{
|
||||
"region": "China",
|
||||
"az": "az1",
|
||||
}
|
||||
podLabelSecurityS1 := map[string]string{
|
||||
"security": "S1",
|
||||
}
|
||||
podLabelSecurityS2 := map[string]string{
|
||||
"security": "S2",
|
||||
}
|
||||
// considered only preferredDuringSchedulingIgnoredDuringExecution in pod affinity
|
||||
stayWithS1InRegion := map[string]string{
|
||||
v1.AffinityAnnotationKey: `
|
||||
{"podAffinity": {
|
||||
"preferredDuringSchedulingIgnoredDuringExecution": [{
|
||||
"weight": 5,
|
||||
"podAffinityTerm": {
|
||||
"labelSelector": {
|
||||
"matchExpressions": [{
|
||||
"key": "security",
|
||||
"operator": "In",
|
||||
"values":["S1"]
|
||||
}]
|
||||
},
|
||||
"namespaces": [],
|
||||
"topologyKey": "region"
|
||||
}
|
||||
}]
|
||||
}}`,
|
||||
}
|
||||
stayWithS2InRegion := map[string]string{
|
||||
v1.AffinityAnnotationKey: `
|
||||
{"podAffinity": {
|
||||
"preferredDuringSchedulingIgnoredDuringExecution": [{
|
||||
"weight": 6,
|
||||
"podAffinityTerm": {
|
||||
"labelSelector": {
|
||||
"matchExpressions": [{
|
||||
"key": "security",
|
||||
"operator": "In",
|
||||
"values":["S2"]
|
||||
}]
|
||||
},
|
||||
"namespaces": [],
|
||||
"topologyKey": "region"
|
||||
}
|
||||
}]
|
||||
}}`,
|
||||
}
|
||||
affinity3 := map[string]string{
|
||||
v1.AffinityAnnotationKey: `
|
||||
{"podAffinity": {
|
||||
"preferredDuringSchedulingIgnoredDuringExecution": [
|
||||
{
|
||||
"weight": 8,
|
||||
"podAffinityTerm": {
|
||||
"labelSelector": {
|
||||
"matchExpressions": [{
|
||||
"key": "security",
|
||||
"operator": "NotIn",
|
||||
"values":["S1"]
|
||||
}, {
|
||||
"key": "security",
|
||||
"operator": "In",
|
||||
"values":["S2"]
|
||||
}]
|
||||
},
|
||||
"namespaces": [],
|
||||
"topologyKey": "region"
|
||||
}
|
||||
}, {
|
||||
"weight": 2,
|
||||
"podAffinityTerm": {
|
||||
"labelSelector": {
|
||||
"matchExpressions": [{
|
||||
"key": "security",
|
||||
"operator": "Exists"
|
||||
}, {
|
||||
"key": "wrongkey",
|
||||
"operator": "DoesNotExist"
|
||||
}]
|
||||
},
|
||||
"namespaces": [],
|
||||
"topologyKey": "region"
|
||||
}
|
||||
}
|
||||
]
|
||||
}}`,
|
||||
}
|
||||
hardAffinity := map[string]string{
|
||||
v1.AffinityAnnotationKey: `
|
||||
{"podAffinity": {
|
||||
"requiredDuringSchedulingIgnoredDuringExecution": [
|
||||
{
|
||||
"labelSelector":{
|
||||
"matchExpressions": [{
|
||||
"key": "security",
|
||||
"operator": "In",
|
||||
"values": ["S1", "value2"]
|
||||
}]
|
||||
},
|
||||
"namespaces": [],
|
||||
"topologyKey": "region"
|
||||
}, {
|
||||
"labelSelector": {
|
||||
"matchExpressions": [{
|
||||
"key": "security",
|
||||
"operator": "Exists"
|
||||
}, {
|
||||
"key": "wrongkey",
|
||||
"operator": "DoesNotExist"
|
||||
}]
|
||||
},
|
||||
"namespaces": [],
|
||||
"topologyKey": "region"
|
||||
}
|
||||
]
|
||||
}}`,
|
||||
}
|
||||
awayFromS1InAz := map[string]string{
|
||||
v1.AffinityAnnotationKey: `
|
||||
{"podAntiAffinity": {
|
||||
"preferredDuringSchedulingIgnoredDuringExecution": [{
|
||||
"weight": 5,
|
||||
"podAffinityTerm": {
|
||||
"labelSelector": {
|
||||
"matchExpressions": [{
|
||||
"key": "security",
|
||||
"operator": "In",
|
||||
"values":["S1"]
|
||||
}]
|
||||
},
|
||||
"namespaces": [],
|
||||
"topologyKey": "az"
|
||||
}
|
||||
}]
|
||||
}}`,
|
||||
}
|
||||
// to stay away from security S2 in any az.
|
||||
awayFromS2InAz := map[string]string{
|
||||
v1.AffinityAnnotationKey: `
|
||||
{"podAntiAffinity": {
|
||||
"preferredDuringSchedulingIgnoredDuringExecution": [{
|
||||
"weight": 5,
|
||||
"podAffinityTerm": {
|
||||
"labelSelector": {
|
||||
"matchExpressions": [{
|
||||
"key": "security",
|
||||
"operator": "In",
|
||||
"values":["S2"]
|
||||
}]
|
||||
},
|
||||
"namespaces": [],
|
||||
"topologyKey": "az"
|
||||
}
|
||||
}]
|
||||
}}`,
|
||||
}
|
||||
// to stay with security S1 in same region, stay away from security S2 in any az.
|
||||
stayWithS1InRegionAwayFromS2InAz := map[string]string{
|
||||
v1.AffinityAnnotationKey: `
|
||||
{"podAffinity": {
|
||||
"preferredDuringSchedulingIgnoredDuringExecution": [{
|
||||
"weight": 8,
|
||||
"podAffinityTerm": {
|
||||
"labelSelector": {
|
||||
"matchExpressions": [{
|
||||
"key": "security",
|
||||
"operator": "In",
|
||||
"values":["S1"]
|
||||
}]
|
||||
},
|
||||
"namespaces": [],
|
||||
"topologyKey": "region"
|
||||
}
|
||||
}]
|
||||
},
|
||||
"podAntiAffinity": {
|
||||
"preferredDuringSchedulingIgnoredDuringExecution": [{
|
||||
"weight": 5,
|
||||
"podAffinityTerm": {
|
||||
"labelSelector": {
|
||||
"matchExpressions": [{
|
||||
"key": "security",
|
||||
"operator": "In",
|
||||
"values":["S2"]
|
||||
}]
|
||||
},
|
||||
"namespaces": [],
|
||||
"topologyKey": "az"
|
||||
}
|
||||
}]
|
||||
}}`,
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
pod *v1.Pod
|
||||
pods []*v1.Pod
|
||||
nodes []*v1.Node
|
||||
expectedList schedulerapi.HostPriorityList
|
||||
test string
|
||||
}{
|
||||
{
|
||||
pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: map[string]string{}}},
|
||||
nodes: []*v1.Node{
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
|
||||
},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
|
||||
test: "all machines are same priority as Affinity is nil",
|
||||
},
|
||||
// the node(machine1) that have the label {"region": "China"} (match the topology key) and that have existing pods that match the labelSelector get high score
|
||||
// the node(machine3) that don't have the label {"region": "whatever the value is"} (mismatch the topology key) but that have existing pods that match the labelSelector get low score
|
||||
// the node(machine2) that have the label {"region": "China"} (match the topology key) but that have existing pods that mismatch the labelSelector get low score
|
||||
{
|
||||
pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS1InRegion}},
|
||||
pods: []*v1.Pod{
|
||||
{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
{Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS2}},
|
||||
{Spec: v1.PodSpec{NodeName: "machine3"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
|
||||
},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
|
||||
test: "Affinity: pod that matches topology key & pods in nodes will get high score comparing to others" +
|
||||
"which doesn't match either pods in nodes or in topology key",
|
||||
},
|
||||
// the node1(machine1) that have the label {"region": "China"} (match the topology key) and that have existing pods that match the labelSelector get high score
|
||||
// the node2(machine2) that have the label {"region": "China"}, match the topology key and have the same label value with node1, get the same high score with node1
|
||||
// the node3(machine3) that have the label {"region": "India"}, match the topology key but have a different label value, don't have existing pods that match the labelSelector,
|
||||
// get a low score.
|
||||
{
|
||||
pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: metav1.ObjectMeta{Annotations: stayWithS1InRegion}},
|
||||
pods: []*v1.Pod{
|
||||
{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgChinaAzAz1}},
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelRgIndia}},
|
||||
},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 0}},
|
||||
test: "All the nodes that have the same topology key & label value with one of them has an existing pod that match the affinity rules, have the same score",
|
||||
},
|
||||
// there are 2 regions, say regionChina(machine1,machine3,machine4) and regionIndia(machine2,machine5), both regions have nodes that match the preference.
|
||||
// But there are more nodes(actually more existing pods) in regionChina that match the preference than regionIndia.
|
||||
// Then, nodes in regionChina get higher score than nodes in regionIndia, and all the nodes in regionChina should get a same score(high score),
|
||||
// while all the nodes in regionIndia should get another same score(low score).
|
||||
{
|
||||
pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS2InRegion}},
|
||||
pods: []*v1.Pod{
|
||||
{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS2}},
|
||||
{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS2}},
|
||||
{Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS2}},
|
||||
{Spec: v1.PodSpec{NodeName: "machine3"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS2}},
|
||||
{Spec: v1.PodSpec{NodeName: "machine4"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS2}},
|
||||
{Spec: v1.PodSpec{NodeName: "machine5"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS2}},
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelRgChina}},
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "machine4", Labels: labelRgChina}},
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "machine5", Labels: labelRgIndia}},
|
||||
},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 5}, {Host: "machine3", Score: 10}, {Host: "machine4", Score: 10}, {Host: "machine5", Score: 5}},
|
||||
test: "Affinity: nodes in one region has more matching pods comparing to other reqion, so the region which has more macthes will get high score",
|
||||
},
|
||||
// Test with the different operators and values for pod affinity scheduling preference, including some match failures.
|
||||
{
|
||||
pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: affinity3}},
|
||||
pods: []*v1.Pod{
|
||||
{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
{Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS2}},
|
||||
{Spec: v1.PodSpec{NodeName: "machine3"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
|
||||
},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 2}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 0}},
|
||||
test: "Affinity: different Label operators and values for pod affinity scheduling preference, including some match failures ",
|
||||
},
|
||||
// Test the symmetry cases for affinity, the difference between affinity and symmetry is not the pod wants to run together with some existing pods,
|
||||
// but the existing pods have the inter pod affinity preference while the pod to schedule satisfy the preference.
|
||||
{
|
||||
pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS2}},
|
||||
pods: []*v1.Pod{
|
||||
{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS1InRegion}},
|
||||
{Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS2, Annotations: stayWithS2InRegion}},
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
|
||||
},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 0}},
|
||||
test: "Affinity symmetry: considred only the preferredDuringSchedulingIgnoredDuringExecution in pod affinity symmetry",
|
||||
},
|
||||
{
|
||||
pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
pods: []*v1.Pod{
|
||||
{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: hardAffinity}},
|
||||
{Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS2, Annotations: hardAffinity}},
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
|
||||
},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 0}},
|
||||
test: "Affinity symmetry: considred RequiredDuringSchedulingIgnoredDuringExecution in pod affinity symmetry",
|
||||
},
|
||||
|
||||
// The pod to schedule prefer to stay away from some existing pods at node level using the pod anti affinity.
|
||||
// the nodes that have the label {"node": "bar"} (match the topology key) and that have existing pods that match the labelSelector get low score
|
||||
// the nodes that don't have the label {"node": "whatever the value is"} (mismatch the topology key) but that have existing pods that match the labelSelector get high score
|
||||
// the nodes that have the label {"node": "bar"} (match the topology key) but that have existing pods that mismatch the labelSelector get high score
|
||||
// there are 2 nodes, say node1 and node2, both nodes have pods that match the labelSelector and have topology-key in node.Labels.
|
||||
// But there are more pods on node1 that match the preference than node2. Then, node1 get a lower score than node2.
|
||||
{
|
||||
pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: awayFromS1InAz}},
|
||||
pods: []*v1.Pod{
|
||||
{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
{Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS2}},
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelAzAz1}},
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgChina}},
|
||||
},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}},
|
||||
test: "Anti Affinity: pod that doesnot match existing pods in node will get high score ",
|
||||
},
|
||||
{
|
||||
pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: awayFromS1InAz}},
|
||||
pods: []*v1.Pod{
|
||||
{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
{Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelAzAz1}},
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgChina}},
|
||||
},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}},
|
||||
test: "Anti Affinity: pod that does not matches topology key & matches the pods in nodes will get higher score comparing to others ",
|
||||
},
|
||||
{
|
||||
pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: awayFromS1InAz}},
|
||||
pods: []*v1.Pod{
|
||||
{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
{Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS2}},
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelAzAz1}},
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
|
||||
},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}},
|
||||
test: "Anti Affinity: one node has more matching pods comparing to other node, so the node which has more unmacthes will get high score",
|
||||
},
|
||||
// Test the symmetry cases for anti affinity
|
||||
{
|
||||
pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS2}},
|
||||
pods: []*v1.Pod{
|
||||
{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: awayFromS2InAz}},
|
||||
{Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS2, Annotations: awayFromS1InAz}},
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelAzAz1}},
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelAzAz2}},
|
||||
},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}},
|
||||
test: "Anti Affinity symmetry: the existing pods in node which has anti affinity match will get high score",
|
||||
},
|
||||
// Test both affinity and anti-affinity
|
||||
{
|
||||
pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS1InRegionAwayFromS2InAz}},
|
||||
pods: []*v1.Pod{
|
||||
{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
{Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelAzAz1}},
|
||||
},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}},
|
||||
test: "Affinity and Anti Affinity: considered only preferredDuringSchedulingIgnoredDuringExecution in both pod affinity & anti affinity",
|
||||
},
|
||||
// Combined cases considering both affinity and anti-affinity, the pod to schedule and existing pods have the same labels (they are in the same RC/service),
|
||||
// the pod prefer to run together with its brother pods in the same region, but wants to stay away from them at node level,
|
||||
// so that all the pods of a RC/service can stay in a same region but trying to separate with each other
|
||||
// machine-1,machine-3,machine-4 are in ChinaRegion others machin-2,machine-5 are in IndiaRegion
|
||||
{
|
||||
pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS1InRegionAwayFromS2InAz}},
|
||||
pods: []*v1.Pod{
|
||||
{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
{Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
{Spec: v1.PodSpec{NodeName: "machine3"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
{Spec: v1.PodSpec{NodeName: "machine3"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
{Spec: v1.PodSpec{NodeName: "machine4"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
{Spec: v1.PodSpec{NodeName: "machine5"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelRgChinaAzAz1}},
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelRgChina}},
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "machine4", Labels: labelRgChina}},
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "machine5", Labels: labelRgIndia}},
|
||||
},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 4}, {Host: "machine3", Score: 10}, {Host: "machine4", Score: 10}, {Host: "machine5", Score: 4}},
|
||||
test: "Affinity and Anti Affinity: considering both affinity and anti-affinity, the pod to schedule and existing pods have the same labels",
|
||||
},
|
||||
// Consider Affinity, Anti Affinity and symmetry together.
|
||||
// for Affinity, the weights are: 8, 0, 0, 0
|
||||
// for Anti Affinity, the weights are: 0, -5, 0, 0
|
||||
// for Affinity symmetry, the weights are: 0, 0, 8, 0
|
||||
// for Anti Affinity symmetry, the weights are: 0, 0, 0, -5
|
||||
{
|
||||
pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS1InRegionAwayFromS2InAz}},
|
||||
pods: []*v1.Pod{
|
||||
{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
{Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS2}},
|
||||
{Spec: v1.PodSpec{NodeName: "machine3"}, ObjectMeta: metav1.ObjectMeta{Annotations: stayWithS1InRegionAwayFromS2InAz}},
|
||||
{Spec: v1.PodSpec{NodeName: "machine4"}, ObjectMeta: metav1.ObjectMeta{Annotations: awayFromS1InAz}},
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelAzAz1}},
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelRgIndia}},
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "machine4", Labels: labelAzAz2}},
|
||||
},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 10}, {Host: "machine4", Score: 0}},
|
||||
test: "Affinity and Anti Affinity and symmetry: considered only preferredDuringSchedulingIgnoredDuringExecution in both pod affinity & anti affinity & symmetry",
|
||||
},
|
||||
}
|
||||
for _, test := range tests {
|
||||
nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, test.nodes)
|
||||
interPodAffinity := InterPodAffinity{
|
||||
info: FakeNodeListInfo(test.nodes),
|
||||
nodeLister: schedulertesting.FakeNodeLister(test.nodes),
|
||||
podLister: schedulertesting.FakePodLister(test.pods),
|
||||
hardPodAffinityWeight: v1.DefaultHardPodAffinitySymmetricWeight,
|
||||
}
|
||||
list, err := interPodAffinity.CalculateInterPodAffinityPriority(test.pod, nodeNameToInfo, test.nodes)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
if !reflect.DeepEqual(test.expectedList, list) {
|
||||
t.Errorf("%s: \nexpected \n\t%#v, \ngot \n\t%#v\n", test.test, test.expectedList, list)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: remove when alpha support for affinity is removed
|
||||
func TestHardPodAffinityAnnotationsSymmetricWeight(t *testing.T) {
|
||||
utilfeature.DefaultFeatureGate.Set("AffinityInAnnotations=true")
|
||||
podLabelServiceS1 := map[string]string{
|
||||
"service": "S1",
|
||||
}
|
||||
labelRgChina := map[string]string{
|
||||
"region": "China",
|
||||
}
|
||||
labelRgIndia := map[string]string{
|
||||
"region": "India",
|
||||
}
|
||||
labelAzAz1 := map[string]string{
|
||||
"az": "az1",
|
||||
}
|
||||
hardPodAffinity := map[string]string{
|
||||
v1.AffinityAnnotationKey: `
|
||||
{"podAffinity": {
|
||||
"requiredDuringSchedulingIgnoredDuringExecution": [
|
||||
{
|
||||
"labelSelector":{
|
||||
"matchExpressions": [{
|
||||
"key": "service",
|
||||
"operator": "In",
|
||||
"values": ["S1"]
|
||||
}]
|
||||
},
|
||||
"namespaces": [],
|
||||
"topologyKey": "region"
|
||||
}
|
||||
]
|
||||
}}`,
|
||||
}
|
||||
tests := []struct {
|
||||
pod *v1.Pod
|
||||
pods []*v1.Pod
|
||||
nodes []*v1.Node
|
||||
hardPodAffinityWeight int
|
||||
expectedList schedulerapi.HostPriorityList
|
||||
test string
|
||||
}{
|
||||
{
|
||||
pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelServiceS1}},
|
||||
pods: []*v1.Pod{
|
||||
{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Annotations: hardPodAffinity}},
|
||||
{Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: metav1.ObjectMeta{Annotations: hardPodAffinity}},
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
|
||||
},
|
||||
hardPodAffinityWeight: v1.DefaultHardPodAffinitySymmetricWeight,
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 0}},
|
||||
test: "Hard Pod Affinity symmetry: hard pod affinity symmetry weights 1 by default, then nodes that match the hard pod affinity symmetry rules, get a high score",
|
||||
},
|
||||
{
|
||||
pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelServiceS1}},
|
||||
pods: []*v1.Pod{
|
||||
{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Annotations: hardPodAffinity}},
|
||||
{Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: metav1.ObjectMeta{Annotations: hardPodAffinity}},
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
|
||||
},
|
||||
hardPodAffinityWeight: 0,
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
|
||||
test: "Hard Pod Affinity symmetry: hard pod affinity symmetry is closed(weights 0), then nodes that match the hard pod affinity symmetry rules, get same score with those not match",
|
||||
},
|
||||
}
|
||||
for _, test := range tests {
|
||||
nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, test.nodes)
|
||||
ipa := InterPodAffinity{
|
||||
info: FakeNodeListInfo(test.nodes),
|
||||
nodeLister: schedulertesting.FakeNodeLister(test.nodes),
|
||||
podLister: schedulertesting.FakePodLister(test.pods),
|
||||
hardPodAffinityWeight: test.hardPodAffinityWeight,
|
||||
}
|
||||
list, err := ipa.CalculateInterPodAffinityPriority(test.pod, nodeNameToInfo, test.nodes)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
if !reflect.DeepEqual(test.expectedList, list) {
|
||||
t.Errorf("%s: \nexpected \n\t%#v, \ngot \n\t%#v\n", test.test, test.expectedList, list)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -38,6 +38,6 @@ func PriorityMetadata(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.Nod
    return &priorityMetadata{
        nonZeroRequest: getNonZeroRequests(pod),
        podTolerations: tolerationsPreferNoSchedule,
        affinity: schedulercache.ReconcileAffinity(pod),
        affinity: pod.Spec.Affinity,
    }
}

@@ -43,7 +43,7 @@ func CalculateNodeAffinityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *s
        affinity = priorityMeta.affinity
    } else {
        // We couldn't parse metadata - fallback to the podspec.
        affinity = schedulercache.ReconcileAffinity(pod)
        affinity = pod.Spec.Affinity
    }

    var count int32

@@ -22,7 -6 @@ import (

    "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    utilfeature "k8s.io/apiserver/pkg/util/feature"
    schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
    "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
)

@ -178,147 +177,3 @@ func TestNodeAffinityPriority(t *testing.T) {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: remove when alpha support for affinity is removed
|
||||
func TestNodeAffinityAnnotationsPriority(t *testing.T) {
|
||||
utilfeature.DefaultFeatureGate.Set("AffinityInAnnotations=true")
|
||||
label1 := map[string]string{"foo": "bar"}
|
||||
label2 := map[string]string{"key": "value"}
|
||||
label3 := map[string]string{"az": "az1"}
|
||||
label4 := map[string]string{"abc": "az11", "def": "az22"}
|
||||
label5 := map[string]string{"foo": "bar", "key": "value", "az": "az1"}
|
||||
|
||||
affinity1 := map[string]string{
|
||||
v1.AffinityAnnotationKey: `
|
||||
{"nodeAffinity": {"preferredDuringSchedulingIgnoredDuringExecution": [
|
||||
{
|
||||
"weight": 2,
|
||||
"preference": {
|
||||
"matchExpressions": [
|
||||
{
|
||||
"key": "foo",
|
||||
"operator": "In", "values": ["bar"]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
]}}`,
|
||||
}
|
||||
|
||||
affinity2 := map[string]string{
|
||||
v1.AffinityAnnotationKey: `
|
||||
{"nodeAffinity": {"preferredDuringSchedulingIgnoredDuringExecution": [
|
||||
{
|
||||
"weight": 2,
|
||||
"preference": {"matchExpressions": [
|
||||
{
|
||||
"key": "foo",
|
||||
"operator": "In", "values": ["bar"]
|
||||
}
|
||||
]}
|
||||
},
|
||||
{
|
||||
"weight": 4,
|
||||
"preference": {"matchExpressions": [
|
||||
{
|
||||
"key": "key",
|
||||
"operator": "In", "values": ["value"]
|
||||
}
|
||||
]}
|
||||
},
|
||||
{
|
||||
"weight": 5,
|
||||
"preference": {"matchExpressions": [
|
||||
{
|
||||
"key": "foo",
|
||||
"operator": "In", "values": ["bar"]
|
||||
},
|
||||
{
|
||||
"key": "key",
|
||||
"operator": "In", "values": ["value"]
|
||||
},
|
||||
{
|
||||
"key": "az",
|
||||
"operator": "In", "values": ["az1"]
|
||||
}
|
||||
]}
|
||||
}
|
||||
]}}`,
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
pod *v1.Pod
|
||||
nodes []*v1.Node
|
||||
expectedList schedulerapi.HostPriorityList
|
||||
test string
|
||||
}{
|
||||
{
|
||||
pod: &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Annotations: map[string]string{},
|
||||
},
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: label1}},
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: label2}},
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: label3}},
|
||||
},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
|
||||
test: "all machines are same priority as NodeAffinity is nil",
|
||||
},
|
||||
{
|
||||
pod: &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Annotations: affinity1,
|
||||
},
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: label4}},
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: label2}},
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: label3}},
|
||||
},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
|
||||
test: "no machine macthes preferred scheduling requirements in NodeAffinity of pod so all machines' priority is zero",
|
||||
},
|
||||
{
|
||||
pod: &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Annotations: affinity1,
|
||||
},
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: label1}},
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: label2}},
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: label3}},
|
||||
},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
|
||||
test: "only machine1 matches the preferred scheduling requirements of pod",
|
||||
},
|
||||
{
|
||||
pod: &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Annotations: affinity2,
|
||||
},
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: label1}},
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "machine5", Labels: label5}},
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: label2}},
|
||||
},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 1}, {Host: "machine5", Score: 10}, {Host: "machine2", Score: 3}},
|
||||
test: "all machines matches the preferred scheduling requirements of pod but with different priorities ",
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(nil, test.nodes)
|
||||
nap := priorityFunction(CalculateNodeAffinityPriorityMap, CalculateNodeAffinityPriorityReduce)
|
||||
list, err := nap(test.pod, nodeNameToInfo, test.nodes)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
if !reflect.DeepEqual(test.expectedList, list) {
|
||||
t.Errorf("%s: \nexpected %#v, \ngot %#v", test.test, test.expectedList, list)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -8,6 +8,17 @@ load(
    "go_test",
)

go_test(
    name = "go_default_test",
    srcs = ["topologies_test.go"],
    library = ":go_default_library",
    tags = ["automanaged"],
    deps = [
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
    ],
)

go_library(
    name = "go_default_library",
    srcs = [

@@ -17,12 +28,10 @@ go_library(
    ],
    tags = ["automanaged"],
    deps = [
        "//pkg/features:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
    ],
)

@@ -38,14 +47,3 @@ filegroup(
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)

go_test(
    name = "go_default_test",
    srcs = ["topologies_test.go"],
    library = ":go_default_library",
    tags = ["automanaged"],
    deps = [
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
    ],
)

@@ -20,8 -6 @@ import (
    "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/util/sets"
    utilfeature "k8s.io/apiserver/pkg/util/feature"
    "k8s.io/kubernetes/pkg/features"
)

// GetNamespacesFromPodAffinityTerm returns a set of names

@@ -79,15 +77,5 @@ type Topologies struct {
// NodesHaveSameTopologyKey checks if nodeA and nodeB have same label value with given topologyKey as label key.
// If the topologyKey is empty, check if the two nodes have any of the default topologyKeys, and have same corresponding label value.
func (tps *Topologies) NodesHaveSameTopologyKey(nodeA, nodeB *v1.Node, topologyKey string) bool {
    if utilfeature.DefaultFeatureGate.Enabled(features.AffinityInAnnotations) && len(topologyKey) == 0 {
        // assumes this is allowed only for PreferredDuringScheduling pod anti-affinity (ensured by api/validation)
        for _, defaultKey := range tps.DefaultKeys {
            if NodesHaveSameTopologyKey(nodeA, nodeB, defaultKey) {
                return true
            }
        }
        return false
    } else {
        return NodesHaveSameTopologyKey(nodeA, nodeB, topologyKey)
    }
    return NodesHaveSameTopologyKey(nodeA, nodeB, topologyKey)
}

@@ -14,30 +14,24 @@ go_library(
        "cache.go",
        "interface.go",
        "node_info.go",
        "reconcile_affinity.go",
        "util.go",
    ],
    tags = ["automanaged"],
    deps = [
        "//pkg/api/v1/helper:go_default_library",
        "//pkg/features:go_default_library",
        "//plugin/pkg/scheduler/algorithm/priorities/util:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//vendor/k8s.io/client-go/tools/cache:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = [
        "cache_test.go",
        "reconcile_affinity_test.go",
    ],
    srcs = ["cache_test.go"],
    library = ":go_default_library",
    tags = ["automanaged"],
    deps = [

@@ -48,7 -6 @@ go_test(
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
    ],
)

@@ -257,7 +257,7 @@ func (n *NodeInfo) String() string {
}

func hasPodAffinityConstraints(pod *v1.Pod) bool {
    affinity := ReconcileAffinity(pod)
    affinity := pod.Spec.Affinity
    return affinity != nil && (affinity.PodAffinity != nil || affinity.PodAntiAffinity != nil)
}

@@ -1,54 -0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package schedulercache

import (
    "k8s.io/api/core/v1"
    utilfeature "k8s.io/apiserver/pkg/util/feature"
    v1helper "k8s.io/kubernetes/pkg/api/v1/helper"
    "k8s.io/kubernetes/pkg/features"
)

// Reconcile api and annotation affinity definitions.
// When alpha affinity feature is not enabled, always take affinity
// from PodSpec.When alpha affinity feature is enabled, if affinity
// is not set in PodSpec, take affinity from annotation.
// When alpha affinity feature is enabled, if affinity is set in PodSpec,
// take node affinity, pod affinity, and pod anti-affinity individually
// using the following rule: take affinity from PodSpec if it is defined,
// otherwise take from annotation if it is defined.
// TODO: remove when alpha support for affinity is removed
func ReconcileAffinity(pod *v1.Pod) *v1.Affinity {
    affinity := pod.Spec.Affinity
    if utilfeature.DefaultFeatureGate.Enabled(features.AffinityInAnnotations) {
        annotationsAffinity, _ := v1helper.GetAffinityFromPodAnnotations(pod.Annotations)
        if affinity == nil && annotationsAffinity != nil {
            affinity = annotationsAffinity
        } else if annotationsAffinity != nil {
            if affinity != nil && affinity.NodeAffinity == nil && annotationsAffinity.NodeAffinity != nil {
                affinity.NodeAffinity = annotationsAffinity.NodeAffinity
            }
            if affinity != nil && affinity.PodAffinity == nil && annotationsAffinity.PodAffinity != nil {
                affinity.PodAffinity = annotationsAffinity.PodAffinity
            }
            if affinity != nil && affinity.PodAntiAffinity == nil && annotationsAffinity.PodAntiAffinity != nil {
                affinity.PodAntiAffinity = annotationsAffinity.PodAntiAffinity
            }
        }
    }
    return affinity
}

@ -1,151 +0,0 @@
|
|||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package schedulercache
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
)
|
||||
|
||||
// TODO: remove when alpha support for affinity is removed
|
||||
func TestReconcileAffinity(t *testing.T) {
|
||||
baseAffinity := &v1.Affinity{
|
||||
NodeAffinity: &v1.NodeAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "foo",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"bar", "value2"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
PodAffinity: &v1.PodAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
|
||||
{
|
||||
LabelSelector: &metav1.LabelSelector{
|
||||
MatchExpressions: []metav1.LabelSelectorRequirement{
|
||||
{
|
||||
Key: "security",
|
||||
Operator: metav1.LabelSelectorOpDoesNotExist,
|
||||
Values: []string{"securityscan"},
|
||||
},
|
||||
},
|
||||
},
|
||||
TopologyKey: "topologyKey1",
|
||||
},
|
||||
},
|
||||
},
|
||||
PodAntiAffinity: &v1.PodAntiAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
|
||||
{
|
||||
LabelSelector: &metav1.LabelSelector{
|
||||
MatchExpressions: []metav1.LabelSelectorRequirement{
|
||||
{
|
||||
Key: "service",
|
||||
Operator: metav1.LabelSelectorOpIn,
|
||||
Values: []string{"S1", "value2"},
|
||||
},
|
||||
},
|
||||
},
|
||||
TopologyKey: "topologyKey2",
|
||||
Namespaces: []string{"ns1"},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
nodeAffinityAnnotation := map[string]string{
|
||||
v1.AffinityAnnotationKey: `
|
||||
{"nodeAffinity": {"preferredDuringSchedulingIgnoredDuringExecution": [
|
||||
{
|
||||
"weight": 2,
|
||||
"preference": {"matchExpressions": [
|
||||
{
|
||||
"key": "foo",
|
||||
"operator": "In", "values": ["bar"]
|
||||
}
|
||||
]}
|
||||
}
|
||||
]}}`,
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
pod *v1.Pod
|
||||
expected *v1.Affinity
|
||||
annotationsEnabled bool
|
||||
}{
|
||||
{
|
||||
// affinity is set in both PodSpec and annotations; take from PodSpec.
|
||||
pod: &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Annotations: nodeAffinityAnnotation,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Affinity: baseAffinity,
|
||||
},
|
||||
},
|
||||
expected: baseAffinity,
|
||||
annotationsEnabled: true,
|
||||
},
|
||||
{
|
||||
// affinity is only set in annotation; take from annotation.
|
||||
pod: &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Annotations: nodeAffinityAnnotation,
|
||||
},
|
||||
},
|
||||
expected: &v1.Affinity{
|
||||
NodeAffinity: &v1.NodeAffinity{
|
||||
PreferredDuringSchedulingIgnoredDuringExecution: []v1.PreferredSchedulingTerm{
|
||||
{
|
||||
Weight: 2,
|
||||
Preference: v1.NodeSelectorTerm{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "foo",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"bar"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
annotationsEnabled: true,
|
||||
},
|
||||
}
|
||||
|
||||
for i, tc := range testCases {
|
||||
utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("AffinityInAnnotations=%t", tc.annotationsEnabled))
|
||||
affinity := ReconcileAffinity(tc.pod)
|
||||
if !reflect.DeepEqual(affinity, tc.expected) {
|
||||
t.Errorf("[%v] Did not get expected affinity:\n\n%v\n\n. got:\n\n %v", i, tc.expected, affinity)
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -72,11 +72,6 @@ const (
    // This annotation can be attached to node.
    ObjectTTLAnnotationKey string = "node.alpha.kubernetes.io/ttl"

    // AffinityAnnotationKey represents the key of affinity data (json serialized)
    // in the Annotations of a Pod.
    // TODO: remove when alpha support for affinity is removed
    AffinityAnnotationKey string = "scheduler.alpha.kubernetes.io/affinity"

    // annotation key prefix used to identify non-convertible json paths.
    NonConvertibleAnnotationPrefix = "non-convertible.kubernetes.io"
