mirror of https://github.com/k3s-io/k3s
implement inter pod topological affinity and anti-affinity
parent 28a8a23471
commit 82ba4f077e
@ -183,7 +183,7 @@ func startComponents(firstManifestURL, secondManifestURL string) (string, string
handler.delegate = m.Handler

// Scheduler
schedulerConfigFactory := factory.NewConfigFactory(cl, api.DefaultSchedulerName)
schedulerConfigFactory := factory.NewConfigFactory(cl, api.DefaultSchedulerName, api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains)
schedulerConfig, err := schedulerConfigFactory.Create()
if err != nil {
glog.Fatalf("Couldn't create scheduler config: %v", err)
|
|
|
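For context on the two new arguments, here is a minimal sketch (not part of the diff; `cl` is assumed to be the client already built by this harness) of the same call with explicit, non-default values:

```go
// Sketch only: same call shape as above, but overriding the defaults.
// 3 is a hypothetical weight for the implicit PreferredDuringScheduling rule
// derived from hard pod affinity; the string narrows the "all topologies" set
// used when a topologyKey is empty.
schedulerConfigFactory := factory.NewConfigFactory(
	cl,
	api.DefaultSchedulerName,
	3,
	"failure-domain.beta.kubernetes.io/zone",
)
schedulerConfig, err := schedulerConfigFactory.Create()
if err != nil {
	glog.Fatalf("Couldn't create scheduler config: %v", err)
}
```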
@ -56,7 +56,9 @@ kube-scheduler
```
--address="0.0.0.0": The IP address to serve on (set to 0.0.0.0 for all interfaces)
--algorithm-provider="DefaultProvider": The scheduling algorithm provider to use, one of: DefaultProvider
--failure-domains="kubernetes.io/hostname,failure-domain.beta.kubernetes.io/zone,failure-domain.beta.kubernetes.io/region": Indicates the "all topologies" set for an empty topologyKey when it's used for PreferredDuringScheduling pod anti-affinity.
--google-json-key="": The Google Cloud Platform Service Account JSON Key to use for authentication.
--hard-pod-affinity-symmetric-weight=1: RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule corresponding to every RequiredDuringScheduling affinity rule. --hard-pod-affinity-symmetric-weight represents the weight of the implicit PreferredDuringScheduling affinity rule.
--kube-api-burst=100: Burst to use while talking with kubernetes apiserver
--kube-api-content-type="": ContentType of requests sent to apiserver. Passing application/vnd.kubernetes.protobuf is an experimental feature now.
--kube-api-qps=50: QPS to use while talking with kubernetes apiserver
@ -73,7 +75,7 @@ kube-scheduler
--scheduler-name="default-scheduler": Name of the scheduler, used to select which pods will be processed by this scheduler, based on pod's annotation with key 'scheduler.alpha.kubernetes.io/name'
```

###### Auto generated by spf13/cobra on 21-Apr-2016
###### Auto generated by spf13/cobra on 5-May-2016

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
|
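To make the --failure-domains semantics concrete, here is a small standalone Go sketch (illustration only, not code from this commit) showing how the comma-separated default expands into the topology keys that an empty topologyKey stands for in PreferredDuringScheduling pod anti-affinity:

```go
package main

import (
	"fmt"
	"strings"

	"k8s.io/kubernetes/pkg/api"
)

func main() {
	// api.DefaultFailureDomains is
	// "kubernetes.io/hostname,failure-domain.beta.kubernetes.io/zone,failure-domain.beta.kubernetes.io/region".
	// An empty topologyKey in PreferredDuringScheduling pod anti-affinity is
	// treated as each of these keys in turn ("all topologies").
	for _, key := range strings.Split(api.DefaultFailureDomains, ",") {
		fmt.Println(key)
	}
}
```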
@ -333,7 +333,6 @@ func TestExampleObjectSchemas(t *testing.T) {
},
"../docs/user-guide/node-selection": {
"pod": &api.Pod{},
"pod-with-node-affinity": &api.Pod{},
},
"../examples/openshift-origin": {
"openshift-origin-namespace": &api.Namespace{},
@ -129,6 +129,7 @@ experimental-prefix
external-hostname
external-ip
failover-timeout
failure-domains
fake-clientset
file-check-frequency
file-suffix

@ -153,6 +154,7 @@ google-json-key
grace-period
ha-domain
hairpin-mode
hard-pod-affinity-symmetric-weight
healthz-bind-address
healthz-port
horizontal-pod-autoscaler-sync-period
|
|
@ -129,6 +129,9 @@ func init() {
|
|||
DeepCopy_api_PersistentVolumeSpec,
|
||||
DeepCopy_api_PersistentVolumeStatus,
|
||||
DeepCopy_api_Pod,
|
||||
DeepCopy_api_PodAffinity,
|
||||
DeepCopy_api_PodAffinityTerm,
|
||||
DeepCopy_api_PodAntiAffinity,
|
||||
DeepCopy_api_PodAttachOptions,
|
||||
DeepCopy_api_PodCondition,
|
||||
DeepCopy_api_PodExecOptions,
|
||||
|
@ -175,6 +178,7 @@ func init() {
|
|||
DeepCopy_api_Volume,
|
||||
DeepCopy_api_VolumeMount,
|
||||
DeepCopy_api_VolumeSource,
|
||||
DeepCopy_api_WeightedPodAffinityTerm,
|
||||
); err != nil {
|
||||
// if one of the deep copy functions is malformed, detect it immediately.
|
||||
panic(err)
|
||||
|
@ -199,6 +203,24 @@ func DeepCopy_api_Affinity(in Affinity, out *Affinity, c *conversion.Cloner) err
|
|||
} else {
|
||||
out.NodeAffinity = nil
|
||||
}
|
||||
if in.PodAffinity != nil {
|
||||
in, out := in.PodAffinity, &out.PodAffinity
|
||||
*out = new(PodAffinity)
|
||||
if err := DeepCopy_api_PodAffinity(*in, *out, c); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
out.PodAffinity = nil
|
||||
}
|
||||
if in.PodAntiAffinity != nil {
|
||||
in, out := in.PodAntiAffinity, &out.PodAntiAffinity
|
||||
*out = new(PodAntiAffinity)
|
||||
if err := DeepCopy_api_PodAntiAffinity(*in, *out, c); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
out.PodAntiAffinity = nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -1964,6 +1986,79 @@ func DeepCopy_api_Pod(in Pod, out *Pod, c *conversion.Cloner) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func DeepCopy_api_PodAffinity(in PodAffinity, out *PodAffinity, c *conversion.Cloner) error {
|
||||
if in.RequiredDuringSchedulingIgnoredDuringExecution != nil {
|
||||
in, out := in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution
|
||||
*out = make([]PodAffinityTerm, len(in))
|
||||
for i := range in {
|
||||
if err := DeepCopy_api_PodAffinityTerm(in[i], &(*out)[i], c); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
out.RequiredDuringSchedulingIgnoredDuringExecution = nil
|
||||
}
|
||||
if in.PreferredDuringSchedulingIgnoredDuringExecution != nil {
|
||||
in, out := in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution
|
||||
*out = make([]WeightedPodAffinityTerm, len(in))
|
||||
for i := range in {
|
||||
if err := DeepCopy_api_WeightedPodAffinityTerm(in[i], &(*out)[i], c); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
out.PreferredDuringSchedulingIgnoredDuringExecution = nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func DeepCopy_api_PodAffinityTerm(in PodAffinityTerm, out *PodAffinityTerm, c *conversion.Cloner) error {
|
||||
if in.LabelSelector != nil {
|
||||
in, out := in.LabelSelector, &out.LabelSelector
|
||||
*out = new(unversioned.LabelSelector)
|
||||
if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
out.LabelSelector = nil
|
||||
}
|
||||
if in.Namespaces != nil {
|
||||
in, out := in.Namespaces, &out.Namespaces
|
||||
*out = make([]string, len(in))
|
||||
copy(*out, in)
|
||||
} else {
|
||||
out.Namespaces = nil
|
||||
}
|
||||
out.TopologyKey = in.TopologyKey
|
||||
return nil
|
||||
}
|
||||
|
||||
func DeepCopy_api_PodAntiAffinity(in PodAntiAffinity, out *PodAntiAffinity, c *conversion.Cloner) error {
|
||||
if in.RequiredDuringSchedulingIgnoredDuringExecution != nil {
|
||||
in, out := in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution
|
||||
*out = make([]PodAffinityTerm, len(in))
|
||||
for i := range in {
|
||||
if err := DeepCopy_api_PodAffinityTerm(in[i], &(*out)[i], c); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
out.RequiredDuringSchedulingIgnoredDuringExecution = nil
|
||||
}
|
||||
if in.PreferredDuringSchedulingIgnoredDuringExecution != nil {
|
||||
in, out := in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution
|
||||
*out = make([]WeightedPodAffinityTerm, len(in))
|
||||
for i := range in {
|
||||
if err := DeepCopy_api_WeightedPodAffinityTerm(in[i], &(*out)[i], c); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
out.PreferredDuringSchedulingIgnoredDuringExecution = nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func DeepCopy_api_PodAttachOptions(in PodAttachOptions, out *PodAttachOptions, c *conversion.Cloner) error {
|
||||
if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
|
||||
return err
|
||||
|
@ -3037,3 +3132,11 @@ func DeepCopy_api_VolumeSource(in VolumeSource, out *VolumeSource, c *conversion
|
|||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func DeepCopy_api_WeightedPodAffinityTerm(in WeightedPodAffinityTerm, out *WeightedPodAffinityTerm, c *conversion.Cloner) error {
|
||||
out.Weight = in.Weight
|
||||
if err := DeepCopy_api_PodAffinityTerm(in.PodAffinityTerm, &out.PodAffinityTerm, c); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
pkg/api/types.go: file diff suppressed because it is too large (112 changed lines)
|
@ -1159,11 +1159,109 @@ const (
|
|||
NodeSelectorOpLt NodeSelectorOperator = "Lt"
|
||||
)
|
||||
|
||||
// Affinity is a group of affinity scheduling rules, currently
|
||||
// only node affinity, but in the future also inter-pod affinity.
|
||||
// Affinity is a group of affinity scheduling rules.
|
||||
type Affinity struct {
|
||||
// Describes node affinity scheduling rules for the pod.
|
||||
NodeAffinity *NodeAffinity `json:"nodeAffinity,omitempty"`
|
||||
// Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
|
||||
PodAffinity *PodAffinity `json:"podAffinity,omitempty"`
|
||||
// Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
|
||||
PodAntiAffinity *PodAntiAffinity `json:"podAntiAffinity,omitempty"`
|
||||
}
|
||||
|
||||
// Pod affinity is a group of inter pod affinity scheduling rules.
|
||||
type PodAffinity struct {
|
||||
// NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
|
||||
// If the affinity requirements specified by this field are not met at
|
||||
// scheduling time, the pod will not be scheduled onto the node.
|
||||
// If the affinity requirements specified by this field cease to be met
|
||||
// at some point during pod execution (e.g. due to a pod label update), the
|
||||
// system will try to eventually evict the pod from its node.
|
||||
// When there are multiple elements, the lists of nodes corresponding to each
|
||||
// podAffinityTerm are intersected, i.e. all terms must be satisfied.
|
||||
// RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"`
|
||||
// If the affinity requirements specified by this field are not met at
|
||||
// scheduling time, the pod will not be scheduled onto the node.
|
||||
// If the affinity requirements specified by this field cease to be met
|
||||
// at some point during pod execution (e.g. due to a pod label update), the
|
||||
// system may or may not try to eventually evict the pod from its node.
|
||||
// When there are multiple elements, the lists of nodes corresponding to each
|
||||
// podAffinityTerm are intersected, i.e. all terms must be satisfied.
|
||||
RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty"`
|
||||
// The scheduler will prefer to schedule pods to nodes that satisfy
|
||||
// the affinity expressions specified by this field, but it may choose
|
||||
// a node that violates one or more of the expressions. The node that is
|
||||
// most preferred is the one with the greatest sum of weights, i.e.
|
||||
// for each node that meets all of the scheduling requirements (resource
|
||||
// request, requiredDuringScheduling affinity expressions, etc.),
|
||||
// compute a sum by iterating through the elements of this field and adding
|
||||
// "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
|
||||
// node(s) with the highest sum are the most preferred.
|
||||
PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty"`
|
||||
}
|
||||
|
||||
// Pod anti affinity is a group of inter pod anti affinity scheduling rules.
|
||||
type PodAntiAffinity struct {
|
||||
// NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
|
||||
// If the anti-affinity requirements specified by this field are not met at
|
||||
// scheduling time, the pod will not be scheduled onto the node.
|
||||
// If the anti-affinity requirements specified by this field cease to be met
|
||||
// at some point during pod execution (e.g. due to a pod label update), the
|
||||
// system will try to eventually evict the pod from its node.
|
||||
// When there are multiple elements, the lists of nodes corresponding to each
|
||||
// podAffinityTerm are intersected, i.e. all terms must be satisfied.
|
||||
// RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"`
|
||||
// If the anti-affinity requirements specified by this field are not met at
|
||||
// scheduling time, the pod will not be scheduled onto the node.
|
||||
// If the anti-affinity requirements specified by this field cease to be met
|
||||
// at some point during pod execution (e.g. due to a pod label update), the
|
||||
// system may or may not try to eventually evict the pod from its node.
|
||||
// When there are multiple elements, the lists of nodes corresponding to each
|
||||
// podAffinityTerm are intersected, i.e. all terms must be satisfied.
|
||||
RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty"`
|
||||
// The scheduler will prefer to schedule pods to nodes that satisfy
|
||||
// the anti-affinity expressions specified by this field, but it may choose
|
||||
// a node that violates one or more of the expressions. The node that is
|
||||
// most preferred is the one with the greatest sum of weights, i.e.
|
||||
// for each node that meets all of the scheduling requirements (resource
|
||||
// request, requiredDuringScheduling anti-affinity expressions, etc.),
|
||||
// compute a sum by iterating through the elements of this field and adding
|
||||
// "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
|
||||
// node(s) with the highest sum are the most preferred.
|
||||
PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty"`
|
||||
}
|
||||
|
||||
// The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
|
||||
type WeightedPodAffinityTerm struct {
|
||||
// weight associated with matching the corresponding podAffinityTerm,
|
||||
// in the range 1-100.
|
||||
Weight int `json:"weight"`
|
||||
// Required. A pod affinity term, associated with the corresponding weight.
|
||||
PodAffinityTerm PodAffinityTerm `json:"podAffinityTerm"`
|
||||
}
|
||||
|
||||
// Defines a set of pods (namely those matching the labelSelector
|
||||
// relative to the given namespace(s)) that this pod should be
|
||||
// co-located (affinity) or not co-located (anti-affinity) with,
|
||||
// where co-located is defined as running on a node whose value of
|
||||
// the label with key <topologyKey> matches that of any node on which
|
||||
// a pod of the set of pods is running.
|
||||
type PodAffinityTerm struct {
|
||||
// A label query over a set of resources, in this case pods.
|
||||
LabelSelector *unversioned.LabelSelector `json:"labelSelector,omitempty"`
|
||||
// namespaces specifies which namespaces the labelSelector applies to (matches against);
|
||||
// nil list means "this pod's namespace," empty list means "all namespaces"
|
||||
// The json tag here is not "omitempty" since we need to distinguish nil and empty.
|
||||
// See https://golang.org/pkg/encoding/json/#Marshal for more details.
|
||||
Namespaces []string `json:"namespaces"`
|
||||
// This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
|
||||
// the labelSelector in the specified namespaces, where co-located is defined as running on a node
|
||||
// whose value of the label with key topologyKey matches that of any node on which any of the
|
||||
// selected pods is running.
|
||||
// For PreferredDuringScheduling pod anti-affinity, empty topologyKey is interpreted as "all topologies"
|
||||
// ("all topologies" here means all the topologyKeys indicated by scheduler command-line argument --failure-domains);
|
||||
// for affinity and for RequiredDuringScheduling pod anti-affinity, empty topologyKey is not allowed.
|
||||
TopologyKey string `json:"topologyKey,omitempty"`
|
||||
}
|
||||
|
||||
// Node affinity is a group of node affinity scheduling rules.
|
||||
|
@ -2604,4 +2702,14 @@ type RangeAllocation struct {
|
|||
const (
|
||||
// "default-scheduler" is the name of default scheduler.
|
||||
DefaultSchedulerName = "default-scheduler"
|
||||
|
||||
// RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule
|
||||
// corresponding to every RequiredDuringScheduling affinity rule.
|
||||
// When the --hard-pod-affinity-symmetric-weight scheduler flag is not specified,
// DefaultHardPodAffinitySymmetricWeight defines the weight of the implicit PreferredDuringScheduling affinity rule.
|
||||
DefaultHardPodAffinitySymmetricWeight int = 1
|
||||
|
||||
// When the --failure-domains scheduler flag is not specified,
|
||||
// DefaultFailureDomains defines the set of label keys used when TopologyKey is empty in PreferredDuringScheduling anti-affinity.
|
||||
DefaultFailureDomains string = unversioned.LabelHostname + "," + unversioned.LabelZoneFailureDomain + "," + unversioned.LabelZoneRegion
|
||||
)
|
||||
|
|
|
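To illustrate the new API surface (a sketch only, not code from this commit; it assumes the usual imports of "k8s.io/kubernetes/pkg/api" and "k8s.io/kubernetes/pkg/api/unversioned"), an Affinity that co-locates a pod with app=cache pods in the same zone while preferring to avoid nodes that already run an app=web pod could be built from these types like this:

```go
affinity := &api.Affinity{
	PodAffinity: &api.PodAffinity{
		RequiredDuringSchedulingIgnoredDuringExecution: []api.PodAffinityTerm{{
			LabelSelector: &unversioned.LabelSelector{
				MatchLabels: map[string]string{"app": "cache"},
			},
			// nil Namespaces means "this pod's namespace".
			TopologyKey: unversioned.LabelZoneFailureDomain,
		}},
	},
	PodAntiAffinity: &api.PodAntiAffinity{
		PreferredDuringSchedulingIgnoredDuringExecution: []api.WeightedPodAffinityTerm{{
			Weight: 100, // weights are in the range 1-100
			PodAffinityTerm: api.PodAffinityTerm{
				LabelSelector: &unversioned.LabelSelector{
					MatchLabels: map[string]string{"app": "web"},
				},
				// An empty TopologyKey in preferred anti-affinity means
				// "all topologies", i.e. every key listed in --failure-domains.
			},
		}},
	},
}
_ = affinity // how this is attached to a pod (field vs. annotation) depends on the API version in use
```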
@ -17,17 +17,24 @@ limitations under the License.
|
|||
package validation
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api/unversioned"
|
||||
apivalidation "k8s.io/kubernetes/pkg/api/validation"
|
||||
"k8s.io/kubernetes/pkg/util/validation"
|
||||
"k8s.io/kubernetes/pkg/util/validation/field"
|
||||
)
|
||||
|
||||
var (
|
||||
labelValueErrorMsg string = fmt.Sprintf(`must have at most %d characters, matching regex %s: e.g. "MyValue" or ""`, validation.LabelValueMaxLength, validation.LabelValueFmt)
|
||||
qualifiedNameErrorMsg string = fmt.Sprintf(`must be a qualified name (at most %d characters, matching regex %s), with an optional DNS subdomain prefix (at most %d characters, matching regex %s) and slash (/): e.g. "MyName" or "example.com/MyName"`, validation.QualifiedNameMaxLength, validation.QualifiedNameFmt, validation.DNS1123SubdomainMaxLength, validation.DNS1123SubdomainFmt)
|
||||
)
|
||||
|
||||
func ValidateLabelSelector(ps *unversioned.LabelSelector, fldPath *field.Path) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
if ps == nil {
|
||||
return allErrs
|
||||
}
|
||||
allErrs = append(allErrs, apivalidation.ValidateLabels(ps.MatchLabels, fldPath.Child("matchLabels"))...)
|
||||
allErrs = append(allErrs, ValidateLabels(ps.MatchLabels, fldPath.Child("matchLabels"))...)
|
||||
for i, expr := range ps.MatchExpressions {
|
||||
allErrs = append(allErrs, ValidateLabelSelectorRequirement(expr, fldPath.Child("matchExpressions").Index(i))...)
|
||||
}
|
||||
|
@ -48,6 +55,27 @@ func ValidateLabelSelectorRequirement(sr unversioned.LabelSelectorRequirement, f
|
|||
default:
|
||||
allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), sr.Operator, "not a valid selector operator"))
|
||||
}
|
||||
allErrs = append(allErrs, apivalidation.ValidateLabelName(sr.Key, fldPath.Child("key"))...)
|
||||
allErrs = append(allErrs, ValidateLabelName(sr.Key, fldPath.Child("key"))...)
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// ValidateLabelName validates that the label name is correctly defined.
|
||||
func ValidateLabelName(labelName string, fldPath *field.Path) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
if !validation.IsQualifiedName(labelName) {
|
||||
allErrs = append(allErrs, field.Invalid(fldPath, labelName, qualifiedNameErrorMsg))
|
||||
}
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// ValidateLabels validates that a set of labels are correctly defined.
|
||||
func ValidateLabels(labels map[string]string, fldPath *field.Path) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
for k, v := range labels {
|
||||
allErrs = append(allErrs, ValidateLabelName(k, fldPath)...)
|
||||
if !validation.IsValidLabelValue(v) {
|
||||
allErrs = append(allErrs, field.Invalid(fldPath, v, labelValueErrorMsg))
|
||||
}
|
||||
}
|
||||
return allErrs
|
||||
}
|
||||
|
|
|
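A brief usage sketch of the validation helpers added above (illustrative only; the field path and selector contents are hypothetical, and fmt is assumed to be imported):

```go
selector := &unversioned.LabelSelector{
	MatchLabels: map[string]string{"app": "cache"},
}
if errs := ValidateLabelSelector(selector, field.NewPath("labelSelector")); len(errs) != 0 {
	// Each entry in the returned ErrorList pinpoints the offending key or value.
	fmt.Printf("invalid selector: %v\n", errs)
}
```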
@ -0,0 +1,85 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package validation
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"k8s.io/kubernetes/pkg/util/validation/field"
|
||||
)
|
||||
|
||||
func TestValidateLabels(t *testing.T) {
|
||||
successCases := []map[string]string{
|
||||
{"simple": "bar"},
|
||||
{"now-with-dashes": "bar"},
|
||||
{"1-starts-with-num": "bar"},
|
||||
{"1234": "bar"},
|
||||
{"simple/simple": "bar"},
|
||||
{"now-with-dashes/simple": "bar"},
|
||||
{"now-with-dashes/now-with-dashes": "bar"},
|
||||
{"now.with.dots/simple": "bar"},
|
||||
{"now-with.dashes-and.dots/simple": "bar"},
|
||||
{"1-num.2-num/3-num": "bar"},
|
||||
{"1234/5678": "bar"},
|
||||
{"1.2.3.4/5678": "bar"},
|
||||
{"UpperCaseAreOK123": "bar"},
|
||||
{"goodvalue": "123_-.BaR"},
|
||||
}
|
||||
for i := range successCases {
|
||||
errs := ValidateLabels(successCases[i], field.NewPath("field"))
|
||||
if len(errs) != 0 {
|
||||
t.Errorf("case[%d] expected success, got %#v", i, errs)
|
||||
}
|
||||
}
|
||||
|
||||
labelNameErrorCases := []map[string]string{
|
||||
{"nospecialchars^=@": "bar"},
|
||||
{"cantendwithadash-": "bar"},
|
||||
{"only/one/slash": "bar"},
|
||||
{strings.Repeat("a", 254): "bar"},
|
||||
}
|
||||
for i := range labelNameErrorCases {
|
||||
errs := ValidateLabels(labelNameErrorCases[i], field.NewPath("field"))
|
||||
if len(errs) != 1 {
|
||||
t.Errorf("case[%d] expected failure", i)
|
||||
} else {
|
||||
detail := errs[0].Detail
|
||||
if detail != qualifiedNameErrorMsg {
|
||||
t.Errorf("error detail %s should be equal %s", detail, qualifiedNameErrorMsg)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
labelValueErrorCases := []map[string]string{
|
||||
{"toolongvalue": strings.Repeat("a", 64)},
|
||||
{"backslashesinvalue": "some\\bad\\value"},
|
||||
{"nocommasallowed": "bad,value"},
|
||||
{"strangecharsinvalue": "?#$notsogood"},
|
||||
}
|
||||
for i := range labelValueErrorCases {
|
||||
errs := ValidateLabels(labelValueErrorCases[i], field.NewPath("field"))
|
||||
if len(errs) != 1 {
|
||||
t.Errorf("case[%d] expected failure", i)
|
||||
} else {
|
||||
detail := errs[0].Detail
|
||||
if detail != labelValueErrorMsg {
|
||||
t.Errorf("error detail %s should be equal %s", detail, labelValueErrorMsg)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -16,7 +16,12 @@ limitations under the License.
|
|||
|
||||
package unversioned
|
||||
|
||||
const LabelHostname = "kubernetes.io/hostname"
|
||||
const LabelZoneFailureDomain = "failure-domain.beta.kubernetes.io/zone"
|
||||
const LabelZoneRegion = "failure-domain.beta.kubernetes.io/region"
|
||||
const LabelInstanceType = "beta.kubernetes.io/instance-type"
|
||||
const (
|
||||
// If you add a new topology domain here, also consider adding it to the set of default values
|
||||
// for the scheduler's --failure-domain command-line argument.
|
||||
LabelHostname = "kubernetes.io/hostname"
|
||||
LabelZoneFailureDomain = "failure-domain.beta.kubernetes.io/zone"
|
||||
LabelZoneRegion = "failure-domain.beta.kubernetes.io/region"
|
||||
|
||||
LabelInstanceType = "beta.kubernetes.io/instance-type"
|
||||
)
|
||||
|
|
|
@ -220,6 +220,12 @@ func init() {
|
|||
Convert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus,
|
||||
Convert_v1_Pod_To_api_Pod,
|
||||
Convert_api_Pod_To_v1_Pod,
|
||||
Convert_v1_PodAffinity_To_api_PodAffinity,
|
||||
Convert_api_PodAffinity_To_v1_PodAffinity,
|
||||
Convert_v1_PodAffinityTerm_To_api_PodAffinityTerm,
|
||||
Convert_api_PodAffinityTerm_To_v1_PodAffinityTerm,
|
||||
Convert_v1_PodAntiAffinity_To_api_PodAntiAffinity,
|
||||
Convert_api_PodAntiAffinity_To_v1_PodAntiAffinity,
|
||||
Convert_v1_PodAttachOptions_To_api_PodAttachOptions,
|
||||
Convert_api_PodAttachOptions_To_v1_PodAttachOptions,
|
||||
Convert_v1_PodCondition_To_api_PodCondition,
|
||||
|
@ -312,6 +318,8 @@ func init() {
|
|||
Convert_api_VolumeMount_To_v1_VolumeMount,
|
||||
Convert_v1_VolumeSource_To_api_VolumeSource,
|
||||
Convert_api_VolumeSource_To_v1_VolumeSource,
|
||||
Convert_v1_WeightedPodAffinityTerm_To_api_WeightedPodAffinityTerm,
|
||||
Convert_api_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm,
|
||||
); err != nil {
|
||||
// if one of the conversion functions is malformed, detect it immediately.
|
||||
panic(err)
|
||||
|
@ -361,6 +369,24 @@ func autoConvert_v1_Affinity_To_api_Affinity(in *Affinity, out *api.Affinity, s
|
|||
} else {
|
||||
out.NodeAffinity = nil
|
||||
}
|
||||
if in.PodAffinity != nil {
|
||||
in, out := &in.PodAffinity, &out.PodAffinity
|
||||
*out = new(api.PodAffinity)
|
||||
if err := Convert_v1_PodAffinity_To_api_PodAffinity(*in, *out, s); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
out.PodAffinity = nil
|
||||
}
|
||||
if in.PodAntiAffinity != nil {
|
||||
in, out := &in.PodAntiAffinity, &out.PodAntiAffinity
|
||||
*out = new(api.PodAntiAffinity)
|
||||
if err := Convert_v1_PodAntiAffinity_To_api_PodAntiAffinity(*in, *out, s); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
out.PodAntiAffinity = nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -381,6 +407,24 @@ func autoConvert_api_Affinity_To_v1_Affinity(in *api.Affinity, out *Affinity, s
|
|||
} else {
|
||||
out.NodeAffinity = nil
|
||||
}
|
||||
if in.PodAffinity != nil {
|
||||
in, out := &in.PodAffinity, &out.PodAffinity
|
||||
*out = new(PodAffinity)
|
||||
if err := Convert_api_PodAffinity_To_v1_PodAffinity(*in, *out, s); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
out.PodAffinity = nil
|
||||
}
|
||||
if in.PodAntiAffinity != nil {
|
||||
in, out := &in.PodAntiAffinity, &out.PodAntiAffinity
|
||||
*out = new(PodAntiAffinity)
|
||||
if err := Convert_api_PodAntiAffinity_To_v1_PodAntiAffinity(*in, *out, s); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
out.PodAntiAffinity = nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -5010,6 +5054,196 @@ func autoConvert_api_Pod_To_v1_Pod(in *api.Pod, out *Pod, s conversion.Scope) er
|
|||
return nil
|
||||
}
|
||||
|
||||
func autoConvert_v1_PodAffinity_To_api_PodAffinity(in *PodAffinity, out *api.PodAffinity, s conversion.Scope) error {
|
||||
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
|
||||
defaulting.(func(*PodAffinity))(in)
|
||||
}
|
||||
if in.RequiredDuringSchedulingIgnoredDuringExecution != nil {
|
||||
in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution
|
||||
*out = make([]api.PodAffinityTerm, len(*in))
|
||||
for i := range *in {
|
||||
if err := Convert_v1_PodAffinityTerm_To_api_PodAffinityTerm(&(*in)[i], &(*out)[i], s); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
out.RequiredDuringSchedulingIgnoredDuringExecution = nil
|
||||
}
|
||||
if in.PreferredDuringSchedulingIgnoredDuringExecution != nil {
|
||||
in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution
|
||||
*out = make([]api.WeightedPodAffinityTerm, len(*in))
|
||||
for i := range *in {
|
||||
if err := Convert_v1_WeightedPodAffinityTerm_To_api_WeightedPodAffinityTerm(&(*in)[i], &(*out)[i], s); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
out.PreferredDuringSchedulingIgnoredDuringExecution = nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func Convert_v1_PodAffinity_To_api_PodAffinity(in *PodAffinity, out *api.PodAffinity, s conversion.Scope) error {
|
||||
return autoConvert_v1_PodAffinity_To_api_PodAffinity(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_api_PodAffinity_To_v1_PodAffinity(in *api.PodAffinity, out *PodAffinity, s conversion.Scope) error {
|
||||
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
|
||||
defaulting.(func(*api.PodAffinity))(in)
|
||||
}
|
||||
if in.RequiredDuringSchedulingIgnoredDuringExecution != nil {
|
||||
in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution
|
||||
*out = make([]PodAffinityTerm, len(*in))
|
||||
for i := range *in {
|
||||
if err := Convert_api_PodAffinityTerm_To_v1_PodAffinityTerm(&(*in)[i], &(*out)[i], s); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
out.RequiredDuringSchedulingIgnoredDuringExecution = nil
|
||||
}
|
||||
if in.PreferredDuringSchedulingIgnoredDuringExecution != nil {
|
||||
in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution
|
||||
*out = make([]WeightedPodAffinityTerm, len(*in))
|
||||
for i := range *in {
|
||||
if err := Convert_api_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(&(*in)[i], &(*out)[i], s); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
out.PreferredDuringSchedulingIgnoredDuringExecution = nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func Convert_api_PodAffinity_To_v1_PodAffinity(in *api.PodAffinity, out *PodAffinity, s conversion.Scope) error {
|
||||
return autoConvert_api_PodAffinity_To_v1_PodAffinity(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1_PodAffinityTerm_To_api_PodAffinityTerm(in *PodAffinityTerm, out *api.PodAffinityTerm, s conversion.Scope) error {
|
||||
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
|
||||
defaulting.(func(*PodAffinityTerm))(in)
|
||||
}
|
||||
if in.LabelSelector != nil {
|
||||
in, out := &in.LabelSelector, &out.LabelSelector
|
||||
*out = new(unversioned.LabelSelector)
|
||||
// TODO: Inefficient conversion - can we improve it?
|
||||
if err := s.Convert(*in, *out, 0); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
out.LabelSelector = nil
|
||||
}
|
||||
if in.Namespaces != nil {
|
||||
in, out := &in.Namespaces, &out.Namespaces
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
} else {
|
||||
out.Namespaces = nil
|
||||
}
|
||||
out.TopologyKey = in.TopologyKey
|
||||
return nil
|
||||
}
|
||||
|
||||
func Convert_v1_PodAffinityTerm_To_api_PodAffinityTerm(in *PodAffinityTerm, out *api.PodAffinityTerm, s conversion.Scope) error {
|
||||
return autoConvert_v1_PodAffinityTerm_To_api_PodAffinityTerm(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_api_PodAffinityTerm_To_v1_PodAffinityTerm(in *api.PodAffinityTerm, out *PodAffinityTerm, s conversion.Scope) error {
|
||||
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
|
||||
defaulting.(func(*api.PodAffinityTerm))(in)
|
||||
}
|
||||
if in.LabelSelector != nil {
|
||||
in, out := &in.LabelSelector, &out.LabelSelector
|
||||
*out = new(unversioned.LabelSelector)
|
||||
// TODO: Inefficient conversion - can we improve it?
|
||||
if err := s.Convert(*in, *out, 0); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
out.LabelSelector = nil
|
||||
}
|
||||
if in.Namespaces != nil {
|
||||
in, out := &in.Namespaces, &out.Namespaces
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
} else {
|
||||
out.Namespaces = nil
|
||||
}
|
||||
out.TopologyKey = in.TopologyKey
|
||||
return nil
|
||||
}
|
||||
|
||||
func Convert_api_PodAffinityTerm_To_v1_PodAffinityTerm(in *api.PodAffinityTerm, out *PodAffinityTerm, s conversion.Scope) error {
|
||||
return autoConvert_api_PodAffinityTerm_To_v1_PodAffinityTerm(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1_PodAntiAffinity_To_api_PodAntiAffinity(in *PodAntiAffinity, out *api.PodAntiAffinity, s conversion.Scope) error {
|
||||
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
|
||||
defaulting.(func(*PodAntiAffinity))(in)
|
||||
}
|
||||
if in.RequiredDuringSchedulingIgnoredDuringExecution != nil {
|
||||
in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution
|
||||
*out = make([]api.PodAffinityTerm, len(*in))
|
||||
for i := range *in {
|
||||
if err := Convert_v1_PodAffinityTerm_To_api_PodAffinityTerm(&(*in)[i], &(*out)[i], s); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
out.RequiredDuringSchedulingIgnoredDuringExecution = nil
|
||||
}
|
||||
if in.PreferredDuringSchedulingIgnoredDuringExecution != nil {
|
||||
in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution
|
||||
*out = make([]api.WeightedPodAffinityTerm, len(*in))
|
||||
for i := range *in {
|
||||
if err := Convert_v1_WeightedPodAffinityTerm_To_api_WeightedPodAffinityTerm(&(*in)[i], &(*out)[i], s); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
out.PreferredDuringSchedulingIgnoredDuringExecution = nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func Convert_v1_PodAntiAffinity_To_api_PodAntiAffinity(in *PodAntiAffinity, out *api.PodAntiAffinity, s conversion.Scope) error {
|
||||
return autoConvert_v1_PodAntiAffinity_To_api_PodAntiAffinity(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_api_PodAntiAffinity_To_v1_PodAntiAffinity(in *api.PodAntiAffinity, out *PodAntiAffinity, s conversion.Scope) error {
|
||||
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
|
||||
defaulting.(func(*api.PodAntiAffinity))(in)
|
||||
}
|
||||
if in.RequiredDuringSchedulingIgnoredDuringExecution != nil {
|
||||
in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution
|
||||
*out = make([]PodAffinityTerm, len(*in))
|
||||
for i := range *in {
|
||||
if err := Convert_api_PodAffinityTerm_To_v1_PodAffinityTerm(&(*in)[i], &(*out)[i], s); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
out.RequiredDuringSchedulingIgnoredDuringExecution = nil
|
||||
}
|
||||
if in.PreferredDuringSchedulingIgnoredDuringExecution != nil {
|
||||
in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution
|
||||
*out = make([]WeightedPodAffinityTerm, len(*in))
|
||||
for i := range *in {
|
||||
if err := Convert_api_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(&(*in)[i], &(*out)[i], s); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
out.PreferredDuringSchedulingIgnoredDuringExecution = nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func Convert_api_PodAntiAffinity_To_v1_PodAntiAffinity(in *api.PodAntiAffinity, out *PodAntiAffinity, s conversion.Scope) error {
|
||||
return autoConvert_api_PodAntiAffinity_To_v1_PodAntiAffinity(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1_PodAttachOptions_To_api_PodAttachOptions(in *PodAttachOptions, out *api.PodAttachOptions, s conversion.Scope) error {
|
||||
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
|
||||
defaulting.(func(*PodAttachOptions))(in)
|
||||
|
@ -7523,3 +7757,33 @@ func autoConvert_api_VolumeSource_To_v1_VolumeSource(in *api.VolumeSource, out *
|
|||
func Convert_api_VolumeSource_To_v1_VolumeSource(in *api.VolumeSource, out *VolumeSource, s conversion.Scope) error {
|
||||
return autoConvert_api_VolumeSource_To_v1_VolumeSource(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1_WeightedPodAffinityTerm_To_api_WeightedPodAffinityTerm(in *WeightedPodAffinityTerm, out *api.WeightedPodAffinityTerm, s conversion.Scope) error {
|
||||
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
|
||||
defaulting.(func(*WeightedPodAffinityTerm))(in)
|
||||
}
|
||||
out.Weight = int(in.Weight)
|
||||
if err := Convert_v1_PodAffinityTerm_To_api_PodAffinityTerm(&in.PodAffinityTerm, &out.PodAffinityTerm, s); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func Convert_v1_WeightedPodAffinityTerm_To_api_WeightedPodAffinityTerm(in *WeightedPodAffinityTerm, out *api.WeightedPodAffinityTerm, s conversion.Scope) error {
|
||||
return autoConvert_v1_WeightedPodAffinityTerm_To_api_WeightedPodAffinityTerm(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_api_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(in *api.WeightedPodAffinityTerm, out *WeightedPodAffinityTerm, s conversion.Scope) error {
|
||||
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
|
||||
defaulting.(func(*api.WeightedPodAffinityTerm))(in)
|
||||
}
|
||||
out.Weight = int32(in.Weight)
|
||||
if err := Convert_api_PodAffinityTerm_To_v1_PodAffinityTerm(&in.PodAffinityTerm, &out.PodAffinityTerm, s); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func Convert_api_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(in *api.WeightedPodAffinityTerm, out *WeightedPodAffinityTerm, s conversion.Scope) error {
|
||||
return autoConvert_api_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(in, out, s)
|
||||
}
|
||||
|
|
|
@ -126,6 +126,9 @@ func init() {
|
|||
DeepCopy_v1_PersistentVolumeSpec,
|
||||
DeepCopy_v1_PersistentVolumeStatus,
|
||||
DeepCopy_v1_Pod,
|
||||
DeepCopy_v1_PodAffinity,
|
||||
DeepCopy_v1_PodAffinityTerm,
|
||||
DeepCopy_v1_PodAntiAffinity,
|
||||
DeepCopy_v1_PodAttachOptions,
|
||||
DeepCopy_v1_PodCondition,
|
||||
DeepCopy_v1_PodExecOptions,
|
||||
|
@ -172,6 +175,7 @@ func init() {
|
|||
DeepCopy_v1_Volume,
|
||||
DeepCopy_v1_VolumeMount,
|
||||
DeepCopy_v1_VolumeSource,
|
||||
DeepCopy_v1_WeightedPodAffinityTerm,
|
||||
); err != nil {
|
||||
// if one of the deep copy functions is malformed, detect it immediately.
|
||||
panic(err)
|
||||
|
@ -196,6 +200,24 @@ func DeepCopy_v1_Affinity(in Affinity, out *Affinity, c *conversion.Cloner) erro
|
|||
} else {
|
||||
out.NodeAffinity = nil
|
||||
}
|
||||
if in.PodAffinity != nil {
|
||||
in, out := in.PodAffinity, &out.PodAffinity
|
||||
*out = new(PodAffinity)
|
||||
if err := DeepCopy_v1_PodAffinity(*in, *out, c); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
out.PodAffinity = nil
|
||||
}
|
||||
if in.PodAntiAffinity != nil {
|
||||
in, out := in.PodAntiAffinity, &out.PodAntiAffinity
|
||||
*out = new(PodAntiAffinity)
|
||||
if err := DeepCopy_v1_PodAntiAffinity(*in, *out, c); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
out.PodAntiAffinity = nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -1911,6 +1933,79 @@ func DeepCopy_v1_Pod(in Pod, out *Pod, c *conversion.Cloner) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func DeepCopy_v1_PodAffinity(in PodAffinity, out *PodAffinity, c *conversion.Cloner) error {
|
||||
if in.RequiredDuringSchedulingIgnoredDuringExecution != nil {
|
||||
in, out := in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution
|
||||
*out = make([]PodAffinityTerm, len(in))
|
||||
for i := range in {
|
||||
if err := DeepCopy_v1_PodAffinityTerm(in[i], &(*out)[i], c); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
out.RequiredDuringSchedulingIgnoredDuringExecution = nil
|
||||
}
|
||||
if in.PreferredDuringSchedulingIgnoredDuringExecution != nil {
|
||||
in, out := in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution
|
||||
*out = make([]WeightedPodAffinityTerm, len(in))
|
||||
for i := range in {
|
||||
if err := DeepCopy_v1_WeightedPodAffinityTerm(in[i], &(*out)[i], c); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
out.PreferredDuringSchedulingIgnoredDuringExecution = nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func DeepCopy_v1_PodAffinityTerm(in PodAffinityTerm, out *PodAffinityTerm, c *conversion.Cloner) error {
|
||||
if in.LabelSelector != nil {
|
||||
in, out := in.LabelSelector, &out.LabelSelector
|
||||
*out = new(unversioned.LabelSelector)
|
||||
if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
out.LabelSelector = nil
|
||||
}
|
||||
if in.Namespaces != nil {
|
||||
in, out := in.Namespaces, &out.Namespaces
|
||||
*out = make([]string, len(in))
|
||||
copy(*out, in)
|
||||
} else {
|
||||
out.Namespaces = nil
|
||||
}
|
||||
out.TopologyKey = in.TopologyKey
|
||||
return nil
|
||||
}
|
||||
|
||||
func DeepCopy_v1_PodAntiAffinity(in PodAntiAffinity, out *PodAntiAffinity, c *conversion.Cloner) error {
|
||||
if in.RequiredDuringSchedulingIgnoredDuringExecution != nil {
|
||||
in, out := in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution
|
||||
*out = make([]PodAffinityTerm, len(in))
|
||||
for i := range in {
|
||||
if err := DeepCopy_v1_PodAffinityTerm(in[i], &(*out)[i], c); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
out.RequiredDuringSchedulingIgnoredDuringExecution = nil
|
||||
}
|
||||
if in.PreferredDuringSchedulingIgnoredDuringExecution != nil {
|
||||
in, out := in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution
|
||||
*out = make([]WeightedPodAffinityTerm, len(in))
|
||||
for i := range in {
|
||||
if err := DeepCopy_v1_WeightedPodAffinityTerm(in[i], &(*out)[i], c); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
out.PreferredDuringSchedulingIgnoredDuringExecution = nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func DeepCopy_v1_PodAttachOptions(in PodAttachOptions, out *PodAttachOptions, c *conversion.Cloner) error {
|
||||
if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
|
||||
return err
|
||||
|
@ -2998,3 +3093,11 @@ func DeepCopy_v1_VolumeSource(in VolumeSource, out *VolumeSource, c *conversion.
|
|||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func DeepCopy_v1_WeightedPodAffinityTerm(in WeightedPodAffinityTerm, out *WeightedPodAffinityTerm, c *conversion.Cloner) error {
|
||||
out.Weight = in.Weight
|
||||
if err := DeepCopy_v1_PodAffinityTerm(in.PodAffinityTerm, &out.PodAffinityTerm, c); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
File diff suppressed because it is too large
|
@ -59,11 +59,16 @@ message AWSElasticBlockStoreVolumeSource {
|
|||
optional bool readOnly = 4;
|
||||
}
|
||||
|
||||
// Affinity is a group of affinity scheduling rules, currently
|
||||
// only node affinity, but in the future also inter-pod affinity.
|
||||
// Affinity is a group of affinity scheduling rules.
|
||||
message Affinity {
|
||||
// Describes node affinity scheduling rules for the pod.
|
||||
optional NodeAffinity nodeAffinity = 1;
|
||||
|
||||
// Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
|
||||
optional PodAffinity podAffinity = 2;
|
||||
|
||||
// Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
|
||||
optional PodAntiAffinity podAntiAffinity = 3;
|
||||
}
|
||||
|
||||
// AzureFile represents an Azure File Service mount on the host and bind mount to the pod.
|
||||
|
@ -1726,6 +1731,96 @@ message Pod {
|
|||
optional PodStatus status = 3;
|
||||
}
|
||||
|
||||
// Pod affinity is a group of inter pod affinity scheduling rules.
|
||||
message PodAffinity {
|
||||
// NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
|
||||
// If the affinity requirements specified by this field are not met at
|
||||
// scheduling time, the pod will not be scheduled onto the node.
|
||||
// If the affinity requirements specified by this field cease to be met
|
||||
// at some point during pod execution (e.g. due to a pod label update), the
|
||||
// system will try to eventually evict the pod from its node.
|
||||
// When there are multiple elements, the lists of nodes corresponding to each
|
||||
// podAffinityTerm are intersected, i.e. all terms must be satisfied.
|
||||
// RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"`
|
||||
// If the affinity requirements specified by this field are not met at
|
||||
// scheduling time, the pod will not be scheduled onto the node.
|
||||
// If the affinity requirements specified by this field cease to be met
|
||||
// at some point during pod execution (e.g. due to a pod label update), the
|
||||
// system may or may not try to eventually evict the pod from its node.
|
||||
// When there are multiple elements, the lists of nodes corresponding to each
|
||||
// podAffinityTerm are intersected, i.e. all terms must be satisfied.
|
||||
repeated PodAffinityTerm requiredDuringSchedulingIgnoredDuringExecution = 1;
|
||||
|
||||
// The scheduler will prefer to schedule pods to nodes that satisfy
|
||||
// the affinity expressions specified by this field, but it may choose
|
||||
// a node that violates one or more of the expressions. The node that is
|
||||
// most preferred is the one with the greatest sum of weights, i.e.
|
||||
// for each node that meets all of the scheduling requirements (resource
|
||||
// request, requiredDuringScheduling affinity expressions, etc.),
|
||||
// compute a sum by iterating through the elements of this field and adding
|
||||
// "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
|
||||
// node(s) with the highest sum are the most preferred.
|
||||
repeated WeightedPodAffinityTerm preferredDuringSchedulingIgnoredDuringExecution = 2;
|
||||
}
|
||||
|
||||
// Defines a set of pods (namely those matching the labelSelector
|
||||
// relative to the given namespace(s)) that this pod should be
|
||||
// co-located (affinity) or not co-located (anti-affinity) with,
|
||||
// where co-located is defined as running on a node whose value of
|
||||
// the label with key <topologyKey> matches that of any node on which
// a pod of the set of pods is running.
|
||||
message PodAffinityTerm {
|
||||
// A label query over a set of resources, in this case pods.
|
||||
optional k8s.io.kubernetes.pkg.api.unversioned.LabelSelector labelSelector = 1;
|
||||
|
||||
// namespaces specifies which namespaces the labelSelector applies to (matches against);
|
||||
// nil list means "this pod's namespace," empty list means "all namespaces"
|
||||
// The json tag here is not "omitempty" since we need to distinguish nil and empty.
|
||||
// See https://golang.org/pkg/encoding/json/#Marshal for more details.
|
||||
repeated string namespaces = 2;
|
||||
|
||||
// This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
|
||||
// the labelSelector in the specified namespaces, where co-located is defined as running on a node
|
||||
// whose value of the label with key topologyKey matches that of any node on which any of the
|
||||
// selected pods is running.
|
||||
// For PreferredDuringScheduling pod anti-affinity, empty topologyKey is interpreted as "all topologies"
|
||||
// ("all topologies" here means all the topologyKeys indicated by scheduler command-line argument --failure-domains);
|
||||
// for affinity and for RequiredDuringScheduling pod anti-affinity, empty topologyKey is not allowed.
|
||||
optional string topologyKey = 3;
|
||||
}
|
||||
|
||||
// Pod anti affinity is a group of inter pod anti affinity scheduling rules.
|
||||
message PodAntiAffinity {
|
||||
// NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
|
||||
// If the anti-affinity requirements specified by this field are not met at
|
||||
// scheduling time, the pod will not be scheduled onto the node.
|
||||
// If the anti-affinity requirements specified by this field cease to be met
|
||||
// at some point during pod execution (e.g. due to a pod label update), the
|
||||
// system will try to eventually evict the pod from its node.
|
||||
// When there are multiple elements, the lists of nodes corresponding to each
|
||||
// podAffinityTerm are intersected, i.e. all terms must be satisfied.
|
||||
// RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"`
|
||||
// If the anti-affinity requirements specified by this field are not met at
|
||||
// scheduling time, the pod will not be scheduled onto the node.
|
||||
// If the anti-affinity requirements specified by this field cease to be met
|
||||
// at some point during pod execution (e.g. due to a pod label update), the
|
||||
// system may or may not try to eventually evict the pod from its node.
|
||||
// When there are multiple elements, the lists of nodes corresponding to each
|
||||
// podAffinityTerm are intersected, i.e. all terms must be satisfied.
|
||||
repeated PodAffinityTerm requiredDuringSchedulingIgnoredDuringExecution = 1;
|
||||
|
||||
// The scheduler will prefer to schedule pods to nodes that satisfy
|
||||
// the anti-affinity expressions specified by this field, but it may choose
|
||||
// a node that violates one or more of the expressions. The node that is
|
||||
// most preferred is the one with the greatest sum of weights, i.e.
|
||||
// for each node that meets all of the scheduling requirements (resource
|
||||
// request, requiredDuringScheduling anti-affinity expressions, etc.),
|
||||
// compute a sum by iterating through the elements of this field and adding
|
||||
// "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
|
||||
// node(s) with the highest sum are the most preferred.
|
||||
repeated WeightedPodAffinityTerm preferredDuringSchedulingIgnoredDuringExecution = 2;
|
||||
}
|
||||
|
||||
// PodAttachOptions is the query options to a Pod's remote attach call.
|
||||
// ---
|
||||
// TODO: merge w/ PodExecOptions below for stdin, stdout, etc
|
||||
|
@ -2696,3 +2791,13 @@ message VolumeSource {
|
|||
optional ConfigMapVolumeSource configMap = 19;
|
||||
}
|
||||
|
||||
// The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
|
||||
message WeightedPodAffinityTerm {
|
||||
// weight associated with matching the corresponding podAffinityTerm,
|
||||
// in the range 1-100.
|
||||
optional int32 weight = 1;
|
||||
|
||||
// Required. A pod affinity term, associated with the corresponding weight.
|
||||
optional PodAffinityTerm podAffinityTerm = 2;
|
||||
}
|
||||
|
||||
|
|
File diff suppressed because it is too large
|
@ -1394,11 +1394,109 @@ const (
|
|||
NodeSelectorOpLt NodeSelectorOperator = "Lt"
|
||||
)
|
||||
|
||||
// Affinity is a group of affinity scheduling rules, currently
|
||||
// only node affinity, but in the future also inter-pod affinity.
|
||||
// Affinity is a group of affinity scheduling rules.
|
||||
type Affinity struct {
|
||||
// Describes node affinity scheduling rules for the pod.
|
||||
NodeAffinity *NodeAffinity `json:"nodeAffinity,omitempty" protobuf:"bytes,1,opt,name=nodeAffinity"`
|
||||
// Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
|
||||
PodAffinity *PodAffinity `json:"podAffinity,omitempty" protobuf:"bytes,2,opt,name=podAffinity"`
|
||||
// Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
|
||||
PodAntiAffinity *PodAntiAffinity `json:"podAntiAffinity,omitempty" protobuf:"bytes,3,opt,name=podAntiAffinity"`
|
||||
}
|
||||
|
||||
// Pod affinity is a group of inter pod affinity scheduling rules.
|
||||
type PodAffinity struct {
|
||||
// NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
|
||||
// If the affinity requirements specified by this field are not met at
|
||||
// scheduling time, the pod will not be scheduled onto the node.
|
||||
// If the affinity requirements specified by this field cease to be met
|
||||
// at some point during pod execution (e.g. due to a pod label update), the
|
||||
// system will try to eventually evict the pod from its node.
|
||||
// When there are multiple elements, the lists of nodes corresponding to each
|
||||
// podAffinityTerm are intersected, i.e. all terms must be satisfied.
|
||||
// RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"`
|
||||
// If the affinity requirements specified by this field are not met at
|
||||
// scheduling time, the pod will not be scheduled onto the node.
|
||||
// If the affinity requirements specified by this field cease to be met
|
||||
// at some point during pod execution (e.g. due to a pod label update), the
|
||||
// system may or may not try to eventually evict the pod from its node.
|
||||
// When there are multiple elements, the lists of nodes corresponding to each
|
||||
// podAffinityTerm are intersected, i.e. all terms must be satisfied.
|
||||
RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,1,rep,name=requiredDuringSchedulingIgnoredDuringExecution"`
|
||||
// The scheduler will prefer to schedule pods to nodes that satisfy
|
||||
// the affinity expressions specified by this field, but it may choose
|
||||
// a node that violates one or more of the expressions. The node that is
|
||||
// most preferred is the one with the greatest sum of weights, i.e.
|
||||
// for each node that meets all of the scheduling requirements (resource
|
||||
// request, requiredDuringScheduling affinity expressions, etc.),
|
||||
// compute a sum by iterating through the elements of this field and adding
|
||||
// "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
|
||||
// node(s) with the highest sum are the most preferred.
|
||||
PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,2,rep,name=preferredDuringSchedulingIgnoredDuringExecution"`
|
||||
}
|
||||
|
||||
// Pod anti affinity is a group of inter pod anti affinity scheduling rules.
|
||||
type PodAntiAffinity struct {
|
||||
// NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
|
||||
// If the anti-affinity requirements specified by this field are not met at
|
||||
// scheduling time, the pod will not be scheduled onto the node.
|
||||
// If the anti-affinity requirements specified by this field cease to be met
|
||||
// at some point during pod execution (e.g. due to a pod label update), the
|
||||
// system will try to eventually evict the pod from its node.
|
||||
// When there are multiple elements, the lists of nodes corresponding to each
|
||||
// podAffinityTerm are intersected, i.e. all terms must be satisfied.
|
||||
// RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"`
|
||||
// If the anti-affinity requirements specified by this field are not met at
|
||||
// scheduling time, the pod will not be scheduled onto the node.
|
||||
// If the anti-affinity requirements specified by this field cease to be met
|
||||
// at some point during pod execution (e.g. due to a pod label update), the
|
||||
// system may or may not try to eventually evict the pod from its node.
|
||||
// When there are multiple elements, the lists of nodes corresponding to each
|
||||
// podAffinityTerm are intersected, i.e. all terms must be satisfied.
|
||||
RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,1,rep,name=requiredDuringSchedulingIgnoredDuringExecution"`
|
||||
// The scheduler will prefer to schedule pods to nodes that satisfy
|
||||
// the anti-affinity expressions specified by this field, but it may choose
|
||||
// a node that violates one or more of the expressions. The node that is
|
||||
// most preferred is the one with the greatest sum of weights, i.e.
|
||||
// for each node that meets all of the scheduling requirements (resource
|
||||
// request, requiredDuringScheduling anti-affinity expressions, etc.),
|
||||
// compute a sum by iterating through the elements of this field and adding
|
||||
// "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
|
||||
// node(s) with the highest sum are the most preferred.
|
||||
PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,2,rep,name=preferredDuringSchedulingIgnoredDuringExecution"`
|
||||
}
|
||||
|
||||
// The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
|
||||
type WeightedPodAffinityTerm struct {
|
||||
// weight associated with matching the corresponding podAffinityTerm,
|
||||
// in the range 1-100.
|
||||
Weight int32 `json:"weight" protobuf:"varint,1,opt,name=weight"`
|
||||
// Required. A pod affinity term, associated with the corresponding weight.
|
||||
PodAffinityTerm PodAffinityTerm `json:"podAffinityTerm" protobuf:"bytes,2,opt,name=podAffinityTerm"`
|
||||
}
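To make the preference scoring described above concrete, here is a minimal sketch of the rule: per candidate node, sum the weights of every weighted term whose podAffinityTerm is satisfied there, and prefer the node(s) with the largest sum. scorePreferredNodes and termMatches are hypothetical names; termMatches stands in for the scheduler's real term evaluation, which is not shown here:

```go
package example

import "k8s.io/kubernetes/pkg/api"

// scorePreferredNodes returns, per node, the sum of the weights of all
// weighted terms whose podAffinityTerm is satisfied on that node. The
// node(s) with the largest sum are the most preferred.
func scorePreferredNodes(nodes []string, terms []api.WeightedPodAffinityTerm,
	termMatches func(node string, term api.PodAffinityTerm) bool) map[string]int32 {
	scores := make(map[string]int32, len(nodes))
	for _, node := range nodes {
		var sum int32
		for _, wt := range terms {
			if termMatches(node, wt.PodAffinityTerm) {
				sum += wt.Weight
			}
		}
		scores[node] = sum
	}
	return scores
}
```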
|
||||
|
||||
// Defines a set of pods (namely those matching the labelSelector
|
||||
// relative to the given namespace(s)) that this pod should be
|
||||
// co-located (affinity) or not co-located (anti-affinity) with,
|
||||
// where co-located is defined as running on a node whose value of
|
||||
// the label with key <topologyKey> matches that of any node on which
|
||||
// a pod of the set of pods is running
|
||||
type PodAffinityTerm struct {
|
||||
// A label query over a set of resources, in this case pods.
|
||||
LabelSelector *unversioned.LabelSelector `json:"labelSelector,omitempty" protobuf:"bytes,1,opt,name=labelSelector"`
|
||||
// namespaces specifies which namespaces the labelSelector applies to (matches against);
|
||||
// nil list means "this pod's namespace," empty list means "all namespaces"
|
||||
// The json tag here is not "omitempty" since we need to distinguish nil and empty.
|
||||
// See https://golang.org/pkg/encoding/json/#Marshal for more details.
|
||||
Namespaces []string `json:"namespaces" protobuf:"bytes,2,rep,name=namespaces"`
|
||||
// This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
|
||||
// the labelSelector in the specified namespaces, where co-located is defined as running on a node
|
||||
// whose value of the label with key topologyKey matches that of any node on which any of the
|
||||
// selected pods is running.
|
||||
// For PreferredDuringScheduling pod anti-affinity, empty topologyKey is interpreted as "all topologies"
|
||||
// ("all topologies" here means all the topologyKeys indicated by scheduler command-line argument --failure-domains);
|
||||
// for affinity and for RequiredDuringScheduling pod anti-affinity, empty topologyKey is not allowed.
|
||||
TopologyKey string `json:"topologyKey,omitempty" protobuf:"bytes,3,opt,name=topologyKey"`
|
||||
}
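At this stage the affinity is still carried as a serialized annotation on the pod (api.AffinityAnnotationKey, which the validation tests later in this change exercise). A hypothetical sketch of attaching a hard anti-affinity term that spreads pods labeled app=web across zones; the labels and namespace are illustrative only:

```go
package example

import (
	"encoding/json"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/unversioned"
)

// spreadAcrossZones stores a hard pod anti-affinity term in the pod's
// affinity annotation so the pod is not co-located, per the zone topology
// key, with pods labeled app=web in namespace "ns".
func spreadAcrossZones(pod *api.Pod) error {
	affinity := api.Affinity{
		PodAntiAffinity: &api.PodAntiAffinity{
			RequiredDuringSchedulingIgnoredDuringExecution: []api.PodAffinityTerm{{
				LabelSelector: &unversioned.LabelSelector{MatchLabels: map[string]string{"app": "web"}},
				Namespaces:    []string{"ns"},
				TopologyKey:   "failure-domain.beta.kubernetes.io/zone",
			}},
		},
	}
	data, err := json.Marshal(&affinity)
	if err != nil {
		return err
	}
	if pod.Annotations == nil {
		pod.Annotations = map[string]string{}
	}
	pod.Annotations[api.AffinityAnnotationKey] = string(data)
	return nil
}
```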
|
||||
|
||||
// Node affinity is a group of node affinity scheduling rules.
|
||||
|
|
|
@ -40,8 +40,10 @@ func (AWSElasticBlockStoreVolumeSource) SwaggerDoc() map[string]string {
|
|||
}
|
||||
|
||||
var map_Affinity = map[string]string{
|
||||
"": "Affinity is a group of affinity scheduling rules, currently only node affinity, but in the future also inter-pod affinity.",
|
||||
"nodeAffinity": "Describes node affinity scheduling rules for the pod.",
|
||||
"": "Affinity is a group of affinity scheduling rules.",
|
||||
"nodeAffinity": "Describes node affinity scheduling rules for the pod.",
|
||||
"podAffinity": "Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).",
|
||||
"podAntiAffinity": "Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).",
|
||||
}
|
||||
|
||||
func (Affinity) SwaggerDoc() map[string]string {
|
||||
|
@ -1086,6 +1088,37 @@ func (Pod) SwaggerDoc() map[string]string {
|
|||
return map_Pod
|
||||
}
|
||||
|
||||
var map_PodAffinity = map[string]string{
|
||||
"": "Pod affinity is a group of inter pod affinity scheduling rules.",
|
||||
"requiredDuringSchedulingIgnoredDuringExecution": "NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system will try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:\"requiredDuringSchedulingRequiredDuringExecution,omitempty\"` If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.",
|
||||
"preferredDuringSchedulingIgnoredDuringExecution": "The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.",
|
||||
}
|
||||
|
||||
func (PodAffinity) SwaggerDoc() map[string]string {
|
||||
return map_PodAffinity
|
||||
}
|
||||
|
||||
var map_PodAffinityTerm = map[string]string{
|
||||
"": "Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key <topologyKey> tches that of any node on which a pod of the set of pods is running",
|
||||
"labelSelector": "A label query over a set of resources, in this case pods.",
|
||||
"namespaces": "namespaces specifies which namespaces the labelSelector applies to (matches against); nil list means \"this pod's namespace,\" empty list means \"all namespaces\" The json tag here is not \"omitempty\" since we need to distinguish nil and empty. See https://golang.org/pkg/encoding/json/#Marshal for more details.",
|
||||
"topologyKey": "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. For PreferredDuringScheduling pod anti-affinity, empty topologyKey is interpreted as \"all topologies\" (\"all topologies\" here means all the topologyKeys indicated by scheduler command-line argument --failure-domains); for affinity and for RequiredDuringScheduling pod anti-affinity, empty topologyKey is not allowed.",
|
||||
}
|
||||
|
||||
func (PodAffinityTerm) SwaggerDoc() map[string]string {
|
||||
return map_PodAffinityTerm
|
||||
}
|
||||
|
||||
var map_PodAntiAffinity = map[string]string{
|
||||
"": "Pod anti affinity is a group of inter pod anti affinity scheduling rules.",
|
||||
"requiredDuringSchedulingIgnoredDuringExecution": "NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system will try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:\"requiredDuringSchedulingRequiredDuringExecution,omitempty\"` If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.",
|
||||
"preferredDuringSchedulingIgnoredDuringExecution": "The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.",
|
||||
}
|
||||
|
||||
func (PodAntiAffinity) SwaggerDoc() map[string]string {
|
||||
return map_PodAntiAffinity
|
||||
}
|
||||
|
||||
var map_PodAttachOptions = map[string]string{
|
||||
"": "PodAttachOptions is the query options to a Pod's remote attach call.",
|
||||
"stdin": "Stdin if true, redirects the standard input stream of the pod for this call. Defaults to false.",
|
||||
|
@ -1628,4 +1661,14 @@ func (VolumeSource) SwaggerDoc() map[string]string {
|
|||
return map_VolumeSource
|
||||
}
|
||||
|
||||
var map_WeightedPodAffinityTerm = map[string]string{
|
||||
"": "The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)",
|
||||
"weight": "weight associated with matching the corresponding podAffinityTerm, in the range 1-100.",
|
||||
"podAffinityTerm": "Required. A pod affinity term, associated with the corresponding weight.",
|
||||
}
|
||||
|
||||
func (WeightedPodAffinityTerm) SwaggerDoc() map[string]string {
|
||||
return map_WeightedPodAffinityTerm
|
||||
}
|
||||
|
||||
// AUTO-GENERATED FUNCTIONS END HERE
|
||||
|
|
|
@ -34,6 +34,7 @@ import (
|
|||
"k8s.io/kubernetes/pkg/api/resource"
|
||||
apiservice "k8s.io/kubernetes/pkg/api/service"
|
||||
"k8s.io/kubernetes/pkg/api/unversioned"
|
||||
unversionedvalidation "k8s.io/kubernetes/pkg/api/unversioned/validation"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/capabilities"
|
||||
"k8s.io/kubernetes/pkg/labels"
|
||||
|
@ -74,26 +75,6 @@ var BannedOwners = map[unversioned.GroupVersionKind]struct{}{
|
|||
v1.SchemeGroupVersion.WithKind("Event"): {},
|
||||
}
|
||||
|
||||
func ValidateLabelName(labelName string, fldPath *field.Path) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
if !validation.IsQualifiedName(labelName) {
|
||||
allErrs = append(allErrs, field.Invalid(fldPath, labelName, qualifiedNameErrorMsg))
|
||||
}
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// ValidateLabels validates that a set of labels are correctly defined.
|
||||
func ValidateLabels(labels map[string]string, fldPath *field.Path) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
for k, v := range labels {
|
||||
allErrs = append(allErrs, ValidateLabelName(k, fldPath)...)
|
||||
if !validation.IsValidLabelValue(v) {
|
||||
allErrs = append(allErrs, field.Invalid(fldPath, v, labelValueErrorMsg))
|
||||
}
|
||||
}
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// ValidateHasLabel requires that api.ObjectMeta has a Label with key and expectedValue
|
||||
func ValidateHasLabel(meta api.ObjectMeta, fldPath *field.Path, key, expectedValue string) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
|
@ -361,7 +342,7 @@ func ValidateObjectMeta(meta *api.ObjectMeta, requiresNamespace bool, nameFn Val
|
|||
}
|
||||
}
|
||||
allErrs = append(allErrs, ValidateNonnegativeField(meta.Generation, fldPath.Child("generation"))...)
|
||||
allErrs = append(allErrs, ValidateLabels(meta.Labels, fldPath.Child("labels"))...)
|
||||
allErrs = append(allErrs, unversionedvalidation.ValidateLabels(meta.Labels, fldPath.Child("labels"))...)
|
||||
allErrs = append(allErrs, ValidateAnnotations(meta.Annotations, fldPath.Child("annotations"))...)
|
||||
allErrs = append(allErrs, ValidateOwnerReferences(meta.OwnerReferences, fldPath.Child("ownerReferences"))...)
|
||||
|
||||
|
@ -416,7 +397,7 @@ func ValidateObjectMetaUpdate(newMeta, oldMeta *api.ObjectMeta, fldPath *field.P
|
|||
allErrs = append(allErrs, ValidateImmutableField(newMeta.CreationTimestamp, oldMeta.CreationTimestamp, fldPath.Child("creationTimestamp"))...)
|
||||
allErrs = append(allErrs, ValidateImmutableField(newMeta.Finalizers, oldMeta.Finalizers, fldPath.Child("finalizers"))...)
|
||||
|
||||
allErrs = append(allErrs, ValidateLabels(newMeta.Labels, fldPath.Child("labels"))...)
|
||||
allErrs = append(allErrs, unversionedvalidation.ValidateLabels(newMeta.Labels, fldPath.Child("labels"))...)
|
||||
allErrs = append(allErrs, ValidateAnnotations(newMeta.Annotations, fldPath.Child("annotations"))...)
|
||||
allErrs = append(allErrs, ValidateOwnerReferences(newMeta.OwnerReferences, fldPath.Child("ownerReferences"))...)
|
||||
|
||||
|
@ -1448,7 +1429,7 @@ func ValidatePodSpec(spec *api.PodSpec, fldPath *field.Path) field.ErrorList {
|
|||
allErrs = append(allErrs, validateContainers(spec.Containers, allVolumes, fldPath.Child("containers"))...)
|
||||
allErrs = append(allErrs, validateRestartPolicy(&spec.RestartPolicy, fldPath.Child("restartPolicy"))...)
|
||||
allErrs = append(allErrs, validateDNSPolicy(&spec.DNSPolicy, fldPath.Child("dnsPolicy"))...)
|
||||
allErrs = append(allErrs, ValidateLabels(spec.NodeSelector, fldPath.Child("nodeSelector"))...)
|
||||
allErrs = append(allErrs, unversionedvalidation.ValidateLabels(spec.NodeSelector, fldPath.Child("nodeSelector"))...)
|
||||
allErrs = append(allErrs, ValidatePodSecurityContext(spec.SecurityContext, spec, fldPath, fldPath.Child("securityContext"))...)
|
||||
allErrs = append(allErrs, validateImagePullSecrets(spec.ImagePullSecrets, fldPath.Child("imagePullSecrets"))...)
|
||||
if len(spec.ServiceAccountName) > 0 {
|
||||
|
@ -1500,7 +1481,7 @@ func ValidateNodeSelectorRequirement(rq api.NodeSelectorRequirement, fldPath *fi
|
|||
default:
|
||||
allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), rq.Operator, "not a valid selector operator"))
|
||||
}
|
||||
allErrs = append(allErrs, ValidateLabelName(rq.Key, fldPath.Child("key"))...)
|
||||
allErrs = append(allErrs, unversionedvalidation.ValidateLabelName(rq.Key, fldPath.Child("key"))...)
|
||||
return allErrs
|
||||
}
|
||||
|
||||
|
@ -1547,6 +1528,87 @@ func ValidatePreferredSchedulingTerms(terms []api.PreferredSchedulingTerm, fldPa
|
|||
return allErrs
|
||||
}
|
||||
|
||||
// validatePodAffinityTerm tests that the specified podAffinityTerm fields have valid data
|
||||
func validatePodAffinityTerm(podAffinityTerm api.PodAffinityTerm, allowEmptyTopologyKey bool, fldPath *field.Path) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(podAffinityTerm.LabelSelector, fldPath.Child("matchExpressions"))...)
|
||||
for _, name := range podAffinityTerm.Namespaces {
|
||||
if ok, _ := ValidateNamespaceName(name, false); !ok {
|
||||
allErrs = append(allErrs, field.Invalid(fldPath.Child("namespace"), name, DNS1123LabelErrorMsg))
|
||||
}
|
||||
}
|
||||
if !allowEmptyTopologyKey && len(podAffinityTerm.TopologyKey) == 0 {
|
||||
allErrs = append(allErrs, field.Required(fldPath.Child("topologyKey"), "can only be empty for PreferredDuringScheduling pod anti-affinity"))
|
||||
}
|
||||
if len(podAffinityTerm.TopologyKey) != 0 {
|
||||
allErrs = append(allErrs, unversionedvalidation.ValidateLabelName(podAffinityTerm.TopologyKey, fldPath.Child("topologyKey"))...)
|
||||
}
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// validatePodAffinityTerms tests that the specified podAffinityTerms fields have valid data
|
||||
func validatePodAffinityTerms(podAffinityTerms []api.PodAffinityTerm, allowEmptyTopologyKey bool, fldPath *field.Path) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
for i, podAffinityTerm := range podAffinityTerms {
|
||||
allErrs = append(allErrs, validatePodAffinityTerm(podAffinityTerm, allowEmptyTopologyKey, fldPath.Index(i))...)
|
||||
}
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// validateWeightedPodAffinityTerms tests that the specified weightedPodAffinityTerms fields have valid data
|
||||
func validateWeightedPodAffinityTerms(weightedPodAffinityTerms []api.WeightedPodAffinityTerm, allowEmptyTopologyKey bool, fldPath *field.Path) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
for j, weightedTerm := range weightedPodAffinityTerms {
|
||||
if weightedTerm.Weight <= 0 || weightedTerm.Weight > 100 {
|
||||
allErrs = append(allErrs, field.Invalid(fldPath.Index(j).Child("weight"), weightedTerm.Weight, "must be in the range 1-100"))
|
||||
}
|
||||
allErrs = append(allErrs, validatePodAffinityTerm(weightedTerm.PodAffinityTerm, allowEmptyTopologyKey, fldPath.Index(j).Child("podAffinityTerm"))...)
|
||||
}
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// validatePodAntiAffinity tests that the specified podAntiAffinity fields have valid data
|
||||
func validatePodAntiAffinity(podAntiAffinity *api.PodAntiAffinity, fldPath *field.Path) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
// TODO: Uncomment the code below once RequiredDuringSchedulingRequiredDuringExecution is implemented.
|
||||
// if podAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution != nil {
|
||||
// allErrs = append(allErrs, validatePodAffinityTerms(podAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution, false,
|
||||
// fldPath.Child("requiredDuringSchedulingRequiredDuringExecution"))...)
|
||||
//}
|
||||
if podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
|
||||
// empty topologyKey is not allowed for hard pod anti-affinity
|
||||
allErrs = append(allErrs, validatePodAffinityTerms(podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution, false,
|
||||
fldPath.Child("requiredDuringSchedulingIgnoredDuringExecution"))...)
|
||||
}
|
||||
if podAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution != nil {
|
||||
// empty topologyKey is allowed for soft pod anti-affinity
|
||||
allErrs = append(allErrs, validateWeightedPodAffinityTerms(podAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution, true,
|
||||
fldPath.Child("preferredDuringSchedulingIgnoredDuringExecution"))...)
|
||||
}
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// validatePodAffinity tests that the specified podAffinity fields have valid data
|
||||
func validatePodAffinity(podAffinity *api.PodAffinity, fldPath *field.Path) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
// TODO: Uncomment the code below once RequiredDuringSchedulingRequiredDuringExecution is implemented.
|
||||
// if podAffinity.RequiredDuringSchedulingRequiredDuringExecution != nil {
|
||||
// allErrs = append(allErrs, validatePodAffinityTerms(podAffinity.RequiredDuringSchedulingRequiredDuringExecution, false,
|
||||
// fldPath.Child("requiredDuringSchedulingRequiredDuringExecution"))...)
|
||||
//}
|
||||
if podAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
|
||||
// empty topologyKey is not allowed for hard pod affinity
|
||||
allErrs = append(allErrs, validatePodAffinityTerms(podAffinity.RequiredDuringSchedulingIgnoredDuringExecution, false,
|
||||
fldPath.Child("requiredDuringSchedulingIgnoredDuringExecution"))...)
|
||||
}
|
||||
if podAffinity.PreferredDuringSchedulingIgnoredDuringExecution != nil {
|
||||
// empty topologyKey is not allowed for soft pod affinity
|
||||
allErrs = append(allErrs, validateWeightedPodAffinityTerms(podAffinity.PreferredDuringSchedulingIgnoredDuringExecution, false,
|
||||
fldPath.Child("preferredDuringSchedulingIgnoredDuringExecution"))...)
|
||||
}
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// ValidateAffinityInPodAnnotations tests that the serialized Affinity in Pod.Annotations has valid data
|
||||
func ValidateAffinityInPodAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
|
@ -1557,23 +1619,29 @@ func ValidateAffinityInPodAnnotations(annotations map[string]string, fldPath *fi
|
|||
return allErrs
|
||||
}
|
||||
|
||||
affinityFldPath := fldPath.Child(api.AffinityAnnotationKey)
|
||||
if affinity.NodeAffinity != nil {
|
||||
na := affinity.NodeAffinity
|
||||
|
||||
naFldPath := affinityFldPath.Child("nodeAffinity")
|
||||
// TODO: Uncomment the next three lines once RequiredDuringSchedulingRequiredDuringExecution is implemented.
|
||||
// if na.RequiredDuringSchedulingRequiredDuringExecution != nil {
|
||||
// allErrs = append(allErrs, ValidateNodeSelector(na.RequiredDuringSchedulingRequiredDuringExecution, fldPath.Child("requiredDuringSchedulingRequiredDuringExecution"))...)
|
||||
// allErrs = append(allErrs, ValidateNodeSelector(na.RequiredDuringSchedulingRequiredDuringExecution, naFldPath.Child("requiredDuringSchedulingRequiredDuringExecution"))...)
|
||||
// }
|
||||
|
||||
if na.RequiredDuringSchedulingIgnoredDuringExecution != nil {
|
||||
allErrs = append(allErrs, ValidateNodeSelector(na.RequiredDuringSchedulingIgnoredDuringExecution, fldPath.Child("requiredDuringSchedulingIgnoredDuringExecution"))...)
|
||||
allErrs = append(allErrs, ValidateNodeSelector(na.RequiredDuringSchedulingIgnoredDuringExecution, naFldPath.Child("requiredDuringSchedulingIgnoredDuringExecution"))...)
|
||||
}
|
||||
|
||||
if len(na.PreferredDuringSchedulingIgnoredDuringExecution) > 0 {
|
||||
allErrs = append(allErrs, ValidatePreferredSchedulingTerms(na.PreferredDuringSchedulingIgnoredDuringExecution, fldPath.Child("preferredDuringSchedulingIgnoredDuringExecution"))...)
|
||||
|
||||
allErrs = append(allErrs, ValidatePreferredSchedulingTerms(na.PreferredDuringSchedulingIgnoredDuringExecution, naFldPath.Child("preferredDuringSchedulingIgnoredDuringExecution"))...)
|
||||
}
|
||||
}
|
||||
if affinity.PodAffinity != nil {
|
||||
allErrs = append(allErrs, validatePodAffinity(affinity.PodAffinity, affinityFldPath.Child("podAffinity"))...)
|
||||
}
|
||||
if affinity.PodAntiAffinity != nil {
|
||||
allErrs = append(allErrs, validatePodAntiAffinity(affinity.PodAntiAffinity, affinityFldPath.Child("podAntiAffinity"))...)
|
||||
}
|
||||
|
||||
return allErrs
|
||||
}
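A hypothetical call site for the validator above, assuming it runs inside this package where api and field are already imported; validatePodAffinityAnnotations is an illustrative helper name, not part of this change:

```go
// validatePodAffinityAnnotations shows how the annotation-based affinity
// check slots in next to the other per-pod validations in this package.
func validatePodAffinityAnnotations(pod *api.Pod) field.ErrorList {
	return ValidateAffinityInPodAnnotations(pod.Annotations, field.NewPath("metadata", "annotations"))
}
```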
|
||||
|
@ -1749,7 +1817,7 @@ func ValidateService(service *api.Service) field.ErrorList {
|
|||
}
|
||||
|
||||
if service.Spec.Selector != nil {
|
||||
allErrs = append(allErrs, ValidateLabels(service.Spec.Selector, specPath.Child("selector"))...)
|
||||
allErrs = append(allErrs, unversionedvalidation.ValidateLabels(service.Spec.Selector, specPath.Child("selector"))...)
|
||||
}
|
||||
|
||||
if len(service.Spec.SessionAffinity) == 0 {
|
||||
|
@ -1968,7 +2036,7 @@ func ValidateReplicationControllerSpec(spec *api.ReplicationControllerSpec, fldP
|
|||
// ValidatePodTemplateSpec validates the spec of a pod template
|
||||
func ValidatePodTemplateSpec(spec *api.PodTemplateSpec, fldPath *field.Path) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
allErrs = append(allErrs, ValidateLabels(spec.Labels, fldPath.Child("labels"))...)
|
||||
allErrs = append(allErrs, unversionedvalidation.ValidateLabels(spec.Labels, fldPath.Child("labels"))...)
|
||||
allErrs = append(allErrs, ValidateAnnotations(spec.Annotations, fldPath.Child("annotations"))...)
|
||||
allErrs = append(allErrs, ValidatePodSpecificAnnotations(spec.Annotations, fldPath.Child("annotations"))...)
|
||||
allErrs = append(allErrs, ValidatePodSpec(&spec.Spec, fldPath.Child("spec"))...)
|
||||
|
|
|
@ -282,67 +282,6 @@ func TestValidateObjectMetaUpdateDisallowsUpdatingFinalizers(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestValidateLabels(t *testing.T) {
|
||||
successCases := []map[string]string{
|
||||
{"simple": "bar"},
|
||||
{"now-with-dashes": "bar"},
|
||||
{"1-starts-with-num": "bar"},
|
||||
{"1234": "bar"},
|
||||
{"simple/simple": "bar"},
|
||||
{"now-with-dashes/simple": "bar"},
|
||||
{"now-with-dashes/now-with-dashes": "bar"},
|
||||
{"now.with.dots/simple": "bar"},
|
||||
{"now-with.dashes-and.dots/simple": "bar"},
|
||||
{"1-num.2-num/3-num": "bar"},
|
||||
{"1234/5678": "bar"},
|
||||
{"1.2.3.4/5678": "bar"},
|
||||
{"UpperCaseAreOK123": "bar"},
|
||||
{"goodvalue": "123_-.BaR"},
|
||||
}
|
||||
for i := range successCases {
|
||||
errs := ValidateLabels(successCases[i], field.NewPath("field"))
|
||||
if len(errs) != 0 {
|
||||
t.Errorf("case[%d] expected success, got %#v", i, errs)
|
||||
}
|
||||
}
|
||||
|
||||
labelNameErrorCases := []map[string]string{
|
||||
{"nospecialchars^=@": "bar"},
|
||||
{"cantendwithadash-": "bar"},
|
||||
{"only/one/slash": "bar"},
|
||||
{strings.Repeat("a", 254): "bar"},
|
||||
}
|
||||
for i := range labelNameErrorCases {
|
||||
errs := ValidateLabels(labelNameErrorCases[i], field.NewPath("field"))
|
||||
if len(errs) != 1 {
|
||||
t.Errorf("case[%d] expected failure", i)
|
||||
} else {
|
||||
detail := errs[0].Detail
|
||||
if detail != qualifiedNameErrorMsg {
|
||||
t.Errorf("error detail %s should be equal %s", detail, qualifiedNameErrorMsg)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
labelValueErrorCases := []map[string]string{
|
||||
{"toolongvalue": strings.Repeat("a", 64)},
|
||||
{"backslashesinvalue": "some\\bad\\value"},
|
||||
{"nocommasallowed": "bad,value"},
|
||||
{"strangecharsinvalue": "?#$notsogood"},
|
||||
}
|
||||
for i := range labelValueErrorCases {
|
||||
errs := ValidateLabels(labelValueErrorCases[i], field.NewPath("field"))
|
||||
if len(errs) != 1 {
|
||||
t.Errorf("case[%d] expected failure", i)
|
||||
} else {
|
||||
detail := errs[0].Detail
|
||||
if detail != labelValueErrorMsg {
|
||||
t.Errorf("error detail %s should be equal %s", detail, labelValueErrorMsg)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateAnnotations(t *testing.T) {
|
||||
successCases := []map[string]string{
|
||||
{"simple": "bar"},
|
||||
|
@ -1969,52 +1908,6 @@ func TestValidatePod(t *testing.T) {
|
|||
NodeName: "foobar",
|
||||
},
|
||||
},
|
||||
{ // Serialized affinity requirements in annotations.
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Name: "123",
|
||||
Namespace: "ns",
|
||||
// TODO: Uncomment and move this block into Annotations map once
|
||||
// RequiredDuringSchedulingRequiredDuringExecution is implemented
|
||||
// "requiredDuringSchedulingRequiredDuringExecution": {
|
||||
// "nodeSelectorTerms": [{
|
||||
// "matchExpressions": [{
|
||||
// "key": "key1",
|
||||
// "operator": "Exists"
|
||||
// }]
|
||||
// }]
|
||||
// },
|
||||
Annotations: map[string]string{
|
||||
api.AffinityAnnotationKey: `
|
||||
{"nodeAffinity": {
|
||||
"requiredDuringSchedulingIgnoredDuringExecution": {
|
||||
"nodeSelectorTerms": [{
|
||||
"matchExpressions": [{
|
||||
"key": "key2",
|
||||
"operator": "In",
|
||||
"values": ["value1", "value2"]
|
||||
}]
|
||||
}]
|
||||
},
|
||||
"preferredDuringSchedulingIgnoredDuringExecution": [
|
||||
{
|
||||
"weight": 10,
|
||||
"preference": {"matchExpressions": [
|
||||
{
|
||||
"key": "foo",
|
||||
"operator": "In", "values": ["bar"]
|
||||
}
|
||||
]}
|
||||
}
|
||||
]
|
||||
}}`,
|
||||
},
|
||||
},
|
||||
Spec: api.PodSpec{
|
||||
Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}},
|
||||
RestartPolicy: api.RestartPolicyAlways,
|
||||
DNSPolicy: api.DNSClusterFirst,
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, pod := range successCases {
|
||||
if errs := ValidatePod(&pod); len(errs) != 0 {
|
||||
|
@ -2059,7 +1952,177 @@ func TestValidatePod(t *testing.T) {
|
|||
Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}},
|
||||
},
|
||||
},
|
||||
"invalid json of affinity in pod annotations": {
|
||||
}
|
||||
for k, v := range errorCases {
|
||||
if errs := ValidatePod(&v); len(errs) == 0 {
|
||||
t.Errorf("expected failure for %q", k)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateAffinity(t *testing.T) {
|
||||
successCases := []api.Pod{
|
||||
{ // Serialized affinity requirements in annotations.
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Name: "123",
|
||||
Namespace: "ns",
|
||||
// TODO: Uncomment and move this block into Annotations map once
|
||||
// RequiredDuringSchedulingRequiredDuringExecution is implemented
|
||||
// "requiredDuringSchedulingRequiredDuringExecution": {
|
||||
// "nodeSelectorTerms": [{
|
||||
// "matchExpressions": [{
|
||||
// "key": "key1",
|
||||
// "operator": "Exists"
|
||||
// }]
|
||||
// }]
|
||||
// },
|
||||
Annotations: map[string]string{
|
||||
api.AffinityAnnotationKey: `
|
||||
{"nodeAffinity": {
|
||||
"requiredDuringSchedulingIgnoredDuringExecution": {
|
||||
"nodeSelectorTerms": [{
|
||||
"matchExpressions": [{
|
||||
"key": "key2",
|
||||
"operator": "In",
|
||||
"values": ["value1", "value2"]
|
||||
}]
|
||||
}]
|
||||
},
|
||||
"preferredDuringSchedulingIgnoredDuringExecution": [
|
||||
{
|
||||
"weight": 10,
|
||||
"preference": {"matchExpressions": [
|
||||
{
|
||||
"key": "foo",
|
||||
"operator": "In", "values": ["bar"]
|
||||
}
|
||||
]}
|
||||
}
|
||||
]
|
||||
}}`,
|
||||
},
|
||||
},
|
||||
Spec: api.PodSpec{
|
||||
Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}},
|
||||
RestartPolicy: api.RestartPolicyAlways,
|
||||
DNSPolicy: api.DNSClusterFirst,
|
||||
},
|
||||
},
|
||||
{ // Serialized pod affinity in affinity requirements in annotations.
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Name: "123",
|
||||
Namespace: "ns",
|
||||
// TODO: Uncomment and move this block into Annotations map once
|
||||
// RequiredDuringSchedulingRequiredDuringExecution is implemented
|
||||
// "requiredDuringSchedulingRequiredDuringExecution": [{
|
||||
// "labelSelector": {
|
||||
// "matchExpressions": [{
|
||||
// "key": "key2",
|
||||
// "operator": "In",
|
||||
// "values": ["value1", "value2"]
|
||||
// }]
|
||||
// },
|
||||
// "namespaces":["ns"],
|
||||
// "topologyKey": "zone"
|
||||
// }]
|
||||
Annotations: map[string]string{
|
||||
api.AffinityAnnotationKey: `
|
||||
{"podAffinity": {
|
||||
"requiredDuringSchedulingIgnoredDuringExecution": [{
|
||||
"labelSelector": {
|
||||
"matchExpressions": [{
|
||||
"key": "key2",
|
||||
"operator": "In",
|
||||
"values": ["value1", "value2"]
|
||||
}]
|
||||
},
|
||||
"topologyKey": "zone",
|
||||
"namespaces": ["ns"]
|
||||
}],
|
||||
"preferredDuringSchedulingIgnoredDuringExecution": [{
|
||||
"weight": 10,
|
||||
"podAffinityTerm": {
|
||||
"labelSelector": {
|
||||
"matchExpressions": [{
|
||||
"key": "key2",
|
||||
"operator": "NotIn",
|
||||
"values": ["value1", "value2"]
|
||||
}]
|
||||
},
|
||||
"namespaces": ["ns"],
|
||||
"topologyKey": "region"
|
||||
}
|
||||
}]
|
||||
}}`,
|
||||
},
|
||||
},
|
||||
Spec: api.PodSpec{
|
||||
Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}},
|
||||
RestartPolicy: api.RestartPolicyAlways,
|
||||
DNSPolicy: api.DNSClusterFirst,
|
||||
},
|
||||
},
|
||||
{ // Serialized pod anti affinity with different Label Operators in affinity requirements in annotations.
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Name: "123",
|
||||
Namespace: "ns",
|
||||
// TODO: Uncomment and move this block into Annotations map once
|
||||
// RequiredDuringSchedulingRequiredDuringExecution is implemented
|
||||
// "requiredDuringSchedulingRequiredDuringExecution": [{
|
||||
// "labelSelector": {
|
||||
// "matchExpressions": [{
|
||||
// "key": "key2",
|
||||
// "operator": "In",
|
||||
// "values": ["value1", "value2"]
|
||||
// }]
|
||||
// },
|
||||
// "namespaces":["ns"],
|
||||
// "topologyKey": "zone"
|
||||
// }]
|
||||
Annotations: map[string]string{
|
||||
api.AffinityAnnotationKey: `
|
||||
{"podAntiAffinity": {
|
||||
"requiredDuringSchedulingIgnoredDuringExecution": [{
|
||||
"labelSelector": {
|
||||
"matchExpressions": [{
|
||||
"key": "key2",
|
||||
"operator": "Exists"
|
||||
}]
|
||||
},
|
||||
"topologyKey": "zone",
|
||||
"namespaces": ["ns"]
|
||||
}],
|
||||
"preferredDuringSchedulingIgnoredDuringExecution": [{
|
||||
"weight": 10,
|
||||
"podAffinityTerm": {
|
||||
"labelSelector": {
|
||||
"matchExpressions": [{
|
||||
"key": "key2",
|
||||
"operator": "DoesNotExist"
|
||||
}]
|
||||
},
|
||||
"namespaces": ["ns"],
|
||||
"topologyKey": "region"
|
||||
}
|
||||
}]
|
||||
}}`,
|
||||
},
|
||||
},
|
||||
Spec: api.PodSpec{
|
||||
Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}},
|
||||
RestartPolicy: api.RestartPolicyAlways,
|
||||
DNSPolicy: api.DNSClusterFirst,
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, pod := range successCases {
|
||||
if errs := ValidatePod(&pod); len(errs) != 0 {
|
||||
t.Errorf("expected success: %v", errs)
|
||||
}
|
||||
}
|
||||
|
||||
errorCases := map[string]api.Pod{
|
||||
"invalid json of node affinity in pod annotations": {
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Name: "123",
|
||||
Namespace: "ns",
|
||||
|
@ -2077,7 +2140,7 @@ func TestValidatePod(t *testing.T) {
|
|||
DNSPolicy: api.DNSClusterFirst,
|
||||
},
|
||||
},
|
||||
"invalid node selector requirement in affinity in pod annotations, operator can't be null": {
|
||||
"invalid node selector requirement in node affinity in pod annotations, operator can't be null": {
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Name: "123",
|
||||
Namespace: "ns",
|
||||
|
@ -2098,7 +2161,7 @@ func TestValidatePod(t *testing.T) {
|
|||
DNSPolicy: api.DNSClusterFirst,
|
||||
},
|
||||
},
|
||||
"invalid preferredSchedulingTerm in affinity in pod annotations, weight should be in range 1-100": {
|
||||
"invalid preferredSchedulingTerm in node affinity in pod annotations, weight should be in range 1-100": {
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Name: "123",
|
||||
Namespace: "ns",
|
||||
|
@ -2164,6 +2227,209 @@ func TestValidatePod(t *testing.T) {
|
|||
DNSPolicy: api.DNSClusterFirst,
|
||||
},
|
||||
},
|
||||
"invalid weight in preferredDuringSchedulingIgnoredDuringExecution in pod affinity annotations, weight should be in range 1-100": {
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Name: "123",
|
||||
Namespace: "ns",
|
||||
Annotations: map[string]string{
|
||||
api.AffinityAnnotationKey: `
|
||||
{"podAffinity": {"preferredDuringSchedulingIgnoredDuringExecution": [{
|
||||
"weight": 109,
|
||||
"podAffinityTerm":
|
||||
{
|
||||
"labelSelector": {
|
||||
"matchExpressions": [{
|
||||
"key": "key2",
|
||||
"operator": "NotIn",
|
||||
"values": ["value1", "value2"]
|
||||
}]
|
||||
},
|
||||
"namespaces": ["ns"],
|
||||
"topologyKey": "region"
|
||||
}
|
||||
}]}}`,
|
||||
},
|
||||
},
|
||||
Spec: api.PodSpec{
|
||||
Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}},
|
||||
RestartPolicy: api.RestartPolicyAlways,
|
||||
DNSPolicy: api.DNSClusterFirst,
|
||||
},
|
||||
},
|
||||
"invalid labelSelector in preferredDuringSchedulingIgnoredDuringExecution in podaffinity annotations, values should be empty if the operator is Exists": {
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Name: "123",
|
||||
Namespace: "ns",
|
||||
Annotations: map[string]string{
|
||||
api.AffinityAnnotationKey: `
|
||||
{"podAffinity": {"preferredDuringSchedulingIgnoredDuringExecution": [{
|
||||
"weight": 10,
|
||||
"podAffinityTerm":
|
||||
{
|
||||
"labelSelector": {
|
||||
"matchExpressions": [{
|
||||
"key": "key2",
|
||||
"operator": "Exists",
|
||||
"values": ["value1", "value2"]
|
||||
}]
|
||||
},
|
||||
"namespaces": ["ns"],
|
||||
"topologyKey": "region"
|
||||
}
|
||||
}]}}`,
|
||||
},
|
||||
},
|
||||
Spec: api.PodSpec{
|
||||
Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}},
|
||||
RestartPolicy: api.RestartPolicyAlways,
|
||||
DNSPolicy: api.DNSClusterFirst,
|
||||
},
|
||||
},
|
||||
"invalid name space in preferredDuringSchedulingIgnoredDuringExecution in podaffinity annotations, name space shouldbe valid": {
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Name: "123",
|
||||
Namespace: "ns",
|
||||
Annotations: map[string]string{
|
||||
api.AffinityAnnotationKey: `
|
||||
{"podAffinity": {"preferredDuringSchedulingIgnoredDuringExecution": [{
|
||||
"weight": 10,
|
||||
"podAffinityTerm":
|
||||
{
|
||||
"labelSelector": {
|
||||
"matchExpressions": [{
|
||||
"key": "key2",
|
||||
"operator": "Exists",
|
||||
"values": ["value1", "value2"]
|
||||
}]
|
||||
},
|
||||
"namespaces": ["INVALID_NAMESPACE"],
|
||||
"topologyKey": "region"
|
||||
}
|
||||
}]}}`,
|
||||
},
|
||||
},
|
||||
Spec: api.PodSpec{
|
||||
Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}},
|
||||
RestartPolicy: api.RestartPolicyAlways,
|
||||
DNSPolicy: api.DNSClusterFirst,
|
||||
},
|
||||
},
|
||||
"invalid labelOperator in preferredDuringSchedulingIgnoredDuringExecution in podantiaffinity annotations, labelOperator should be proper": {
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Name: "123",
|
||||
Namespace: "ns",
|
||||
Annotations: map[string]string{
|
||||
api.AffinityAnnotationKey: `
|
||||
{"podAntiAffinity": {"preferredDuringSchedulingIgnoredDuringExecution": [{
|
||||
"weight": 10,
|
||||
"podAffinityTerm":
|
||||
{
|
||||
"labelSelector": {
|
||||
"matchExpressions": [{
|
||||
"key": "key2",
|
||||
"operator": "WrongOp",
|
||||
"values": ["value1", "value2"]
|
||||
}]
|
||||
},
|
||||
"namespaces": ["ns"],
|
||||
"topologyKey": "region"
|
||||
}
|
||||
}]}}`,
|
||||
},
|
||||
},
|
||||
Spec: api.PodSpec{
|
||||
Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}},
|
||||
RestartPolicy: api.RestartPolicyAlways,
|
||||
DNSPolicy: api.DNSClusterFirst,
|
||||
},
|
||||
},
|
||||
"invalid pod affinity, empty topologyKey is not allowed for hard pod affinity": {
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Name: "123",
|
||||
Namespace: "ns",
|
||||
Annotations: map[string]string{
|
||||
api.AffinityAnnotationKey: `
|
||||
{"podAffinity": {"requiredDuringSchedulingIgnoredDuringExecution": [{
|
||||
"weight": 10,
|
||||
"podAffinityTerm":
|
||||
{
|
||||
"labelSelector": {
|
||||
"matchExpressions": [{
|
||||
"key": "key2",
|
||||
"operator": "In",
|
||||
"values": ["value1", "value2"]
|
||||
}]
|
||||
},
|
||||
"namespaces": ["ns"],
|
||||
"topologyKey": ""
|
||||
}
|
||||
}]}}`,
|
||||
},
|
||||
},
|
||||
Spec: api.PodSpec{
|
||||
Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}},
|
||||
RestartPolicy: api.RestartPolicyAlways,
|
||||
DNSPolicy: api.DNSClusterFirst,
|
||||
},
|
||||
},
|
||||
"invalid pod anti-affinity, empty topologyKey is not allowed for hard pod anti-affinity": {
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Name: "123",
|
||||
Namespace: "ns",
|
||||
Annotations: map[string]string{
|
||||
api.AffinityAnnotationKey: `
|
||||
{"podAntiAffinity": {"requiredDuringSchedulingIgnoredDuringExecution": [{
|
||||
"weight": 10,
|
||||
"podAffinityTerm":
|
||||
{
|
||||
"labelSelector": {
|
||||
"matchExpressions": [{
|
||||
"key": "key2",
|
||||
"operator": "In",
|
||||
"values": ["value1", "value2"]
|
||||
}]
|
||||
},
|
||||
"namespaces": ["ns"],
|
||||
"topologyKey": ""
|
||||
}
|
||||
}]}}`,
|
||||
},
|
||||
},
|
||||
Spec: api.PodSpec{
|
||||
Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}},
|
||||
RestartPolicy: api.RestartPolicyAlways,
|
||||
DNSPolicy: api.DNSClusterFirst,
|
||||
},
|
||||
},
|
||||
"invalid pod anti-affinity, empty topologyKey is not allowed for soft pod affinity": {
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Name: "123",
|
||||
Namespace: "ns",
|
||||
Annotations: map[string]string{
|
||||
api.AffinityAnnotationKey: `
|
||||
{"podAffinity": {"preferredDuringSchedulingIgnoredDuringExecution": [{
|
||||
"weight": 10,
|
||||
"podAffinityTerm":
|
||||
{
|
||||
"labelSelector": {
|
||||
"matchExpressions": [{
|
||||
"key": "key2",
|
||||
"operator": "In",
|
||||
"values": ["value1", "value2"]
|
||||
}]
|
||||
},
|
||||
"namespaces": ["ns"],
|
||||
"topologyKey": ""
|
||||
}
|
||||
}]}}`,
|
||||
},
|
||||
},
|
||||
Spec: api.PodSpec{
|
||||
Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}},
|
||||
RestartPolicy: api.RestartPolicyAlways,
|
||||
DNSPolicy: api.DNSClusterFirst,
|
||||
},
|
||||
},
|
||||
}
|
||||
for k, v := range errorCases {
|
||||
if errs := ValidatePod(&v); len(errs) == 0 {
|
||||
|
|
|
@ -56,7 +56,7 @@ func ValidatePodTemplateSpecForPetSet(template *api.PodTemplateSpec, selector la
|
|||
// fail. We should really check that the union of the given volumes and volumeClaims match
|
||||
// volume mounts in the containers.
|
||||
// allErrs = append(allErrs, apivalidation.ValidatePodTemplateSpec(template, fldPath)...)
|
||||
allErrs = append(allErrs, apivalidation.ValidateLabels(template.Labels, fldPath.Child("labels"))...)
|
||||
allErrs = append(allErrs, unversionedvalidation.ValidateLabels(template.Labels, fldPath.Child("labels"))...)
|
||||
allErrs = append(allErrs, apivalidation.ValidateAnnotations(template.Annotations, fldPath.Child("annotations"))...)
|
||||
allErrs = append(allErrs, apivalidation.ValidatePodSpecificAnnotations(template.Annotations, fldPath.Child("annotations"))...)
|
||||
}
|
||||
|
|
|
@ -189,6 +189,8 @@ func DeepCopy_componentconfig_KubeSchedulerConfiguration(in KubeSchedulerConfigu
|
|||
out.KubeAPIQPS = in.KubeAPIQPS
|
||||
out.KubeAPIBurst = in.KubeAPIBurst
|
||||
out.SchedulerName = in.SchedulerName
|
||||
out.HardPodAffinitySymmetricWeight = in.HardPodAffinitySymmetricWeight
|
||||
out.FailureDomains = in.FailureDomains
|
||||
if err := DeepCopy_componentconfig_LeaderElectionConfiguration(in.LeaderElection, &out.LeaderElection, c); err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -5051,16 +5051,16 @@ func (x *KubeSchedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) {
|
|||
} else {
|
||||
yysep2 := !z.EncBinary()
|
||||
yy2arr2 := z.EncBasicHandle().StructToArray
|
||||
var yyq2 [12]bool
|
||||
var yyq2 [14]bool
|
||||
_, _, _ = yysep2, yyq2, yy2arr2
|
||||
const yyr2 bool = false
|
||||
yyq2[10] = x.Kind != ""
|
||||
yyq2[11] = x.APIVersion != ""
|
||||
yyq2[12] = x.Kind != ""
|
||||
yyq2[13] = x.APIVersion != ""
|
||||
var yynn2 int
|
||||
if yyr2 || yy2arr2 {
|
||||
r.EncodeArrayStart(12)
|
||||
r.EncodeArrayStart(14)
|
||||
} else {
|
||||
yynn2 = 10
|
||||
yynn2 = 12
|
||||
for _, b := range yyq2 {
|
||||
if b {
|
||||
yynn2++
|
||||
|
@ -5242,20 +5242,58 @@ func (x *KubeSchedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) {
|
|||
}
|
||||
if yyr2 || yy2arr2 {
|
||||
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
|
||||
yy31 := &x.LeaderElection
|
||||
yy31.CodecEncodeSelf(e)
|
||||
yym31 := z.EncBinary()
|
||||
_ = yym31
|
||||
if false {
|
||||
} else {
|
||||
r.EncodeInt(int64(x.HardPodAffinitySymmetricWeight))
|
||||
}
|
||||
} else {
|
||||
z.EncSendContainerState(codecSelfer_containerMapKey1234)
|
||||
r.EncodeString(codecSelferC_UTF81234, string("hardPodAffinitySymmetricWeight"))
|
||||
z.EncSendContainerState(codecSelfer_containerMapValue1234)
|
||||
yym32 := z.EncBinary()
|
||||
_ = yym32
|
||||
if false {
|
||||
} else {
|
||||
r.EncodeInt(int64(x.HardPodAffinitySymmetricWeight))
|
||||
}
|
||||
}
|
||||
if yyr2 || yy2arr2 {
|
||||
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
|
||||
yym34 := z.EncBinary()
|
||||
_ = yym34
|
||||
if false {
|
||||
} else {
|
||||
r.EncodeString(codecSelferC_UTF81234, string(x.FailureDomains))
|
||||
}
|
||||
} else {
|
||||
z.EncSendContainerState(codecSelfer_containerMapKey1234)
|
||||
r.EncodeString(codecSelferC_UTF81234, string("failureDomains"))
|
||||
z.EncSendContainerState(codecSelfer_containerMapValue1234)
|
||||
yym35 := z.EncBinary()
|
||||
_ = yym35
|
||||
if false {
|
||||
} else {
|
||||
r.EncodeString(codecSelferC_UTF81234, string(x.FailureDomains))
|
||||
}
|
||||
}
|
||||
if yyr2 || yy2arr2 {
|
||||
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
|
||||
yy37 := &x.LeaderElection
|
||||
yy37.CodecEncodeSelf(e)
|
||||
} else {
|
||||
z.EncSendContainerState(codecSelfer_containerMapKey1234)
|
||||
r.EncodeString(codecSelferC_UTF81234, string("leaderElection"))
|
||||
z.EncSendContainerState(codecSelfer_containerMapValue1234)
|
||||
yy33 := &x.LeaderElection
|
||||
yy33.CodecEncodeSelf(e)
|
||||
yy39 := &x.LeaderElection
|
||||
yy39.CodecEncodeSelf(e)
|
||||
}
|
||||
if yyr2 || yy2arr2 {
|
||||
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
|
||||
if yyq2[10] {
|
||||
yym36 := z.EncBinary()
|
||||
_ = yym36
|
||||
if yyq2[12] {
|
||||
yym42 := z.EncBinary()
|
||||
_ = yym42
|
||||
if false {
|
||||
} else {
|
||||
r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
|
||||
|
@ -5264,12 +5302,12 @@ func (x *KubeSchedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) {
|
|||
r.EncodeString(codecSelferC_UTF81234, "")
|
||||
}
|
||||
} else {
|
||||
if yyq2[10] {
|
||||
if yyq2[12] {
|
||||
z.EncSendContainerState(codecSelfer_containerMapKey1234)
|
||||
r.EncodeString(codecSelferC_UTF81234, string("kind"))
|
||||
z.EncSendContainerState(codecSelfer_containerMapValue1234)
|
||||
yym37 := z.EncBinary()
|
||||
_ = yym37
|
||||
yym43 := z.EncBinary()
|
||||
_ = yym43
|
||||
if false {
|
||||
} else {
|
||||
r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
|
||||
|
@ -5278,9 +5316,9 @@ func (x *KubeSchedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) {
|
|||
}
|
||||
if yyr2 || yy2arr2 {
|
||||
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
|
||||
if yyq2[11] {
|
||||
yym39 := z.EncBinary()
|
||||
_ = yym39
|
||||
if yyq2[13] {
|
||||
yym45 := z.EncBinary()
|
||||
_ = yym45
|
||||
if false {
|
||||
} else {
|
||||
r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
|
||||
|
@ -5289,12 +5327,12 @@ func (x *KubeSchedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) {
|
|||
r.EncodeString(codecSelferC_UTF81234, "")
|
||||
}
|
||||
} else {
|
||||
if yyq2[11] {
|
||||
if yyq2[13] {
|
||||
z.EncSendContainerState(codecSelfer_containerMapKey1234)
|
||||
r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
|
||||
z.EncSendContainerState(codecSelfer_containerMapValue1234)
|
||||
yym40 := z.EncBinary()
|
||||
_ = yym40
|
||||
yym46 := z.EncBinary()
|
||||
_ = yym46
|
||||
if false {
|
||||
} else {
|
||||
r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
|
||||
|
@ -5416,12 +5454,24 @@ func (x *KubeSchedulerConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.
|
|||
} else {
|
||||
x.SchedulerName = string(r.DecodeString())
|
||||
}
|
||||
case "hardPodAffinitySymmetricWeight":
|
||||
if r.TryDecodeAsNil() {
|
||||
x.HardPodAffinitySymmetricWeight = 0
|
||||
} else {
|
||||
x.HardPodAffinitySymmetricWeight = int(r.DecodeInt(codecSelferBitsize1234))
|
||||
}
|
||||
case "failureDomains":
|
||||
if r.TryDecodeAsNil() {
|
||||
x.FailureDomains = ""
|
||||
} else {
|
||||
x.FailureDomains = string(r.DecodeString())
|
||||
}
|
||||
case "leaderElection":
|
||||
if r.TryDecodeAsNil() {
|
||||
x.LeaderElection = LeaderElectionConfiguration{}
|
||||
} else {
|
||||
yyv13 := &x.LeaderElection
|
||||
yyv13.CodecDecodeSelf(d)
|
||||
yyv15 := &x.LeaderElection
|
||||
yyv15.CodecDecodeSelf(d)
|
||||
}
|
||||
case "kind":
|
||||
if r.TryDecodeAsNil() {
|
||||
|
@ -5446,16 +5496,16 @@ func (x *KubeSchedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec197
|
|||
var h codecSelfer1234
|
||||
z, r := codec1978.GenHelperDecoder(d)
|
||||
_, _, _ = h, z, r
|
||||
var yyj16 int
|
||||
var yyb16 bool
|
||||
var yyhl16 bool = l >= 0
|
||||
yyj16++
|
||||
if yyhl16 {
|
||||
yyb16 = yyj16 > l
|
||||
var yyj18 int
|
||||
var yyb18 bool
|
||||
var yyhl18 bool = l >= 0
|
||||
yyj18++
|
||||
if yyhl18 {
|
||||
yyb18 = yyj18 > l
|
||||
} else {
|
||||
yyb16 = r.CheckBreak()
|
||||
yyb18 = r.CheckBreak()
|
||||
}
|
||||
if yyb16 {
|
||||
if yyb18 {
|
||||
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
|
||||
return
|
||||
}
|
||||
|
@ -5465,13 +5515,13 @@ func (x *KubeSchedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec197
|
|||
} else {
|
||||
x.Port = int32(r.DecodeInt(32))
|
||||
}
|
||||
yyj16++
|
||||
if yyhl16 {
|
||||
yyb16 = yyj16 > l
|
||||
yyj18++
|
||||
if yyhl18 {
|
||||
yyb18 = yyj18 > l
|
||||
} else {
|
||||
yyb16 = r.CheckBreak()
|
||||
yyb18 = r.CheckBreak()
|
||||
}
|
||||
if yyb16 {
|
||||
if yyb18 {
|
||||
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
|
||||
return
|
||||
}
|
||||
|
@ -5481,13 +5531,13 @@ func (x *KubeSchedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec197
|
|||
} else {
|
||||
x.Address = string(r.DecodeString())
|
||||
}
|
||||
yyj16++
|
||||
if yyhl16 {
|
||||
yyb16 = yyj16 > l
|
||||
yyj18++
|
||||
if yyhl18 {
|
||||
yyb18 = yyj18 > l
|
||||
} else {
|
||||
yyb16 = r.CheckBreak()
|
||||
yyb18 = r.CheckBreak()
|
||||
}
|
||||
if yyb16 {
|
||||
if yyb18 {
|
||||
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
|
||||
return
|
||||
}
|
||||
|
@ -5497,13 +5547,13 @@ func (x *KubeSchedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec197
|
|||
} else {
|
||||
x.AlgorithmProvider = string(r.DecodeString())
|
||||
}
|
||||
yyj16++
|
||||
if yyhl16 {
|
||||
yyb16 = yyj16 > l
|
||||
yyj18++
|
||||
if yyhl18 {
|
||||
yyb18 = yyj18 > l
|
||||
} else {
|
||||
yyb16 = r.CheckBreak()
|
||||
yyb18 = r.CheckBreak()
|
||||
}
|
||||
if yyb16 {
|
||||
if yyb18 {
|
||||
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
|
||||
return
|
||||
}
|
||||
|
@ -5513,13 +5563,13 @@ func (x *KubeSchedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec197
|
|||
} else {
|
||||
x.PolicyConfigFile = string(r.DecodeString())
|
||||
}
|
||||
yyj16++
|
||||
if yyhl16 {
|
||||
yyb16 = yyj16 > l
|
||||
yyj18++
|
||||
if yyhl18 {
|
||||
yyb18 = yyj18 > l
|
||||
} else {
|
||||
yyb16 = r.CheckBreak()
|
||||
yyb18 = r.CheckBreak()
|
||||
}
|
||||
if yyb16 {
|
||||
if yyb18 {
|
||||
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
|
||||
return
|
||||
}
|
||||
|
@ -5529,13 +5579,13 @@ func (x *KubeSchedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec197
|
|||
} else {
|
||||
x.EnableProfiling = bool(r.DecodeBool())
|
||||
}
|
||||
yyj16++
|
||||
if yyhl16 {
|
||||
yyb16 = yyj16 > l
|
||||
yyj18++
|
||||
if yyhl18 {
|
||||
yyb18 = yyj18 > l
|
||||
} else {
|
||||
yyb16 = r.CheckBreak()
|
||||
yyb18 = r.CheckBreak()
|
||||
}
|
||||
if yyb16 {
|
||||
if yyb18 {
|
||||
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
|
||||
return
|
||||
}
|
||||
|
@ -5545,13 +5595,13 @@ func (x *KubeSchedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec197
|
|||
} else {
|
||||
x.ContentType = string(r.DecodeString())
|
||||
}
|
||||
yyj16++
|
||||
if yyhl16 {
|
||||
yyb16 = yyj16 > l
|
||||
yyj18++
|
||||
if yyhl18 {
|
||||
yyb18 = yyj18 > l
|
||||
} else {
|
||||
yyb16 = r.CheckBreak()
|
||||
yyb18 = r.CheckBreak()
|
||||
}
|
||||
if yyb16 {
|
||||
if yyb18 {
|
||||
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
|
||||
return
|
||||
}
|
||||
|
@ -5561,13 +5611,13 @@ func (x *KubeSchedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec197
|
|||
} else {
|
||||
x.KubeAPIQPS = float32(r.DecodeFloat(true))
|
||||
}
|
||||
yyj16++
|
||||
if yyhl16 {
|
||||
yyb16 = yyj16 > l
|
||||
yyj18++
|
||||
if yyhl18 {
|
||||
yyb18 = yyj18 > l
|
||||
} else {
|
||||
yyb16 = r.CheckBreak()
|
||||
yyb18 = r.CheckBreak()
|
||||
}
|
||||
if yyb16 {
|
||||
if yyb18 {
|
||||
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
|
||||
return
|
||||
}
|
||||
|
@ -5577,13 +5627,13 @@ func (x *KubeSchedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec197
|
|||
} else {
|
||||
x.KubeAPIBurst = int32(r.DecodeInt(32))
|
||||
}
|
||||
yyj16++
|
||||
if yyhl16 {
|
||||
yyb16 = yyj16 > l
|
||||
yyj18++
|
||||
if yyhl18 {
|
||||
yyb18 = yyj18 > l
|
||||
} else {
|
||||
yyb16 = r.CheckBreak()
|
||||
yyb18 = r.CheckBreak()
|
||||
}
|
||||
if yyb16 {
|
||||
if yyb18 {
|
||||
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
|
||||
return
|
||||
}
|
||||
|
@ -5593,13 +5643,45 @@ func (x *KubeSchedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec197
|
|||
} else {
|
||||
x.SchedulerName = string(r.DecodeString())
|
||||
}
|
||||
yyj16++
|
||||
if yyhl16 {
|
||||
yyb16 = yyj16 > l
|
||||
yyj18++
|
||||
if yyhl18 {
|
||||
yyb18 = yyj18 > l
|
||||
} else {
|
||||
yyb16 = r.CheckBreak()
|
||||
yyb18 = r.CheckBreak()
|
||||
}
|
||||
if yyb16 {
|
||||
if yyb18 {
|
||||
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
|
||||
return
|
||||
}
|
||||
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
|
||||
if r.TryDecodeAsNil() {
|
||||
x.HardPodAffinitySymmetricWeight = 0
|
||||
} else {
|
||||
x.HardPodAffinitySymmetricWeight = int(r.DecodeInt(codecSelferBitsize1234))
|
||||
}
|
||||
yyj18++
|
||||
if yyhl18 {
|
||||
yyb18 = yyj18 > l
|
||||
} else {
|
||||
yyb18 = r.CheckBreak()
|
||||
}
|
||||
if yyb18 {
|
||||
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
|
||||
return
|
||||
}
|
||||
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
|
||||
if r.TryDecodeAsNil() {
|
||||
x.FailureDomains = ""
|
||||
} else {
|
||||
x.FailureDomains = string(r.DecodeString())
|
||||
}
|
||||
yyj18++
|
||||
if yyhl18 {
|
||||
yyb18 = yyj18 > l
|
||||
} else {
|
||||
yyb18 = r.CheckBreak()
|
||||
}
|
||||
if yyb18 {
|
||||
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
|
||||
return
|
||||
}
|
||||
|
@ -5607,16 +5689,16 @@ func (x *KubeSchedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec197
|
|||
if r.TryDecodeAsNil() {
|
||||
x.LeaderElection = LeaderElectionConfiguration{}
|
||||
} else {
|
||||
yyv26 := &x.LeaderElection
|
||||
yyv26.CodecDecodeSelf(d)
|
||||
yyv30 := &x.LeaderElection
|
||||
yyv30.CodecDecodeSelf(d)
|
||||
}
|
||||
yyj16++
|
||||
if yyhl16 {
|
||||
yyb16 = yyj16 > l
|
||||
yyj18++
|
||||
if yyhl18 {
|
||||
yyb18 = yyj18 > l
|
||||
} else {
|
||||
yyb16 = r.CheckBreak()
|
||||
yyb18 = r.CheckBreak()
|
||||
}
|
||||
if yyb16 {
|
||||
if yyb18 {
|
||||
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
|
||||
return
|
||||
}
|
||||
|
@ -5626,13 +5708,13 @@ func (x *KubeSchedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec197
|
|||
} else {
|
||||
x.Kind = string(r.DecodeString())
|
||||
}
|
||||
yyj16++
|
||||
if yyhl16 {
|
||||
yyb16 = yyj16 > l
|
||||
yyj18++
|
||||
if yyhl18 {
|
||||
yyb18 = yyj18 > l
|
||||
} else {
|
||||
yyb16 = r.CheckBreak()
|
||||
yyb18 = r.CheckBreak()
|
||||
}
|
||||
if yyb16 {
|
||||
if yyb18 {
|
||||
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
|
||||
return
|
||||
}
|
||||
|
@ -5643,17 +5725,17 @@ func (x *KubeSchedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec197
|
|||
x.APIVersion = string(r.DecodeString())
|
||||
}
|
||||
for {
|
||||
yyj16++
|
||||
if yyhl16 {
|
||||
yyb16 = yyj16 > l
|
||||
yyj18++
|
||||
if yyhl18 {
|
||||
yyb18 = yyj18 > l
|
||||
} else {
|
||||
yyb16 = r.CheckBreak()
|
||||
yyb18 = r.CheckBreak()
|
||||
}
|
||||
if yyb16 {
|
||||
if yyb18 {
|
||||
break
|
||||
}
|
||||
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
|
||||
z.DecStructFieldNotFound(yyj16-1, "")
|
||||
z.DecStructFieldNotFound(yyj18-1, "")
|
||||
}
|
||||
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
|
||||
}
|
||||
|
|
|
@ -372,6 +372,12 @@ type KubeSchedulerConfiguration struct {
|
|||
// will be processed by this scheduler, based on pod's annotation with
|
||||
// key 'scheduler.alpha.kubernetes.io/name'.
|
||||
SchedulerName string `json:"schedulerName"`
|
||||
// RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule
|
||||
// corresponding to every RequiredDuringScheduling affinity rule.
|
||||
// HardPodAffinitySymmetricWeight represents the weight of implicit PreferredDuringScheduling affinity rule, in the range 0-100.
|
||||
HardPodAffinitySymmetricWeight int `json:"hardPodAffinitySymmetricWeight"`
|
||||
// Indicates the "all topologies" set for an empty topologyKey when it's used for PreferredDuringScheduling pod anti-affinity.
|
||||
FailureDomains string `json:"failureDomains"`
|
||||
// leaderElection defines the configuration of leader election client.
|
||||
LeaderElection LeaderElectionConfiguration `json:"leaderElection"`
|
||||
}
|
||||
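For orientation, here is a hedged sketch of how the two new fields sit on the internal config object; the values below are illustrative (not the defaults), other fields are left at their zero values, and the package name is assumed from the surrounding tree.

```go
cfg := componentconfig.KubeSchedulerConfiguration{
	SchedulerName:                  "default-scheduler",
	HardPodAffinitySymmetricWeight: 10, // weight of the implicit reverse PreferredDuringScheduling rule, 0-100
	FailureDomains:                 "kubernetes.io/hostname,failure-domain.beta.kubernetes.io/zone",
}
```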
|
|
|
@ -163,6 +163,8 @@ func autoConvert_v1alpha1_KubeSchedulerConfiguration_To_componentconfig_KubeSche
|
|||
out.KubeAPIQPS = in.KubeAPIQPS
|
||||
out.KubeAPIBurst = int32(in.KubeAPIBurst)
|
||||
out.SchedulerName = in.SchedulerName
|
||||
out.HardPodAffinitySymmetricWeight = in.HardPodAffinitySymmetricWeight
|
||||
out.FailureDomains = in.FailureDomains
|
||||
if err := Convert_v1alpha1_LeaderElectionConfiguration_To_componentconfig_LeaderElectionConfiguration(&in.LeaderElection, &out.LeaderElection, s); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -191,6 +193,8 @@ func autoConvert_componentconfig_KubeSchedulerConfiguration_To_v1alpha1_KubeSche
|
|||
out.KubeAPIQPS = in.KubeAPIQPS
|
||||
out.KubeAPIBurst = int(in.KubeAPIBurst)
|
||||
out.SchedulerName = in.SchedulerName
|
||||
out.HardPodAffinitySymmetricWeight = in.HardPodAffinitySymmetricWeight
|
||||
out.FailureDomains = in.FailureDomains
|
||||
if err := Convert_componentconfig_LeaderElectionConfiguration_To_v1alpha1_LeaderElectionConfiguration(&in.LeaderElection, &out.LeaderElection, s); err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -98,6 +98,8 @@ func DeepCopy_v1alpha1_KubeSchedulerConfiguration(in KubeSchedulerConfiguration,
|
|||
out.KubeAPIQPS = in.KubeAPIQPS
|
||||
out.KubeAPIBurst = in.KubeAPIBurst
|
||||
out.SchedulerName = in.SchedulerName
|
||||
out.HardPodAffinitySymmetricWeight = in.HardPodAffinitySymmetricWeight
|
||||
out.FailureDomains = in.FailureDomains
|
||||
if err := DeepCopy_v1alpha1_LeaderElectionConfiguration(in.LeaderElection, &out.LeaderElection, c); err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -89,6 +89,12 @@ func SetDefaults_KubeSchedulerConfiguration(obj *KubeSchedulerConfiguration) {
|
|||
if obj.SchedulerName == "" {
|
||||
obj.SchedulerName = api.DefaultSchedulerName
|
||||
}
|
||||
if obj.HardPodAffinitySymmetricWeight == 0 {
|
||||
obj.HardPodAffinitySymmetricWeight = api.DefaultHardPodAffinitySymmetricWeight
|
||||
}
|
||||
if obj.FailureDomains == "" {
|
||||
obj.FailureDomains = api.DefaultFailureDomains
|
||||
}
|
||||
}
|
||||
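One consequence of the zero-value checks above, sketched under the assumption that the defaulting function is called directly (normally the API machinery does this): explicitly setting HardPodAffinitySymmetricWeight to 0 or FailureDomains to "" is indistinguishable from leaving them unset, so both are restored to the package defaults.

```go
obj := &KubeSchedulerConfiguration{HardPodAffinitySymmetricWeight: 0, FailureDomains: ""}
SetDefaults_KubeSchedulerConfiguration(obj)
// obj.HardPodAffinitySymmetricWeight is now api.DefaultHardPodAffinitySymmetricWeight (1),
// and obj.FailureDomains is api.DefaultFailureDomains.
```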
|
||||
func SetDefaults_LeaderElectionConfiguration(obj *LeaderElectionConfiguration) {
|
||||
|
|
|
@ -105,6 +105,12 @@ type KubeSchedulerConfiguration struct {
|
|||
// will be processed by this scheduler, based on pod's annotation with
|
||||
// key 'scheduler.alpha.kubernetes.io/name'.
|
||||
SchedulerName string `json:"schedulerName"`
|
||||
// RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule
|
||||
// corresponding to every RequiredDuringScheduling affinity rule.
|
||||
// HardPodAffinitySymmetricWeight represents the weight of implicit PreferredDuringScheduling affinity rule, in the range 0-100.
|
||||
HardPodAffinitySymmetricWeight int `json:"hardPodAffinitySymmetricWeight"`
|
||||
// Indicates the "all topologies" set for an empty topologyKey when it's used for PreferredDuringScheduling pod anti-affinity.
|
||||
FailureDomains string `json:"failureDomains"`
|
||||
// leaderElection defines the configuration of leader election client.
|
||||
LeaderElection LeaderElectionConfiguration `json:"leaderElection"`
|
||||
}
|
||||
|
|
|
@ -67,5 +67,9 @@ func (s *SchedulerServer) AddFlags(fs *pflag.FlagSet) {
|
|||
fs.Float32Var(&s.KubeAPIQPS, "kube-api-qps", s.KubeAPIQPS, "QPS to use while talking with kubernetes apiserver")
|
||||
fs.Int32Var(&s.KubeAPIBurst, "kube-api-burst", s.KubeAPIBurst, "Burst to use while talking with kubernetes apiserver")
|
||||
fs.StringVar(&s.SchedulerName, "scheduler-name", s.SchedulerName, "Name of the scheduler, used to select which pods will be processed by this scheduler, based on pod's annotation with key 'scheduler.alpha.kubernetes.io/name'")
|
||||
fs.IntVar(&s.HardPodAffinitySymmetricWeight, "hard-pod-affinity-symmetric-weight", api.DefaultHardPodAffinitySymmetricWeight,
|
||||
"RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule corresponding "+
|
||||
"to every RequiredDuringScheduling affinity rule. --hard-pod-affinity-symmetric-weight represents the weight of implicit PreferredDuringScheduling affinity rule.")
|
||||
fs.StringVar(&s.FailureDomains, "failure-domains", api.DefaultFailureDomains, "Indicate the \"all topologies\" set for an empty topologyKey when it's used for PreferredDuringScheduling pod anti-affinity.")
|
||||
leaderelection.BindFlags(&s.LeaderElection, fs)
|
||||
}
|
||||
|
|
|
@ -107,7 +107,7 @@ func Run(s *options.SchedulerServer) error {
|
|||
glog.Fatal(server.ListenAndServe())
|
||||
}()
|
||||
|
||||
configFactory := factory.NewConfigFactory(kubeClient, s.SchedulerName)
|
||||
configFactory := factory.NewConfigFactory(kubeClient, s.SchedulerName, s.HardPodAffinitySymmetricWeight, s.FailureDomains)
|
||||
config, err := createConfig(s, configFactory)
|
||||
|
||||
if err != nil {
|
||||
|
|
|
@ -30,6 +30,7 @@ var (
|
|||
ErrDiskConflict = newPredicateFailureError("NoDiskConflict")
|
||||
ErrVolumeZoneConflict = newPredicateFailureError("NoVolumeZoneConflict")
|
||||
ErrNodeSelectorNotMatch = newPredicateFailureError("MatchNodeSelector")
|
||||
ErrPodAffinityNotMatch = newPredicateFailureError("MatchInterPodAffinity")
|
||||
ErrPodNotMatchHostName = newPredicateFailureError("HostName")
|
||||
ErrPodNotFitsHostPorts = newPredicateFailureError("PodFitsHostPorts")
|
||||
ErrNodeLabelPresenceViolated = newPredicateFailureError("CheckNodeLabelPresence")
|
||||
|
|
|
@ -19,14 +19,14 @@ package predicates
|
|||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/unversioned"
|
||||
"k8s.io/kubernetes/pkg/client/cache"
|
||||
"k8s.io/kubernetes/pkg/labels"
|
||||
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
|
||||
priorityutil "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util"
|
||||
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/kubernetes/pkg/api/unversioned"
|
||||
)
|
||||
|
||||
type NodeInfo interface {
|
||||
|
@ -723,3 +723,198 @@ func GeneralPredicates(pod *api.Pod, nodeName string, nodeInfo *schedulercache.N
|
|||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
type PodAffinityChecker struct {
|
||||
info NodeInfo
|
||||
podLister algorithm.PodLister
|
||||
failureDomains priorityutil.Topologies
|
||||
}
|
||||
|
||||
func NewPodAffinityPredicate(info NodeInfo, podLister algorithm.PodLister, failureDomains []string) algorithm.FitPredicate {
|
||||
checker := &PodAffinityChecker{
|
||||
info: info,
|
||||
podLister: podLister,
|
||||
failureDomains: priorityutil.Topologies{DefaultKeys: failureDomains},
|
||||
}
|
||||
return checker.InterPodAffinityMatches
|
||||
}
|
||||
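A minimal usage sketch, reusing the FakeNodeInfo and algorithm.FakePodLister helpers that the tests below rely on; node, existingPods, and candidatePod are assumed fixtures. It only shows how the closure returned by the constructor is invoked by the generic fit-predicate machinery.

```go
pred := NewPodAffinityPredicate(
	FakeNodeInfo(node),                            // resolves a node name to its *api.Node
	algorithm.FakePodLister(existingPods),         // stands in for the cluster-wide pod list
	strings.Split(api.DefaultFailureDomains, ","), // topology keys substituted for an empty topologyKey
)
fits, err := pred(candidatePod, node.Name, schedulercache.NewNodeInfo(existingPods...))
```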
|
||||
func (checker *PodAffinityChecker) InterPodAffinityMatches(pod *api.Pod, nodeName string, nodeInfo *schedulercache.NodeInfo) (bool, error) {
|
||||
node, err := checker.info.GetNodeInfo(nodeName)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
allPods, err := checker.podLister.List(labels.Everything())
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if checker.NodeMatchPodAffinityAntiAffinity(pod, allPods, node) {
|
||||
return true, nil
|
||||
}
|
||||
return false, ErrPodAffinityNotMatch
|
||||
}
|
||||
|
||||
// AnyPodMatchesPodAffinityTerm checks if any of given pods can match the specific podAffinityTerm.
|
||||
func (checker *PodAffinityChecker) AnyPodMatchesPodAffinityTerm(pod *api.Pod, allPods []*api.Pod, node *api.Node, podAffinityTerm api.PodAffinityTerm) (bool, error) {
|
||||
for _, ep := range allPods {
|
||||
match, err := checker.failureDomains.CheckIfPodMatchPodAffinityTerm(ep, pod, podAffinityTerm,
|
||||
func(ep *api.Pod) (*api.Node, error) { return checker.info.GetNodeInfo(ep.Spec.NodeName) },
|
||||
func(pod *api.Pod) (*api.Node, error) { return node, nil },
|
||||
)
|
||||
if err != nil || match {
|
||||
return match, err
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Checks whether the given node has pods which satisfy all the required pod affinity scheduling rules.
|
||||
// If the node has pods which satisfy all the required pod affinity scheduling rules, it returns true.
|
||||
func (checker *PodAffinityChecker) NodeMatchesHardPodAffinity(pod *api.Pod, allPods []*api.Pod, node *api.Node, podAffinity *api.PodAffinity) bool {
|
||||
var podAffinityTerms []api.PodAffinityTerm
|
||||
if len(podAffinity.RequiredDuringSchedulingIgnoredDuringExecution) != 0 {
|
||||
podAffinityTerms = podAffinity.RequiredDuringSchedulingIgnoredDuringExecution
|
||||
}
|
||||
// TODO: Uncomment this block when implement RequiredDuringSchedulingRequiredDuringExecution.
|
||||
//if len(podAffinity.RequiredDuringSchedulingRequiredDuringExecution) != 0 {
|
||||
// podAffinityTerms = append(podAffinityTerms, podAffinity.RequiredDuringSchedulingRequiredDuringExecution...)
|
||||
//}
|
||||
|
||||
for _, podAffinityTerm := range podAffinityTerms {
|
||||
podAffinityTermMatches, err := checker.AnyPodMatchesPodAffinityTerm(pod, allPods, node, podAffinityTerm)
|
||||
if err != nil {
|
||||
glog.V(10).Infof("Cannot schedule pod %+v onto node %v, an error ocurred when checking existing pods on the node for PodAffinityTerm %v err: %v",
|
||||
podName(pod), node.Name, podAffinityTerm, err)
|
||||
return false
|
||||
}
|
||||
|
||||
if !podAffinityTermMatches {
|
||||
// TODO: Think about whether this can be simplified once we have controllerRef
|
||||
// Check if it is in special case that the requiredDuringScheduling affinity requirement can be disregarded.
|
||||
// If the requiredDuringScheduling affinity requirement matches a pod's own labels and namespace, and there are no other such pods
|
||||
// anywhere, then disregard the requirement.
|
||||
// This allows rules like "schedule all of the pods of this collection to the same zone" to not block forever
|
||||
// because the first pod of the collection can't be scheduled.
|
||||
names := priorityutil.GetNamespacesFromPodAffinityTerm(pod, podAffinityTerm)
|
||||
labelSelector, err := unversioned.LabelSelectorAsSelector(podAffinityTerm.LabelSelector)
|
||||
if err != nil || !names.Has(pod.Namespace) || !labelSelector.Matches(labels.Set(pod.Labels)) {
|
||||
glog.V(10).Infof("Cannot schedule pod %+v onto node %v, because none of the existing pods on this node satisfy the PodAffinityTerm %v, err: %+v",
|
||||
podName(pod), node.Name, podAffinityTerm, err)
|
||||
return false
|
||||
}
|
||||
|
||||
// the affinity is to put the pod together with other pods from its same service or controller
|
||||
filteredPods := priorityutil.FilterPodsByNameSpaces(names, allPods)
|
||||
for _, filteredPod := range filteredPods {
|
||||
// if found an existing pod from same service or RC,
|
||||
// the affinity scheduling rules cannot be disregarded.
|
||||
if labelSelector.Matches(labels.Set(filteredPod.Labels)) {
|
||||
glog.V(10).Infof("Cannot schedule pod %+v onto node %v, because none of the existing pods on this node satisfy the PodAffinityTerm %v",
|
||||
podName(pod), node.Name, podAffinityTerm)
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// all the required pod affinity scheduling rules satisfied
|
||||
glog.V(10).Infof("All the required pod affinity scheduling rules are satisfied for Pod %+v, on node %v", podName(pod), node.Name)
|
||||
return true
|
||||
}
|
||||
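To make the escape hatch described above concrete, here is a hypothetical fixture in the style of the tests further down: the pod's required affinity term selects its own label, so on an empty cluster the term is disregarded and the first pod of the collection can still be placed (the last case of TestInterPodAffinityWithMultipleNodes exercises exactly this).

```go
firstPod := &api.Pod{
	ObjectMeta: api.ObjectMeta{
		Labels: map[string]string{"app": "db"}, // hypothetical label
		Annotations: map[string]string{
			api.AffinityAnnotationKey: `
			{"podAffinity": {"requiredDuringSchedulingIgnoredDuringExecution": [{
				"labelSelector": {"matchExpressions": [{"key": "app", "operator": "In", "values": ["db"]}]},
				"topologyKey": "zone"
			}]}}`,
		},
	},
}
// With no other app=db pod anywhere, NodeMatchesHardPodAffinity still returns true for firstPod.
```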
|
||||
// Checks whether the given node has pods which satisfy all the
|
||||
// required pod anti-affinity scheduling rules.
|
||||
// Also checks whether putting the pod onto the node would break
|
||||
// any anti-affinity scheduling rules indicated by existing pods.
|
||||
// If node has pods which satisfy all the required pod anti-affinity
|
||||
// scheduling rules and scheduling the pod onto the node won't
|
||||
// break any existing pods' anti-affinity rules, then return true.
|
||||
func (checker *PodAffinityChecker) NodeMatchesHardPodAntiAffinity(pod *api.Pod, allPods []*api.Pod, node *api.Node, podAntiAffinity *api.PodAntiAffinity) bool {
|
||||
var podAntiAffinityTerms []api.PodAffinityTerm
|
||||
if len(podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution) != 0 {
|
||||
podAntiAffinityTerms = podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution
|
||||
}
|
||||
// TODO: Uncomment this block when implement RequiredDuringSchedulingRequiredDuringExecution.
|
||||
//if len(podAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution) != 0 {
|
||||
// podAntiAffinityTerms = append(podAntiAffinityTerms, podAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution...)
|
||||
//}
|
||||
|
||||
// foreach element podAntiAffinityTerm of podAntiAffinityTerms
|
||||
// if the pod matches the term (breaks the anti-affinity),
|
||||
// don't schedule the pod onto this node.
|
||||
for _, podAntiAffinityTerm := range podAntiAffinityTerms {
|
||||
podAntiAffinityTermMatches, err := checker.AnyPodMatchesPodAffinityTerm(pod, allPods, node, podAntiAffinityTerm)
|
||||
if err != nil || podAntiAffinityTermMatches {
|
||||
glog.V(10).Infof("Cannot schedule pod %+v onto node %v, because not all the existing pods on this node satisfy the PodAntiAffinityTerm %v, err: %v",
|
||||
podName(pod), node.Name, podAntiAffinityTerm, err)
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// Check if scheduling the pod onto this node would break
|
||||
// any anti-affinity rules indicated by the existing pods on the node.
|
||||
// If it would break, system should not schedule pod onto this node.
|
||||
for _, ep := range allPods {
|
||||
epAffinity, err := api.GetAffinityFromPodAnnotations(ep.Annotations)
|
||||
if err != nil {
|
||||
glog.V(10).Infof("Failed to get Affinity from Pod %+v, err: %+v", podName(pod), err)
|
||||
return false
|
||||
}
|
||||
if epAffinity.PodAntiAffinity != nil {
|
||||
var epAntiAffinityTerms []api.PodAffinityTerm
|
||||
if len(epAffinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution) != 0 {
|
||||
epAntiAffinityTerms = epAffinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution
|
||||
}
|
||||
// TODO: Uncomment this block when implement RequiredDuringSchedulingRequiredDuringExecution.
|
||||
//if len(epAffinity.PodAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution) != 0 {
|
||||
// epAntiAffinityTerms = append(epAntiAffinityTerms, epAffinity.PodAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution...)
|
||||
//}
|
||||
|
||||
for _, epAntiAffinityTerm := range epAntiAffinityTerms {
|
||||
labelSelector, err := unversioned.LabelSelectorAsSelector(epAntiAffinityTerm.LabelSelector)
|
||||
if err != nil {
|
||||
glog.V(10).Infof("Failed to get label selector from anti-affinityterm %+v of existing pod %+v, err: %+v", epAntiAffinityTerm, podName(pod), err)
|
||||
return false
|
||||
}
|
||||
|
||||
names := priorityutil.GetNamespacesFromPodAffinityTerm(ep, epAntiAffinityTerm)
|
||||
if (len(names) == 0 || names.Has(pod.Namespace)) && labelSelector.Matches(labels.Set(pod.Labels)) {
|
||||
epNode, err := checker.info.GetNodeInfo(ep.Spec.NodeName)
|
||||
if err != nil || checker.failureDomains.NodesHaveSameTopologyKey(node, epNode, epAntiAffinityTerm.TopologyKey) {
|
||||
glog.V(10).Infof("Cannot schedule Pod %+v, onto node %v because the pod would break the PodAntiAffinityTerm %+v, of existing pod %+v, err: %v",
|
||||
podName(pod), node.Name, epAntiAffinityTerm, podName(ep), err)
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// all the required pod anti-affinity scheduling rules are satisfied
|
||||
glog.V(10).Infof("Can schedule Pod %+v, on node %v because all the required pod anti-affinity scheduling rules are satisfied", podName(pod), node.Name)
|
||||
return true
|
||||
}
|
||||
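Two things are worth noting about the function above. First, the reverse-direction loop is what makes required anti-affinity symmetric: an existing pod's required anti-affinity term can reject the incoming pod even though the incoming pod's own terms are satisfied. Second, that loop is only reached when the incoming pod itself carries a PodAntiAffinity section, because NodeMatchPodAffinityAntiAffinity below only calls this function in that case. A hedged sketch of the symmetry case, mirroring the "PodAntiAffinity symmetry" test further down:

```go
// Existing pod already bound to nodeA: "keep security=S1 out of my zone".
existing := &api.Pod{
	Spec: api.PodSpec{NodeName: "nodeA"},
	ObjectMeta: api.ObjectMeta{
		Labels: map[string]string{"security": "S2"},
		Annotations: map[string]string{api.AffinityAnnotationKey: `
			{"podAntiAffinity": {"requiredDuringSchedulingIgnoredDuringExecution": [{
				"labelSelector": {"matchExpressions": [{"key": "security", "operator": "In", "values": ["S1"]}]},
				"topologyKey": "zone"
			}]}}`},
	},
}
// An incoming pod labeled security=S1 (and carrying some anti-affinity of its own, so that
// this function runs at all) is rejected on every node that shares nodeA's "zone" label value.
```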
|
||||
// NodeMatchPodAffinityAntiAffinity checks if the node matches
|
||||
// the requiredDuringScheduling affinity/anti-affinity rules indicated by the pod.
|
||||
func (checker *PodAffinityChecker) NodeMatchPodAffinityAntiAffinity(pod *api.Pod, allPods []*api.Pod, node *api.Node) bool {
|
||||
// Parse required affinity scheduling rules.
|
||||
affinity, err := api.GetAffinityFromPodAnnotations(pod.Annotations)
|
||||
if err != nil {
|
||||
glog.V(10).Infof("Failed to get Affinity from Pod %+v, err: %+v", podName(pod), err)
|
||||
return false
|
||||
}
|
||||
|
||||
// check if the current node match the inter-pod affinity scheduling rules.
|
||||
if affinity.PodAffinity != nil {
|
||||
if !checker.NodeMatchesHardPodAffinity(pod, allPods, node, affinity.PodAffinity) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// check if the current node match the inter-pod anti-affinity scheduling rules.
|
||||
if affinity.PodAntiAffinity != nil {
|
||||
if !checker.NodeMatchesHardPodAntiAffinity(pod, allPods, node, affinity.PodAntiAffinity) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
|
|
@ -20,12 +20,14 @@ import (
|
|||
"fmt"
|
||||
"os/exec"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/resource"
|
||||
"k8s.io/kubernetes/pkg/util/codeinspector"
|
||||
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
|
||||
priorityutil "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util"
|
||||
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
|
||||
)
|
||||
|
||||
|
@ -1564,3 +1566,673 @@ func TestRunGeneralPredicates(t *testing.T) {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestInterPodAffinity(t *testing.T) {
|
||||
podLabel := map[string]string{"service": "securityscan"}
|
||||
labels1 := map[string]string{
|
||||
"region": "r1",
|
||||
"zone": "z11",
|
||||
}
|
||||
podLabel2 := map[string]string{"security": "S1"}
|
||||
node1 := api.Node{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labels1}}
|
||||
tests := []struct {
|
||||
pod *api.Pod
|
||||
pods []*api.Pod
|
||||
node api.Node
|
||||
fits bool
|
||||
test string
|
||||
}{
|
||||
{
|
||||
pod: new(api.Pod),
|
||||
node: node1,
|
||||
fits: true,
|
||||
test: "A pod that has no required pod affinity scheduling rules can schedule onto a node with no existing pods",
|
||||
},
|
||||
{
|
||||
pod: &api.Pod{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Labels: podLabel2,
|
||||
Annotations: map[string]string{
|
||||
api.AffinityAnnotationKey: `
|
||||
{"podAffinity": {
|
||||
"requiredDuringSchedulingIgnoredDuringExecution": [{
|
||||
"labelSelector": {
|
||||
"matchExpressions": [{
|
||||
"key": "service",
|
||||
"operator": "In",
|
||||
"values": ["securityscan", "value2"]
|
||||
}]
|
||||
},
|
||||
"topologyKey": "region"
|
||||
}]
|
||||
}}`,
|
||||
},
|
||||
},
|
||||
},
|
||||
pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabel}}},
|
||||
node: node1,
|
||||
fits: true,
|
||||
test: "satisfies with requiredDuringSchedulingIgnoredDuringExecution in PodAffinity using In operator that matches the existing pod",
|
||||
},
|
||||
{
|
||||
pod: &api.Pod{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Labels: podLabel2,
|
||||
Annotations: map[string]string{
|
||||
api.AffinityAnnotationKey: `
|
||||
{"podAffinity": {
|
||||
"requiredDuringSchedulingIgnoredDuringExecution": [{
|
||||
"labelSelector": {
|
||||
"matchExpressions": [{
|
||||
"key": "service",
|
||||
"operator": "NotIn",
|
||||
"values": ["securityscan3", "value3"]
|
||||
}]
|
||||
},
|
||||
"topologyKey": "region"
|
||||
}]
|
||||
}}`,
|
||||
},
|
||||
},
|
||||
},
|
||||
pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabel}}},
|
||||
node: node1,
|
||||
fits: true,
|
||||
test: "satisfies the pod with requiredDuringSchedulingIgnoredDuringExecution in PodAffinity using not in operator in labelSelector that matches the existing pod",
|
||||
},
|
||||
{
|
||||
pod: &api.Pod{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Labels: podLabel2,
|
||||
Annotations: map[string]string{
|
||||
api.AffinityAnnotationKey: `
|
||||
{"podAffinity": {
|
||||
"requiredDuringSchedulingIgnoredDuringExecution": [{
|
||||
"labelSelector": {
|
||||
"matchExpressions": [{
|
||||
"key": "service",
|
||||
"operator": "In",
|
||||
"values": ["securityscan", "value2"]
|
||||
}]
|
||||
},
|
||||
"namespaces":["DiffNameSpace"]
|
||||
}]
|
||||
}}`,
|
||||
},
|
||||
},
|
||||
},
|
||||
pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabel, Namespace: "ns"}}},
|
||||
node: node1,
|
||||
fits: false,
|
||||
test: "Does not satisfy the PodAffinity with labelSelector because of diff Namespace",
|
||||
},
|
||||
{
|
||||
pod: &api.Pod{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Labels: podLabel,
|
||||
Annotations: map[string]string{
|
||||
api.AffinityAnnotationKey: `
|
||||
{"podAffinity": {
|
||||
"requiredDuringSchedulingIgnoredDuringExecution": [{
|
||||
"labelSelector": {
|
||||
"matchExpressions": [{
|
||||
"key": "service",
|
||||
"operator": "In",
|
||||
"values": ["antivirusscan", "value2"]
|
||||
}]
|
||||
}
|
||||
}]
|
||||
}}`,
|
||||
},
|
||||
},
|
||||
},
|
||||
pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabel}}},
|
||||
node: node1,
|
||||
fits: false,
|
||||
test: "Doesn't satisfy the PodAffinity because of unmatching labelSelector with the existing pod",
|
||||
},
|
||||
{
|
||||
pod: &api.Pod{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Labels: podLabel2,
|
||||
Annotations: map[string]string{
|
||||
api.AffinityAnnotationKey: `
|
||||
{"podAffinity": {
|
||||
"requiredDuringSchedulingIgnoredDuringExecution": [
|
||||
{
|
||||
"labelSelector": {
|
||||
"matchExpressions": [{
|
||||
"key": "service",
|
||||
"operator": "Exists"
|
||||
}, {
|
||||
"key": "wrongkey",
|
||||
"operator": "DoesNotExist"
|
||||
}]
|
||||
},
|
||||
"topologyKey": "region"
|
||||
}, {
|
||||
"labelSelector": {
|
||||
"matchExpressions": [{
|
||||
"key": "service",
|
||||
"operator": "In",
|
||||
"values": ["securityscan"]
|
||||
}, {
|
||||
"key": "service",
|
||||
"operator": "NotIn",
|
||||
"values": ["WrongValue"]
|
||||
}]
|
||||
},
|
||||
"topologyKey": "region"
|
||||
}
|
||||
]
|
||||
}}`,
|
||||
},
|
||||
},
|
||||
},
|
||||
pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabel}}},
|
||||
node: node1,
|
||||
fits: true,
|
||||
test: "satisfies the PodAffinity with different label Operators in multiple RequiredDuringSchedulingIgnoredDuringExecution ",
|
||||
},
|
||||
{
|
||||
pod: &api.Pod{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Labels: podLabel2,
|
||||
Annotations: map[string]string{
|
||||
api.AffinityAnnotationKey: `
|
||||
{"podAffinity": {
|
||||
"requiredDuringSchedulingIgnoredDuringExecution": [
|
||||
{
|
||||
"labelSelector": {
|
||||
"matchExpressions": [{
|
||||
"key": "service",
|
||||
"operator": "Exists"
|
||||
}, {
|
||||
"key": "wrongkey",
|
||||
"operator": "DoesNotExist"
|
||||
}]
|
||||
},
|
||||
"topologyKey": "region"
|
||||
}, {
|
||||
"labelSelector": {
|
||||
"matchExpressions": [{
|
||||
"key": "service",
|
||||
"operator": "In",
|
||||
"values": ["securityscan2"]
|
||||
}, {
|
||||
"key": "service",
|
||||
"operator": "NotIn",
|
||||
"values": ["WrongValue"]
|
||||
}]
|
||||
},
|
||||
"topologyKey": "region"
|
||||
}
|
||||
]
|
||||
}}`,
|
||||
},
|
||||
},
|
||||
},
|
||||
pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabel}}},
|
||||
node: node1,
|
||||
fits: false,
|
||||
test: "The labelSelector requirements(items of matchExpressions) are ANDed, the pod cannot schedule onto the node becasue one of the matchExpression item don't match.",
|
||||
},
|
||||
{
|
||||
pod: &api.Pod{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Labels: podLabel2,
|
||||
Annotations: map[string]string{
|
||||
api.AffinityAnnotationKey: `
|
||||
{"podAffinity": {
|
||||
"requiredDuringSchedulingIgnoredDuringExecution": [{
|
||||
"labelSelector": {
|
||||
"matchExpressions": [{
|
||||
"key": "service",
|
||||
"operator": "In",
|
||||
"values": ["securityscan", "value2"]
|
||||
}]
|
||||
},
|
||||
"topologyKey": "region"
|
||||
}]
|
||||
},
|
||||
"podAntiAffinity": {
|
||||
"requiredDuringSchedulingIgnoredDuringExecution": [{
|
||||
"labelSelector": {
|
||||
"matchExpressions": [{
|
||||
"key": "service",
|
||||
"operator": "In",
|
||||
"values": ["antivirusscan", "value2"]
|
||||
}]
|
||||
},
|
||||
"topologyKey": "node"
|
||||
}]
|
||||
}}`,
|
||||
},
|
||||
},
|
||||
},
|
||||
pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabel}}},
|
||||
node: node1,
|
||||
fits: true,
|
||||
test: "satisfies the PodAffinity and PodAntiAffinity with the existing pod",
|
||||
},
|
||||
// TODO: Uncomment this block when implement RequiredDuringSchedulingRequiredDuringExecution.
|
||||
//{
|
||||
// pod: &api.Pod{
|
||||
// ObjectMeta: api.ObjectMeta{
|
||||
// Labels: podLabel2,
|
||||
// Annotations: map[string]string{
|
||||
// api.AffinityAnnotationKey: `
|
||||
// {"podAffinity": {
|
||||
// "requiredDuringSchedulingRequiredDuringExecution": [
|
||||
// {
|
||||
// "labelSelector": {
|
||||
// "matchExpressions": [{
|
||||
// "key": "service",
|
||||
// "operator": "Exists"
|
||||
// }, {
|
||||
// "key": "wrongkey",
|
||||
// "operator": "DoesNotExist"
|
||||
// }]
|
||||
// },
|
||||
// "topologyKey": "region"
|
||||
// }, {
|
||||
// "labelSelector": {
|
||||
// "matchExpressions": [{
|
||||
// "key": "service",
|
||||
// "operator": "In",
|
||||
// "values": ["securityscan"]
|
||||
// }, {
|
||||
// "key": "service",
|
||||
// "operator": "NotIn",
|
||||
// "values": ["WrongValue"]
|
||||
// }]
|
||||
// },
|
||||
// "topologyKey": "region"
|
||||
// }
|
||||
// ]
|
||||
// }}`,
|
||||
// },
|
||||
// },
|
||||
// },
|
||||
// pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podlabel}}},
|
||||
// node: node1,
|
||||
// fits: true,
|
||||
// test: "satisfies the PodAffinity with different Label Operators in multiple RequiredDuringSchedulingRequiredDuringExecution ",
|
||||
//},
|
||||
{
|
||||
pod: &api.Pod{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Labels: podLabel2,
|
||||
Annotations: map[string]string{
|
||||
api.AffinityAnnotationKey: `
|
||||
{"podAffinity": {
|
||||
"requiredDuringSchedulingIgnoredDuringExecution": [{
|
||||
"labelSelector": {
|
||||
"matchExpressions": [{
|
||||
"key": "service",
|
||||
"operator": "In",
|
||||
"values": ["securityscan", "value2"]
|
||||
}]
|
||||
},
|
||||
"topologyKey": "region"
|
||||
}]
|
||||
},
|
||||
"podAntiAffinity": {
|
||||
"requiredDuringSchedulingIgnoredDuringExecution": [{
|
||||
"labelSelector": {
|
||||
"matchExpressions": [{
|
||||
"key": "service",
|
||||
"operator": "In",
|
||||
"values": ["antivirusscan", "value2"]
|
||||
}]
|
||||
},
|
||||
"topologyKey": "node"
|
||||
}]
|
||||
}}`,
|
||||
},
|
||||
},
|
||||
},
|
||||
pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine1"},
|
||||
ObjectMeta: api.ObjectMeta{Labels: podLabel,
|
||||
Annotations: map[string]string{
|
||||
api.AffinityAnnotationKey: `
|
||||
{"PodAntiAffinity": {
|
||||
"requiredDuringSchedulingIgnoredDuringExecution": [{
|
||||
"labelSelector": {
|
||||
"matchExpressions": [{
|
||||
"key": "service",
|
||||
"operator": "In",
|
||||
"values": ["antivirusscan", "value2"]
|
||||
}]
|
||||
},
|
||||
"topologyKey": "node"
|
||||
}]
|
||||
}}`,
|
||||
}},
|
||||
}},
|
||||
node: node1,
|
||||
fits: true,
|
||||
test: "satisfies the PodAffinity and PodAntiAffinity and PodAntiAffinity symmetry with the existing pod",
|
||||
},
|
||||
{
|
||||
pod: &api.Pod{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Labels: podLabel2,
|
||||
Annotations: map[string]string{
|
||||
api.AffinityAnnotationKey: `
|
||||
{"podAffinity": {
|
||||
"requiredDuringSchedulingIgnoredDuringExecution": [{
|
||||
"labelSelector": {
|
||||
"matchExpressions": [{
|
||||
"key": "service",
|
||||
"operator": "In",
|
||||
"values": ["securityscan", "value2"]
|
||||
}]
|
||||
},
|
||||
"topologyKey": "region"
|
||||
}]
|
||||
},
|
||||
"podAntiAffinity": {
|
||||
"requiredDuringSchedulingIgnoredDuringExecution": [{
|
||||
"labelSelector": {
|
||||
"matchExpressions": [{
|
||||
"key": "service",
|
||||
"operator": "In",
|
||||
"values": ["securityscan", "value2"]
|
||||
}]
|
||||
},
|
||||
"topologyKey": "zone"
|
||||
}]
|
||||
}}`,
|
||||
},
|
||||
},
|
||||
},
|
||||
pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabel}}},
|
||||
node: node1,
|
||||
fits: false,
|
||||
test: "satisfies the PodAffinity but doesn't satisfies the PodAntiAffinity with the existing pod",
|
||||
},
|
||||
{
|
||||
pod: &api.Pod{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Labels: podLabel,
|
||||
Annotations: map[string]string{
|
||||
api.AffinityAnnotationKey: `
|
||||
{"podAffinity": {
|
||||
"requiredDuringSchedulingIgnoredDuringExecution": [{
|
||||
"labelSelector": {
|
||||
"matchExpressions": [{
|
||||
"key": "service",
|
||||
"operator": "In",
|
||||
"values": ["securityscan", "value2"]
|
||||
}]
|
||||
},
|
||||
"topologyKey": "region"
|
||||
}]
|
||||
},
|
||||
"podAntiAffinity": {
|
||||
"requiredDuringSchedulingIgnoredDuringExecution": [{
|
||||
"labelSelector": {
|
||||
"matchExpressions": [{
|
||||
"key": "service",
|
||||
"operator": "In",
|
||||
"values": ["antivirusscan", "value2"]
|
||||
}]
|
||||
},
|
||||
"topologyKey": "node"
|
||||
}]
|
||||
}}`,
|
||||
},
|
||||
},
|
||||
},
|
||||
pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine1"},
|
||||
ObjectMeta: api.ObjectMeta{Labels: podLabel,
|
||||
Annotations: map[string]string{
|
||||
api.AffinityAnnotationKey: `
|
||||
{"PodAntiAffinity": {
|
||||
"requiredDuringSchedulingIgnoredDuringExecution": [{
|
||||
"labelSelector": {
|
||||
"matchExpressions": [{
|
||||
"key": "service",
|
||||
"operator": "In",
|
||||
"values": ["securityscan", "value2"]
|
||||
}]
|
||||
},
|
||||
"topologyKey": "zone"
|
||||
}]
|
||||
}}`,
|
||||
}},
|
||||
}},
|
||||
node: node1,
|
||||
fits: false,
|
||||
test: "satisfies the PodAffinity and PodAntiAffinity but doesn't satisfies PodAntiAffinity symmetry with the existing pod",
|
||||
},
|
||||
{
|
||||
pod: &api.Pod{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Labels: podLabel,
|
||||
Annotations: map[string]string{
|
||||
api.AffinityAnnotationKey: `
|
||||
{"podAffinity": {
|
||||
"requiredDuringSchedulingIgnoredDuringExecution": [{
|
||||
"labelSelector": {
|
||||
"matchExpressions": [{
|
||||
"key": "service",
|
||||
"operator": "NotIn",
|
||||
"values": ["securityscan", "value2"]
|
||||
}]
|
||||
},
|
||||
"topologyKey": "region"
|
||||
}]
|
||||
}}`,
|
||||
},
|
||||
},
|
||||
},
|
||||
pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabel}}},
|
||||
node: node1,
|
||||
fits: false,
|
||||
test: "pod matches its own Label in PodAffinity and that matches the existing pod Labels",
|
||||
},
|
||||
}
|
||||
for _, test := range tests {
|
||||
node := test.node
|
||||
var podsOnNode []*api.Pod
|
||||
for _, pod := range test.pods {
|
||||
if pod.Spec.NodeName == node.Name {
|
||||
podsOnNode = append(podsOnNode, pod)
|
||||
}
|
||||
}
|
||||
|
||||
fit := PodAffinityChecker{
|
||||
info: FakeNodeInfo(node),
|
||||
podLister: algorithm.FakePodLister(test.pods),
|
||||
failureDomains: priorityutil.Topologies{DefaultKeys: strings.Split(api.DefaultFailureDomains, ",")},
|
||||
}
|
||||
fits, err := fit.InterPodAffinityMatches(test.pod, test.node.Name, schedulercache.NewNodeInfo(podsOnNode...))
|
||||
if !reflect.DeepEqual(err, ErrPodAffinityNotMatch) && err != nil {
|
||||
t.Errorf("%s: unexpected error %v", test.test, err)
|
||||
}
|
||||
if fits != test.fits {
|
||||
t.Errorf("%s: expected %v got %v", test.test, test.fits, fits)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
|
||||
podLabelA := map[string]string{
|
||||
"foo": "bar",
|
||||
}
|
||||
labelRgChina := map[string]string{
|
||||
"region": "China",
|
||||
}
|
||||
labelRgChinaAzAz1 := map[string]string{
|
||||
"region": "China",
|
||||
"az": "az1",
|
||||
}
|
||||
labelRgIndia := map[string]string{
|
||||
"region": "India",
|
||||
}
|
||||
tests := []struct {
|
||||
pod *api.Pod
|
||||
pods []*api.Pod
|
||||
nodes []api.Node
|
||||
fits map[string]bool
|
||||
test string
|
||||
}{
|
||||
{
|
||||
pod: &api.Pod{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Annotations: map[string]string{
|
||||
api.AffinityAnnotationKey: `
|
||||
{"podAffinity": {
|
||||
"requiredDuringSchedulingIgnoredDuringExecution": [{
|
||||
"labelSelector": {
|
||||
"matchExpressions": [{
|
||||
"key": "foo",
|
||||
"operator": "In",
|
||||
"values": ["bar"]
|
||||
}]
|
||||
},
|
||||
"topologyKey": "region"
|
||||
}]
|
||||
}}`,
|
||||
},
|
||||
},
|
||||
},
|
||||
pods: []*api.Pod{
|
||||
{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelA}},
|
||||
},
|
||||
nodes: []api.Node{
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgChinaAzAz1}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelRgIndia}},
|
||||
},
|
||||
fits: map[string]bool{
|
||||
"machine1": true,
|
||||
"machine2": true,
|
||||
"machine3": false,
|
||||
},
|
||||
test: "A pod can be scheduled onto all the nodes that have the same topology key & label value with one of them has an existing pod that match the affinity rules",
|
||||
},
|
||||
{
|
||||
pod: &api.Pod{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Annotations: map[string]string{
|
||||
api.AffinityAnnotationKey: `
|
||||
{
|
||||
"nodeAffinity": {
|
||||
"requiredDuringSchedulingIgnoredDuringExecution": {
|
||||
"nodeSelectorTerms": [{
|
||||
"matchExpressions": [{
|
||||
"key": "hostname",
|
||||
"operator": "NotIn",
|
||||
"values": ["h1"]
|
||||
}]
|
||||
}]
|
||||
}
|
||||
},
|
||||
"podAffinity": {
|
||||
"requiredDuringSchedulingIgnoredDuringExecution": [{
|
||||
"labelSelector": {
|
||||
"matchExpressions": [{
|
||||
"key": "foo",
|
||||
"operator": "In",
|
||||
"values": ["abc"]
|
||||
}]
|
||||
},
|
||||
"topologyKey": "region"
|
||||
}]
|
||||
}
|
||||
}`,
|
||||
},
|
||||
},
|
||||
},
|
||||
pods: []*api.Pod{
|
||||
{Spec: api.PodSpec{NodeName: "nodeA"}, ObjectMeta: api.ObjectMeta{Labels: map[string]string{"foo": "abc"}}},
|
||||
{Spec: api.PodSpec{NodeName: "nodeB"}, ObjectMeta: api.ObjectMeta{Labels: map[string]string{"foo": "def"}}},
|
||||
},
|
||||
nodes: []api.Node{
|
||||
{ObjectMeta: api.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "hostname": "h1"}}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "hostname": "h2"}}},
|
||||
},
|
||||
fits: map[string]bool{
|
||||
"nodeA": false,
|
||||
"nodeB": true,
|
||||
},
|
||||
test: "NodeA and nodeB have same topologyKey and label value. NodeA does not satisfy node affinity rule, but has an existing pod that match the inter pod affinity rule. The pod can be scheduled onto nodeB.",
|
||||
},
|
||||
{
|
||||
pod: &api.Pod{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
Annotations: map[string]string{
|
||||
api.AffinityAnnotationKey: `
|
||||
{"podAffinity": {
|
||||
"requiredDuringSchedulingIgnoredDuringExecution": [{
|
||||
"labelSelector": {
|
||||
"matchExpressions": [{
|
||||
"key": "foo",
|
||||
"operator": "In",
|
||||
"values": ["bar"]
|
||||
}]
|
||||
},
|
||||
"topologyKey": "zone"
|
||||
}]
|
||||
}}`,
|
||||
},
|
||||
},
|
||||
},
|
||||
pods: []*api.Pod{},
|
||||
nodes: []api.Node{
|
||||
{ObjectMeta: api.ObjectMeta{Name: "nodeA", Labels: map[string]string{"zone": "az1", "hostname": "h1"}}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "nodeB", Labels: map[string]string{"zone": "az2", "hostname": "h2"}}},
|
||||
},
|
||||
fits: map[string]bool{
|
||||
"nodeA": true,
|
||||
"nodeB": true,
|
||||
},
|
||||
test: "The affinity rule is to schedule all of the pods of this collection to the same zone. The first pod of the collection " +
|
||||
"should not be blocked from being scheduled onto any node, even there's no existing pod that match the rule anywhere.",
|
||||
},
|
||||
}
|
||||
for _, test := range tests {
|
||||
nodeListInfo := FakeNodeListInfo(test.nodes)
|
||||
for _, node := range test.nodes {
|
||||
var podsOnNode []*api.Pod
|
||||
for _, pod := range test.pods {
|
||||
if pod.Spec.NodeName == node.Name {
|
||||
podsOnNode = append(podsOnNode, pod)
|
||||
}
|
||||
}
|
||||
|
||||
testFit := PodAffinityChecker{
|
||||
info: nodeListInfo,
|
||||
podLister: algorithm.FakePodLister(test.pods),
|
||||
failureDomains: priorityutil.Topologies{DefaultKeys: strings.Split(api.DefaultFailureDomains, ",")},
|
||||
}
|
||||
fits, err := testFit.InterPodAffinityMatches(test.pod, node.Name, schedulercache.NewNodeInfo(podsOnNode...))
|
||||
if !reflect.DeepEqual(err, ErrPodAffinityNotMatch) && err != nil {
|
||||
t.Errorf("%s: unexpected error %v", test.test, err)
|
||||
}
|
||||
affinity, err := api.GetAffinityFromPodAnnotations(test.pod.ObjectMeta.Annotations)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
if affinity.NodeAffinity != nil {
|
||||
nodeInfo := schedulercache.NewNodeInfo()
|
||||
nodeInfo.SetNode(&node)
|
||||
fits2, err := PodSelectorMatches(test.pod, node.Name, nodeInfo)
|
||||
if !reflect.DeepEqual(err, ErrNodeSelectorNotMatch) && err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
fits = fits && fits2
|
||||
}
|
||||
|
||||
if fits != test.fits[node.Name] {
|
||||
t.Errorf("%s: expected %v for %s got %v", test.test, test.fits[node.Name], node.Name, fits)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -0,0 +1,216 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package priorities
|
||||
|
||||
import (
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/labels"
|
||||
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
|
||||
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates"
|
||||
priorityutil "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util"
|
||||
schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
|
||||
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
|
||||
)
|
||||
|
||||
type InterPodAffinity struct {
|
||||
info predicates.NodeInfo
|
||||
nodeLister algorithm.NodeLister
|
||||
podLister algorithm.PodLister
|
||||
hardPodAffinityWeight int
|
||||
failureDomains priorityutil.Topologies
|
||||
}
|
||||
|
||||
func NewInterPodAffinityPriority(info predicates.NodeInfo, nodeLister algorithm.NodeLister, podLister algorithm.PodLister, hardPodAffinityWeight int, failureDomains []string) algorithm.PriorityFunction {
|
||||
interPodAffinity := &InterPodAffinity{
|
||||
info: info,
|
||||
nodeLister: nodeLister,
|
||||
podLister: podLister,
|
||||
hardPodAffinityWeight: hardPodAffinityWeight,
|
||||
failureDomains: priorityutil.Topologies{DefaultKeys: failureDomains},
|
||||
}
|
||||
return interPodAffinity.CalculateInterPodAffinityPriority
|
||||
}
|
||||
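A hedged wiring sketch in the style of the tests in the next file; FakeNodeListInfo is the helper defined there, the algorithm.Fake* listers are assumed to come from the scheduler test utilities, and the fixture variables (nodes, existingPods, pod) are placeholders. Note that CalculateInterPodAffinityPriority below never reads its nodeNameToInfo argument, so an empty map is enough here.

```go
prio := NewInterPodAffinityPriority(
	FakeNodeListInfo(nodes),
	algorithm.FakeNodeLister(api.NodeList{Items: nodes}),
	algorithm.FakePodLister(existingPods),
	api.DefaultHardPodAffinitySymmetricWeight,
	strings.Split(api.DefaultFailureDomains, ","),
)
hostScores, err := prio(pod, map[string]*schedulercache.NodeInfo{}, algorithm.FakeNodeLister(api.NodeList{Items: nodes}))
```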
|
||||
// CountPodsThatMatchPodAffinityTerm counts the number of given pods that match the podAffinityTerm.
|
||||
func (ipa *InterPodAffinity) CountPodsThatMatchPodAffinityTerm(pod *api.Pod, podsForMatching []*api.Pod, node *api.Node, podAffinityTerm api.PodAffinityTerm) (int, error) {
|
||||
matchedCount := 0
|
||||
for _, ep := range podsForMatching {
|
||||
match, err := ipa.failureDomains.CheckIfPodMatchPodAffinityTerm(ep, pod, podAffinityTerm,
|
||||
func(ep *api.Pod) (*api.Node, error) {
|
||||
return ipa.info.GetNodeInfo(ep.Spec.NodeName)
|
||||
},
|
||||
func(pod *api.Pod) (*api.Node, error) {
|
||||
return node, nil
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if match {
|
||||
matchedCount++
|
||||
}
|
||||
}
|
||||
return matchedCount, nil
|
||||
}
|
||||
|
||||
// CountWeightByPodMatchAffinityTerm returns the given weight multiplied by the number of pods in podsForMatching that match the podAffinityTerm.
|
||||
func (ipa *InterPodAffinity) CountWeightByPodMatchAffinityTerm(pod *api.Pod, podsForMatching []*api.Pod, weight int, podAffinityTerm api.PodAffinityTerm, node *api.Node) (int, error) {
|
||||
if weight == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
// count the pods in podsForMatching that match the affinity term relative to the given node
|
||||
podsMatchedCount, err := ipa.CountPodsThatMatchPodAffinityTerm(pod, podsForMatching, node, podAffinityTerm)
|
||||
return weight * podsMatchedCount, err
|
||||
}
|
||||
|
||||
// compute a sum by iterating through the elements of weightedPodAffinityTerm and adding
|
||||
// "weight" to the sum if the corresponding PodAffinityTerm is satisfied for
|
||||
// that node; the node(s) with the highest sum are the most preferred.
|
||||
// Symmetry needs to be considered for preferredDuringSchedulingIgnoredDuringExecution from podAffinity & podAntiAffinity,
|
||||
// and symmetry needs to be considered for hard requirements from podAffinity
|
||||
func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodeLister algorithm.NodeLister) (schedulerapi.HostPriorityList, error) {
|
||||
nodes, err := nodeLister.List()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
allPods, err := ipa.podLister.List(labels.Everything())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
affinity, err := api.GetAffinityFromPodAnnotations(pod.Annotations)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// convert the topology key based weights to the node name based weights
|
||||
var maxCount int
|
||||
var minCount int
|
||||
counts := map[string]int{}
|
||||
for _, node := range nodes.Items {
|
||||
totalCount := 0
|
||||
// count weights for the weighted pod affinity
|
||||
if affinity.PodAffinity != nil {
|
||||
for _, weightedTerm := range affinity.PodAffinity.PreferredDuringSchedulingIgnoredDuringExecution {
|
||||
weightedCount, err := ipa.CountWeightByPodMatchAffinityTerm(pod, allPods, weightedTerm.Weight, weightedTerm.PodAffinityTerm, &node)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
totalCount += weightedCount
|
||||
}
|
||||
}
|
||||
|
||||
// count weights for the weighted pod anti-affinity
|
||||
if affinity.PodAntiAffinity != nil {
|
||||
for _, weightedTerm := range affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution {
|
||||
weightedCount, err := ipa.CountWeightByPodMatchAffinityTerm(pod, allPods, (0 - weightedTerm.Weight), weightedTerm.PodAffinityTerm, &node)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
totalCount += weightedCount
|
||||
}
|
||||
}
|
||||
|
||||
// reverse direction checking: count weights for the inter-pod affinity/anti-affinity rules
|
||||
// that are indicated by existing pods on the node.
|
||||
for _, ep := range allPods {
|
||||
epAffinity, err := api.GetAffinityFromPodAnnotations(ep.Annotations)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if epAffinity.PodAffinity != nil {
|
||||
// count the implicit weight for the hard pod affinity indicated by the existing pod.
|
||||
if ipa.hardPodAffinityWeight > 0 {
|
||||
var podAffinityTerms []api.PodAffinityTerm
|
||||
if len(epAffinity.PodAffinity.RequiredDuringSchedulingIgnoredDuringExecution) != 0 {
|
||||
podAffinityTerms = epAffinity.PodAffinity.RequiredDuringSchedulingIgnoredDuringExecution
|
||||
}
|
||||
// TODO: Uncomment this block when implement RequiredDuringSchedulingRequiredDuringExecution.
|
||||
//if len(affinity.PodAffinity.RequiredDuringSchedulingRequiredDuringExecution) != 0 {
|
||||
// podAffinityTerms = append(podAffinityTerms, affinity.PodAffinity.RequiredDuringSchedulingRequiredDuringExecution...)
|
||||
//}
|
||||
for _, epAffinityTerm := range podAffinityTerms {
|
||||
match, err := ipa.failureDomains.CheckIfPodMatchPodAffinityTerm(pod, ep, epAffinityTerm,
|
||||
func(pod *api.Pod) (*api.Node, error) { return &node, nil },
|
||||
func(ep *api.Pod) (*api.Node, error) { return ipa.info.GetNodeInfo(ep.Spec.NodeName) },
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if match {
|
||||
totalCount += ipa.hardPodAffinityWeight
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// count weight for the weighted pod affinity indicated by the existing pod.
|
||||
for _, epWeightedTerm := range epAffinity.PodAffinity.PreferredDuringSchedulingIgnoredDuringExecution {
|
||||
match, err := ipa.failureDomains.CheckIfPodMatchPodAffinityTerm(pod, ep, epWeightedTerm.PodAffinityTerm,
|
||||
func(pod *api.Pod) (*api.Node, error) { return &node, nil },
|
||||
func(ep *api.Pod) (*api.Node, error) { return ipa.info.GetNodeInfo(ep.Spec.NodeName) },
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if match {
|
||||
totalCount += epWeightedTerm.Weight
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// count weight for the weighted pod anti-affinity indicated by the existing pod.
|
||||
if epAffinity.PodAntiAffinity != nil {
|
||||
for _, epWeightedTerm := range epAffinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution {
|
||||
match, err := ipa.failureDomains.CheckIfPodMatchPodAffinityTerm(pod, ep, epWeightedTerm.PodAffinityTerm,
|
||||
func(pod *api.Pod) (*api.Node, error) { return &node, nil },
|
||||
func(ep *api.Pod) (*api.Node, error) { return ipa.info.GetNodeInfo(ep.Spec.NodeName) },
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if match {
|
||||
totalCount -= epWeightedTerm.Weight
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
counts[node.Name] = totalCount
|
||||
if counts[node.Name] > maxCount {
|
||||
maxCount = counts[node.Name]
|
||||
}
|
||||
if counts[node.Name] < minCount {
|
||||
minCount = counts[node.Name]
|
||||
}
|
||||
}
|
||||
|
||||
// calculate final priority score for each node
|
||||
result := []schedulerapi.HostPriority{}
|
||||
for _, node := range nodes.Items {
|
||||
fScore := float64(0)
|
||||
if (maxCount - minCount) > 0 {
|
||||
fScore = 10 * (float64(counts[node.Name]-minCount) / float64(maxCount-minCount))
|
||||
}
|
||||
result = append(result, schedulerapi.HostPriority{Host: node.Name, Score: int(fScore)})
|
||||
glog.V(10).Infof(
|
||||
"%v -> %v: InterPodAffinityPriority, Score: (%d)", pod.Name, node.Name, int(fScore),
|
||||
)
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
|
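The closing loop above maps each node's raw total onto the scheduler's 0-10 score range, relative to the minimum and maximum totals seen across all nodes. A toy walk-through in plain Go (not part of the patch), with made-up totals:

```go
counts := map[string]int{"nodeA": 15, "nodeB": 5, "nodeC": -5}
minCount, maxCount := -5, 15
for name, c := range counts {
	fScore := float64(0)
	if maxCount-minCount > 0 {
		fScore = 10 * float64(c-minCount) / float64(maxCount-minCount)
	}
	fmt.Printf("%s -> %d\n", name, int(fScore)) // nodeA -> 10, nodeB -> 5, nodeC -> 0
}
```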
@ -0,0 +1,688 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package priorities
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/unversioned"
|
||||
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
|
||||
priorityutil "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util"
|
||||
schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
|
||||
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
|
||||
)
|
||||
|
||||
type FakeNodeListInfo []api.Node
|
||||
|
||||
func (nodes FakeNodeListInfo) GetNodeInfo(nodeName string) (*api.Node, error) {
|
||||
for _, node := range nodes {
|
||||
if node.Name == nodeName {
|
||||
return &node, nil
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("Unable to find node: %s", nodeName)
|
||||
}
|
||||
|
||||
func TestInterPodAffinityPriority(t *testing.T) {
|
||||
labelRgChina := map[string]string{
|
||||
"region": "China",
|
||||
}
|
||||
labelRgIndia := map[string]string{
|
||||
"region": "India",
|
||||
}
|
||||
labelAzAz1 := map[string]string{
|
||||
"az": "az1",
|
||||
}
|
||||
labelAzAz2 := map[string]string{
|
||||
"az": "az2",
|
||||
}
|
||||
labelRgChinaAzAz1 := map[string]string{
|
||||
"region": "China",
|
||||
"az": "az1",
|
||||
}
|
||||
podLabelSecurityS1 := map[string]string{
|
||||
"security": "S1",
|
||||
}
|
||||
podLabelSecurityS2 := map[string]string{
|
||||
"security": "S2",
|
||||
}
|
||||
// considered only preferredDuringSchedulingIgnoredDuringExecution in pod affinity
|
||||
stayWithS1InRegion := map[string]string{
|
||||
api.AffinityAnnotationKey: `
|
||||
{"podAffinity": {
|
||||
"preferredDuringSchedulingIgnoredDuringExecution": [{
|
||||
"weight": 5,
|
||||
"podAffinityTerm": {
|
||||
"labelSelector": {
|
||||
"matchExpressions": [{
|
||||
"key": "security",
|
||||
"operator": "In",
|
||||
"values":["S1"]
|
||||
}]
|
||||
},
|
||||
"namespaces": [],
|
||||
"topologyKey": "region"
|
||||
}
|
||||
}]
|
||||
}}`,
|
||||
}
|
||||
stayWithS2InRegion := map[string]string{
|
||||
api.AffinityAnnotationKey: `
|
||||
{"podAffinity": {
|
||||
"preferredDuringSchedulingIgnoredDuringExecution": [{
|
||||
"weight": 6,
|
||||
"podAffinityTerm": {
|
||||
"labelSelector": {
|
||||
"matchExpressions": [{
|
||||
"key": "security",
|
||||
"operator": "In",
|
||||
"values":["S2"]
|
||||
}]
|
||||
},
|
||||
"namespaces": [],
|
||||
"topologyKey": "region"
|
||||
}
|
||||
}]
|
||||
}}`,
|
||||
}
|
||||
affinity3 := map[string]string{
|
||||
api.AffinityAnnotationKey: `
|
||||
{"podAffinity": {
|
||||
"preferredDuringSchedulingIgnoredDuringExecution": [
|
||||
{
|
||||
"weight": 8,
|
||||
"podAffinityTerm": {
|
||||
"labelSelector": {
|
||||
"matchExpressions": [{
|
||||
"key": "security",
|
||||
"operator": "NotIn",
|
||||
"values":["S1"]
|
||||
}, {
|
||||
"key": "security",
|
||||
"operator": "In",
|
||||
"values":["S2"]
|
||||
}]
|
||||
},
|
||||
"namespaces": [],
|
||||
"topologyKey": "region"
|
||||
}
|
||||
}, {
|
||||
"weight": 2,
|
||||
"podAffinityTerm": {
|
||||
"labelSelector": {
|
||||
"matchExpressions": [{
|
||||
"key": "security",
|
||||
"operator": "Exists"
|
||||
}, {
|
||||
"key": "wrongkey",
|
||||
"operator": "DoesNotExist"
|
||||
}]
|
||||
},
|
||||
"namespaces": [],
|
||||
"topologyKey": "region"
|
||||
}
|
||||
}
|
||||
]
|
||||
}}`,
|
||||
}
|
||||
hardAffinity := map[string]string{
|
||||
api.AffinityAnnotationKey: `
|
||||
{"podAffinity": {
|
||||
"requiredDuringSchedulingIgnoredDuringExecution": [
|
||||
{
|
||||
"labelSelector":{
|
||||
"matchExpressions": [{
|
||||
"key": "security",
|
||||
"operator": "In",
|
||||
"values": ["S1", "value2"]
|
||||
}]
|
||||
},
|
||||
"namespaces": [],
|
||||
"topologyKey": "region"
|
||||
}, {
|
||||
"labelSelector": {
|
||||
"matchExpressions": [{
|
||||
"key": "security",
|
||||
"operator": "Exists"
|
||||
}, {
|
||||
"key": "wrongkey",
|
||||
"operator": "DoesNotExist"
|
||||
}]
|
||||
},
|
||||
"namespaces": [],
|
||||
"topologyKey": "region"
|
||||
}
|
||||
]
|
||||
}}`,
|
||||
}
|
||||
awayFromS1InAz := map[string]string{
|
||||
api.AffinityAnnotationKey: `
|
||||
{"podAntiAffinity": {
|
||||
"preferredDuringSchedulingIgnoredDuringExecution": [{
|
||||
"weight": 5,
|
||||
"podAffinityTerm": {
|
||||
"labelSelector": {
|
||||
"matchExpressions": [{
|
||||
"key": "security",
|
||||
"operator": "In",
|
||||
"values":["S1"]
|
||||
}]
|
||||
},
|
||||
"namespaces": [],
|
||||
"topologyKey": "az"
|
||||
}
|
||||
}]
|
||||
}}`,
|
||||
}
|
||||
// to stay away from security S2 in any az.
|
||||
awayFromS2InAz := map[string]string{
|
||||
api.AffinityAnnotationKey: `
|
||||
{"podAntiAffinity": {
|
||||
"preferredDuringSchedulingIgnoredDuringExecution": [{
|
||||
"weight": 5,
|
||||
"podAffinityTerm": {
|
||||
"labelSelector": {
|
||||
"matchExpressions": [{
|
||||
"key": "security",
|
||||
"operator": "In",
|
||||
"values":["S2"]
|
||||
}]
|
||||
},
|
||||
"namespaces": [],
|
||||
"topologyKey": "az"
|
||||
}
|
||||
}]
|
||||
}}`,
|
||||
}
|
||||
// to stay with security S1 in same region, stay away from security S2 in any az.
|
||||
stayWithS1InRegionAwayFromS2InAz := map[string]string{
|
||||
api.AffinityAnnotationKey: `
|
||||
{"podAffinity": {
|
||||
"preferredDuringSchedulingIgnoredDuringExecution": [{
|
||||
"weight": 8,
|
||||
"podAffinityTerm": {
|
||||
"labelSelector": {
|
||||
"matchExpressions": [{
|
||||
"key": "security",
|
||||
"operator": "In",
|
||||
"values":["S1"]
|
||||
}]
|
||||
},
|
||||
"namespaces": [],
|
||||
"topologyKey": "region"
|
||||
}
|
||||
}]
|
||||
},
|
||||
"podAntiAffinity": {
|
||||
"preferredDuringSchedulingIgnoredDuringExecution": [{
|
||||
"weight": 5,
|
||||
"podAffinityTerm": {
|
||||
"labelSelector": {
|
||||
"matchExpressions": [{
|
||||
"key": "security",
|
||||
"operator": "In",
|
||||
"values":["S2"]
|
||||
}]
|
||||
},
|
||||
"namespaces": [],
|
||||
"topologyKey": "az"
|
||||
}
|
||||
}]
|
||||
}}`,
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
pod *api.Pod
|
||||
pods []*api.Pod
|
||||
nodes []api.Node
|
||||
expectedList schedulerapi.HostPriorityList
|
||||
test string
|
||||
}{
|
||||
{
|
||||
pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: map[string]string{}}},
|
||||
nodes: []api.Node{
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
|
||||
},
|
||||
expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 0}, {"machine3", 0}},
|
||||
test: "all machines are same priority as Affinity is nil",
|
||||
},
|
||||
// the node (machine1) that has the label {"region": "China"} (matching the topology key) and existing pods that match the labelSelector gets a high score
|
||||
// the node (machine3) that has no "region" label at all (mismatching the topology key), even though its existing pod matches the labelSelector, gets a low score
|
||||
// the node (machine2) that has the label {"region": "India"} (matching the topology key) but whose existing pod does not match the labelSelector gets a low score
|
||||
{
|
||||
pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS1InRegion}},
|
||||
pods: []*api.Pod{
|
||||
{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}},
|
||||
{Spec: api.PodSpec{NodeName: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
},
|
||||
nodes: []api.Node{
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
|
||||
},
|
||||
expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 0}, {"machine3", 0}},
|
||||
test: "Affinity: pod that matches topology key & pods in nodes will get high score comparing to others" +
|
||||
"which doesn't match either pods in nodes or in topology key",
|
||||
},
|
||||
// node1 (machine1) has the label {"region": "China"} (matching the topology key) and hosts existing pods that match the labelSelector, so it gets a high score
|
||||
// node2 (machine2) has the label {"region": "China"}, matching the topology key with the same label value as node1, so it gets the same high score as node1
|
||||
// node3 (machine3) has the label {"region": "India"}, matching the topology key but with a different label value, and hosts no existing pods that match the labelSelector,
|
||||
// so it gets a low score.
|
||||
{
|
||||
pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Annotations: stayWithS1InRegion}},
|
||||
pods: []*api.Pod{
|
||||
{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
},
|
||||
nodes: []api.Node{
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgChinaAzAz1}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelRgIndia}},
|
||||
},
|
||||
expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 10}, {"machine3", 0}},
|
||||
test: "All the nodes that have the same topology key & label value with one of them has an existing pod that match the affinity rules, have the same score",
|
||||
},
|
||||
// there are 2 regions, say regionChina (machine1, machine3, machine4) and regionIndia (machine2, machine5); both regions have nodes that match the preference.
|
||||
// But there are more nodes (and hence more existing matching pods) in regionChina than in regionIndia.
|
||||
// Then nodes in regionChina get a higher score than nodes in regionIndia, and all the nodes in regionChina should get the same (high) score,
|
||||
// while all the nodes in regionIndia should get the same (low) score.
|
||||
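// (machine1 hosts two matching pods and machine3/machine4 one each, so regionChina has
// four matches versus two in regionIndia; assuming the priority scales each node's total
// against the highest one, that 4:2 ratio yields the expected scores of 10 and 5.)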
{
|
||||
pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS2InRegion}},
|
||||
pods: []*api.Pod{
|
||||
{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}},
|
||||
{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}},
|
||||
{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}},
|
||||
{Spec: api.PodSpec{NodeName: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}},
|
||||
{Spec: api.PodSpec{NodeName: "machine4"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}},
|
||||
{Spec: api.PodSpec{NodeName: "machine5"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}},
|
||||
},
|
||||
nodes: []api.Node{
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelRgChina}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine4", Labels: labelRgChina}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine5", Labels: labelRgIndia}},
|
||||
},
|
||||
expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 5}, {"machine3", 10}, {"machine4", 10}, {"machine5", 5}},
|
||||
test: "Affinity: nodes in one region has more matching pods comparing to other reqion, so the region which has more macthes will get high score",
|
||||
},
|
||||
// Test with the different operators and values for pod affinity scheduling preference, including some match failures.
|
||||
{
|
||||
pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: affinity3}},
|
||||
pods: []*api.Pod{
|
||||
{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}},
|
||||
{Spec: api.PodSpec{NodeName: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
},
|
||||
nodes: []api.Node{
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
|
||||
},
|
||||
expectedList: []schedulerapi.HostPriority{{"machine1", 2}, {"machine2", 10}, {"machine3", 0}},
|
||||
test: "Affinity: different Label operators and values for pod affinity scheduling preference, including some match failures ",
|
||||
},
|
||||
// Test the symmetry cases for affinity, the difference between affinity and symmetry is not the pod wants to run together with some existing pods,
|
||||
// but the existing pods have the inter pod affinity preference while the pod to schedule satisfy the preference.
|
||||
{
|
||||
pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}},
|
||||
pods: []*api.Pod{
|
||||
{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS1InRegion}},
|
||||
{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2, Annotations: stayWithS2InRegion}},
|
||||
},
|
||||
nodes: []api.Node{
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
|
||||
},
|
||||
expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 10}, {"machine3", 0}},
|
||||
test: "Affinity symmetry: considred only the preferredDuringSchedulingIgnoredDuringExecution in pod affinity symmetry",
|
||||
},
|
||||
{
|
||||
pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
pods: []*api.Pod{
|
||||
{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: hardAffinity}},
|
||||
{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2, Annotations: hardAffinity}},
|
||||
},
|
||||
nodes: []api.Node{
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
|
||||
},
|
||||
expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 10}, {"machine3", 0}},
|
||||
test: "Affinity symmetry: considred RequiredDuringSchedulingIgnoredDuringExecution in pod affinity symmetry",
|
||||
},
|
||||
|
||||
// The pod to schedule prefers to stay away from some existing pods at the node level, using pod anti-affinity.
|
||||
// the nodes that have the label {"node": "bar"} (match the topology key) and that have existing pods that match the labelSelector get low score
|
||||
// the nodes that have no "node" label at all (mismatching the topology key), even if they host existing pods that match the labelSelector, get a high score
|
||||
// the nodes that have the label {"node": "bar"} (match the topology key) but that have existing pods that mismatch the labelSelector get high score
|
||||
// there are 2 nodes, say node1 and node2, both nodes have pods that match the labelSelector and have topology-key in node.Labels.
|
||||
// But there are more pods on node1 that match the preference than on node2. Then node1 gets a lower score than node2.
|
||||
{
|
||||
pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: awayFromS1InAz}},
|
||||
pods: []*api.Pod{
|
||||
{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}},
|
||||
},
|
||||
nodes: []api.Node{
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelAzAz1}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgChina}},
|
||||
},
|
||||
expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 10}},
|
||||
test: "Anti Affinity: pod that doesnot match existing pods in node will get high score ",
|
||||
},
|
||||
{
|
||||
pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: awayFromS1InAz}},
|
||||
pods: []*api.Pod{
|
||||
{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
},
|
||||
nodes: []api.Node{
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelAzAz1}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgChina}},
|
||||
},
|
||||
expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 10}},
|
||||
test: "Anti Affinity: pod that does not matches topology key & matches the pods in nodes will get higher score comparing to others ",
|
||||
},
|
||||
{
|
||||
pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: awayFromS1InAz}},
|
||||
pods: []*api.Pod{
|
||||
{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}},
|
||||
},
|
||||
nodes: []api.Node{
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelAzAz1}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
|
||||
},
|
||||
expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 10}},
|
||||
test: "Anti Affinity: one node has more matching pods comparing to other node, so the node which has more unmacthes will get high score",
|
||||
},
|
||||
// Test the symmetry cases for anti affinity
|
||||
{
|
||||
pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}},
|
||||
pods: []*api.Pod{
|
||||
{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: awayFromS2InAz}},
|
||||
{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2, Annotations: awayFromS1InAz}},
|
||||
},
|
||||
nodes: []api.Node{
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelAzAz1}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelAzAz2}},
|
||||
},
|
||||
expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 10}},
|
||||
test: "Anti Affinity symmetry: the existing pods in node which has anti affinity match will get high score",
|
||||
},
|
||||
// Test both affinity and anti-affinity
|
||||
{
|
||||
pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS1InRegionAwayFromS2InAz}},
|
||||
pods: []*api.Pod{
|
||||
{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
},
|
||||
nodes: []api.Node{
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelAzAz1}},
|
||||
},
|
||||
expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 0}},
|
||||
test: "Affinity and Anti Affinity: considered only preferredDuringSchedulingIgnoredDuringExecution in both pod affinity & anti affinity",
|
||||
},
|
||||
// Combined case considering both affinity and anti-affinity: the pod to schedule and the existing pods have the same labels (they are in the same RC/service);
|
||||
// the pod prefers to run together with its sibling pods in the same region, but wants to stay away from them at the node level,
|
||||
// so that all the pods of an RC/service can stay in the same region while spreading away from each other
|
||||
// machine-1, machine-3 and machine-4 are in the China region; machine-2 and machine-5 are in the India region
|
||||
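// (The anti-affinity term here targets S2 pods and none exist, so only the region-level
// affinity contributes: five matching S1 pods in the China region versus two in India,
// which, assuming linear scaling against the highest total, gives the expected 10 and 4.)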
{
|
||||
pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS1InRegionAwayFromS2InAz}},
|
||||
pods: []*api.Pod{
|
||||
{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
{Spec: api.PodSpec{NodeName: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
{Spec: api.PodSpec{NodeName: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
{Spec: api.PodSpec{NodeName: "machine4"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
{Spec: api.PodSpec{NodeName: "machine5"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
},
|
||||
nodes: []api.Node{
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChinaAzAz1}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelRgChina}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine4", Labels: labelRgChina}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine5", Labels: labelRgIndia}},
|
||||
},
|
||||
expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 4}, {"machine3", 10}, {"machine4", 10}, {"machine5", 4}},
|
||||
test: "Affinity and Anti Affinity: considering both affinity and anti-affinity, the pod to schedule and existing pods have the same labels",
|
||||
},
|
||||
// Consider Affinity, Anti Affinity and symmetry together.
|
||||
// for Affinity, the weights are: 8, 0, 0, 0
|
||||
// for Anti Affinity, the weights are: 0, -5, 0, 0
|
||||
// for Affinity symmetry, the weights are: 0, 0, 8, 0
|
||||
// for Anti Affinity symmetry, the weights are: 0, 0, 0, -5
|
||||
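// Summing those contributions gives raw totals of 8, -5, 8 and -5 per node; assuming the
// priority maps the lowest raw total to 0 and the highest to 10, this yields the expected
// scores of 10, 0, 10 and 0.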
{
|
||||
pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS1InRegionAwayFromS2InAz}},
|
||||
pods: []*api.Pod{
|
||||
{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}},
|
||||
{Spec: api.PodSpec{NodeName: "machine3"}, ObjectMeta: api.ObjectMeta{Annotations: stayWithS1InRegionAwayFromS2InAz}},
|
||||
{Spec: api.PodSpec{NodeName: "machine4"}, ObjectMeta: api.ObjectMeta{Annotations: awayFromS1InAz}},
|
||||
},
|
||||
nodes: []api.Node{
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelAzAz1}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelRgIndia}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine4", Labels: labelAzAz2}},
|
||||
},
|
||||
expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 0}, {"machine3", 10}, {"machine4", 0}},
|
||||
test: "Affinity and Anti Affinity and symmetry: considered only preferredDuringSchedulingIgnoredDuringExecution in both pod affinity & anti affinity & symmetry",
|
||||
},
|
||||
}
|
||||
for _, test := range tests {
|
||||
nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods)
|
||||
interPodAffinity := InterPodAffinity{
|
||||
info: FakeNodeListInfo(test.nodes),
|
||||
nodeLister: algorithm.FakeNodeLister(api.NodeList{Items: test.nodes}),
|
||||
podLister: algorithm.FakePodLister(test.pods),
|
||||
hardPodAffinityWeight: api.DefaultHardPodAffinitySymmetricWeight,
|
||||
failureDomains: priorityutil.Topologies{DefaultKeys: strings.Split(api.DefaultFailureDomains, ",")},
|
||||
}
|
||||
list, err := interPodAffinity.CalculateInterPodAffinityPriority(test.pod, nodeNameToInfo, algorithm.FakeNodeLister(api.NodeList{Items: test.nodes}))
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
if !reflect.DeepEqual(test.expectedList, list) {
|
||||
t.Errorf("%s: \nexpected \n\t%#v, \ngot \n\t%#v\n", test.test, test.expectedList, list)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestHardPodAffinitySymmetricWeight(t *testing.T) {
|
||||
podLabelServiceS1 := map[string]string{
|
||||
"service": "S1",
|
||||
}
|
||||
labelRgChina := map[string]string{
|
||||
"region": "China",
|
||||
}
|
||||
labelRgIndia := map[string]string{
|
||||
"region": "India",
|
||||
}
|
||||
labelAzAz1 := map[string]string{
|
||||
"az": "az1",
|
||||
}
|
||||
hardPodAffinity := map[string]string{
|
||||
api.AffinityAnnotationKey: `
|
||||
{"podAffinity": {
|
||||
"requiredDuringSchedulingIgnoredDuringExecution": [
|
||||
{
|
||||
"labelSelector":{
|
||||
"matchExpressions": [{
|
||||
"key": "service",
|
||||
"operator": "In",
|
||||
"values": ["S1"]
|
||||
}]
|
||||
},
|
||||
"namespaces": [],
|
||||
"topologyKey": "region"
|
||||
}
|
||||
]
|
||||
}}`,
|
||||
}
|
||||
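// The pod under test carries no affinity annotation of its own; its score comes purely from
// symmetry: each existing pod's RequiredDuringScheduling podAffinity term is applied back to
// the incoming pod as an implicit preferred rule weighted by hardPodAffinityWeight, so nodes
// whose existing pods "require" the incoming pod score high unless that weight is 0.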
tests := []struct {
|
||||
pod *api.Pod
|
||||
pods []*api.Pod
|
||||
nodes []api.Node
|
||||
hardPodAffinityWeight int
|
||||
expectedList schedulerapi.HostPriorityList
|
||||
test string
|
||||
}{
|
||||
{
|
||||
pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelServiceS1}},
|
||||
pods: []*api.Pod{
|
||||
{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Annotations: hardPodAffinity}},
|
||||
{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Annotations: hardPodAffinity}},
|
||||
},
|
||||
nodes: []api.Node{
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
|
||||
},
|
||||
hardPodAffinityWeight: api.DefaultHardPodAffinitySymmetricWeight,
|
||||
expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 10}, {"machine3", 0}},
|
||||
test: "Hard Pod Affinity symmetry: hard pod affinity symmetry weights 1 by default, then nodes that match the hard pod affinity symmetry rules, get a high score",
|
||||
},
|
||||
{
|
||||
pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelServiceS1}},
|
||||
pods: []*api.Pod{
|
||||
{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Annotations: hardPodAffinity}},
|
||||
{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Annotations: hardPodAffinity}},
|
||||
},
|
||||
nodes: []api.Node{
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
|
||||
},
|
||||
hardPodAffinityWeight: 0,
|
||||
expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 0}, {"machine3", 0}},
|
||||
test: "Hard Pod Affinity symmetry: hard pod affinity symmetry is closed(weights 0), then nodes that match the hard pod affinity symmetry rules, get same score with those not match",
|
||||
},
|
||||
}
|
||||
for _, test := range tests {
|
||||
nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods)
|
||||
ipa := InterPodAffinity{
|
||||
info: FakeNodeListInfo(test.nodes),
|
||||
nodeLister: algorithm.FakeNodeLister(api.NodeList{Items: test.nodes}),
|
||||
podLister: algorithm.FakePodLister(test.pods),
|
||||
hardPodAffinityWeight: test.hardPodAffinityWeight,
|
||||
}
|
||||
list, err := ipa.CalculateInterPodAffinityPriority(test.pod, nodeNameToInfo, algorithm.FakeNodeLister(api.NodeList{Items: test.nodes}))
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
if !reflect.DeepEqual(test.expectedList, list) {
|
||||
t.Errorf("%s: \nexpected \n\t%#v, \ngot \n\t%#v\n", test.test, test.expectedList, list)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestSoftPodAntiAffinityWithFailureDomains(t *testing.T) {
|
||||
labelAzAZ1 := map[string]string{
|
||||
"az": "az1",
|
||||
}
|
||||
LabelZoneFailureDomainAZ1 := map[string]string{
|
||||
unversioned.LabelZoneFailureDomain: "az1",
|
||||
}
|
||||
podLabel1 := map[string]string{
|
||||
"security": "S1",
|
||||
}
|
||||
antiAffinity1 := map[string]string{
|
||||
api.AffinityAnnotationKey: `
|
||||
{"podAntiAffinity": {
|
||||
"preferredDuringSchedulingIgnoredDuringExecution": [{
|
||||
"weight": 5,
|
||||
"podAffinityTerm": {
|
||||
"labelSelector": {
|
||||
"matchExpressions": [{
|
||||
"key": "security",
|
||||
"operator": "In",
|
||||
"values":["S1"]
|
||||
}]
|
||||
},
|
||||
"namespaces": [],
|
||||
"topologyKey": ""
|
||||
}
|
||||
}]
|
||||
}}`,
|
||||
}
|
||||
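// Note the empty "topologyKey" above: for preferred pod anti-affinity it is expanded to the
// configured failure-domain keys. In the first case below those are api.DefaultFailureDomains,
// so machine1 (labeled with unversioned.LabelZoneFailureDomain) is penalized while machine2,
// whose "az" label is not among the default keys, is not; in the second case no failure
// domains are configured, so nothing is penalized.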
tests := []struct {
|
||||
pod *api.Pod
|
||||
pods []*api.Pod
|
||||
nodes []api.Node
|
||||
failureDomains priorityutil.Topologies
|
||||
expectedList schedulerapi.HostPriorityList
|
||||
test string
|
||||
}{
|
||||
{
|
||||
pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabel1, Annotations: antiAffinity1}},
|
||||
pods: []*api.Pod{
|
||||
{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabel1}},
|
||||
{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabel1}},
|
||||
},
|
||||
nodes: []api.Node{
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: LabelZoneFailureDomainAZ1}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelAzAZ1}},
|
||||
},
|
||||
failureDomains: priorityutil.Topologies{DefaultKeys: strings.Split(api.DefaultFailureDomains, ",")},
|
||||
expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 10}},
|
||||
test: "Soft Pod Anti Affinity: when the topologyKey is emtpy, match among topologyKeys indicated by failure domains.",
|
||||
},
|
||||
{
|
||||
pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabel1, Annotations: antiAffinity1}},
|
||||
pods: []*api.Pod{
|
||||
{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabel1}},
|
||||
{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabel1}},
|
||||
},
|
||||
nodes: []api.Node{
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: LabelZoneFailureDomainAZ1}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelAzAZ1}},
|
||||
},
|
||||
failureDomains: priorityutil.Topologies{},
|
||||
expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 0}},
|
||||
test: "Soft Pod Anti Affinity: when the topologyKey is emtpy, and no failure domains indicated, regard as topologyKey not match.",
|
||||
},
|
||||
}
|
||||
for _, test := range tests {
|
||||
nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods)
|
||||
ipa := InterPodAffinity{
|
||||
info: FakeNodeListInfo(test.nodes),
|
||||
nodeLister: algorithm.FakeNodeLister(api.NodeList{Items: test.nodes}),
|
||||
podLister: algorithm.FakePodLister(test.pods),
|
||||
hardPodAffinityWeight: api.DefaultHardPodAffinitySymmetricWeight,
|
||||
failureDomains: test.failureDomains,
|
||||
}
|
||||
list, err := ipa.CalculateInterPodAffinityPriority(test.pod, nodeNameToInfo, algorithm.FakeNodeLister(api.NodeList{Items: test.nodes}))
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
if !reflect.DeepEqual(test.expectedList, list) {
|
||||
t.Errorf("%s: \nexpected \n\t%#v, \ngot \n\t%#v\n", test.test, test.expectedList, list)
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -16,7 +16,12 @@ limitations under the License.
|
|||
|
||||
package util
|
||||
|
||||
import "k8s.io/kubernetes/pkg/api"
|
||||
import (
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/unversioned"
|
||||
"k8s.io/kubernetes/pkg/labels"
|
||||
"k8s.io/kubernetes/pkg/util/sets"
|
||||
)
|
||||
|
||||
// For each of these resources, a pod that doesn't request the resource explicitly
|
||||
// will be treated as having requested the amount indicated below, for the purpose
|
||||
|
@@ -48,3 +53,83 @@ func GetNonzeroRequests(requests *api.ResourceList) (int64, int64) {
|
|||
}
|
||||
return outMilliCPU, outMemory
|
||||
}
|
||||
|
||||
// FilterPodsByNameSpaces filters the pods based on the given set of namespaces;
|
||||
// an empty set of namespaces means all namespaces.
|
||||
func FilterPodsByNameSpaces(names sets.String, pods []*api.Pod) []*api.Pod {
|
||||
if len(pods) == 0 || len(names) == 0 {
|
||||
return pods
|
||||
}
|
||||
result := []*api.Pod{}
|
||||
for _, pod := range pods {
|
||||
if names.Has(pod.Namespace) {
|
||||
result = append(result, pod)
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
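// Illustrative sketch (hypothetical pods, not part of this change):
//
//	pods := []*api.Pod{
//		{ObjectMeta: api.ObjectMeta{Namespace: "ns1"}},
//		{ObjectMeta: api.ObjectMeta{Namespace: "ns2"}},
//	}
//	FilterPodsByNameSpaces(sets.String{}, pods)          // empty set -> both pods are kept
//	FilterPodsByNameSpaces(sets.NewString("ns1"), pods)  // -> only the ns1 pod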
|
||||
// GetNamespacesFromPodAffinityTerm returns a set of names
|
||||
// according to the namespaces indicated in podAffinityTerm.
|
||||
// If Namespaces is nil, consider the given pod's namespace.
|
||||
// If Namespaces is an empty list, consider all namespaces.
|
||||
func GetNamespacesFromPodAffinityTerm(pod *api.Pod, podAffinityTerm api.PodAffinityTerm) sets.String {
|
||||
names := sets.String{}
|
||||
if podAffinityTerm.Namespaces == nil {
|
||||
names.Insert(pod.Namespace)
|
||||
} else if len(podAffinityTerm.Namespaces) != 0 {
|
||||
names.Insert(podAffinityTerm.Namespaces...)
|
||||
}
|
||||
return names
|
||||
}
|
||||
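// Illustrative sketch (hypothetical pod "p" in namespace "ns1", not part of this change):
//
//	GetNamespacesFromPodAffinityTerm(p, api.PodAffinityTerm{})                                   // nil Namespaces -> {"ns1"}
//	GetNamespacesFromPodAffinityTerm(p, api.PodAffinityTerm{Namespaces: []string{}})             // empty list     -> {} (all namespaces)
//	GetNamespacesFromPodAffinityTerm(p, api.PodAffinityTerm{Namespaces: []string{"ns2", "ns3"}}) // explicit list  -> {"ns2", "ns3"}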
|
||||
// NodesHaveSameTopologyKeyInternal checks if nodeA and nodeB have the same label value for the given topologyKey.
|
||||
func NodesHaveSameTopologyKeyInternal(nodeA, nodeB *api.Node, topologyKey string) bool {
|
||||
return nodeA.Labels != nil && nodeB.Labels != nil && len(nodeA.Labels[topologyKey]) > 0 && nodeA.Labels[topologyKey] == nodeB.Labels[topologyKey]
|
||||
}
|
||||
|
||||
type Topologies struct {
|
||||
DefaultKeys []string
|
||||
}
|
||||
|
||||
// NodesHaveSameTopologyKey checks if nodeA and nodeB have the same label value for the given topologyKey.
|
||||
// If the topologyKey is empty, check whether the two nodes share any of the default topologyKeys with the same corresponding label value.
|
||||
func (tps *Topologies) NodesHaveSameTopologyKey(nodeA *api.Node, nodeB *api.Node, topologyKey string) bool {
|
||||
if len(topologyKey) == 0 {
|
||||
// assumes this is allowed only for PreferredDuringScheduling pod anti-affinity (ensured by api/validation)
|
||||
for _, defaultKey := range tps.DefaultKeys {
|
||||
if NodesHaveSameTopologyKeyInternal(nodeA, nodeB, defaultKey) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
} else {
|
||||
return NodesHaveSameTopologyKeyInternal(nodeA, nodeB, topologyKey)
|
||||
}
|
||||
}
|
||||
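// Illustrative sketch (hypothetical nodes, not part of this change): an empty topologyKey
// falls back to the configured default failure-domain keys.
//
//	tps := &Topologies{DefaultKeys: []string{"kubernetes.io/hostname", unversioned.LabelZoneFailureDomain}}
//	a := &api.Node{ObjectMeta: api.ObjectMeta{Labels: map[string]string{unversioned.LabelZoneFailureDomain: "az1"}}}
//	b := &api.Node{ObjectMeta: api.ObjectMeta{Labels: map[string]string{unversioned.LabelZoneFailureDomain: "az1"}}}
//	tps.NodesHaveSameTopologyKey(a, b, "")       // true: both nodes share the zone failure-domain label
//	tps.NodesHaveSameTopologyKey(a, b, "region") // false: neither node carries a "region" label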
|
||||
type getNodeFunc func(*api.Pod) (*api.Node, error)
|
||||
|
||||
// CheckIfPodMatchPodAffinityTerm checks whether podA satisfies the affinity term requested by podB.
|
||||
func (tps *Topologies) CheckIfPodMatchPodAffinityTerm(podA *api.Pod, podB *api.Pod, podBAffinityTerm api.PodAffinityTerm, getNodeA, getNodeB getNodeFunc) (bool, error) {
|
||||
names := GetNamespacesFromPodAffinityTerm(podB, podBAffinityTerm)
|
||||
if len(names) != 0 && !names.Has(podA.Namespace) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
labelSelector, err := unversioned.LabelSelectorAsSelector(podBAffinityTerm.LabelSelector)
|
||||
if err != nil || !labelSelector.Matches(labels.Set(podA.Labels)) {
|
||||
return false, err
|
||||
}
|
||||
|
||||
podANode, err := getNodeA(podA)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
podBNode, err := getNodeB(podB)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return tps.NodesHaveSameTopologyKey(podANode, podBNode, podBAffinityTerm.TopologyKey), nil
|
||||
}
|
||||
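// Illustrative sketch (hypothetical podA, podB, term and nodesByName map, not part of this
// change): both pods resolve their node through the same getNodeFunc here; podA matches when
// it satisfies podB's selector and namespaces and both nodes share the term's topology key.
//
//	getNode := func(p *api.Pod) (*api.Node, error) { return nodesByName[p.Spec.NodeName], nil }
//	match, err := tps.CheckIfPodMatchPodAffinityTerm(podA, podB, term, getNode, getNode)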
|
|
|
@@ -22,6 +22,7 @@ import (
|
|||
|
||||
"net/http/httptest"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/testapi"
|
||||
"k8s.io/kubernetes/pkg/client/restclient"
|
||||
client "k8s.io/kubernetes/pkg/client/unversioned"
|
||||
|
@@ -116,7 +117,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
|
|||
defer server.Close()
|
||||
client := client.NewOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
|
||||
|
||||
if _, err := factory.NewConfigFactory(client, "some-scheduler-name").CreateFromConfig(policy); err != nil {
|
||||
if _, err := factory.NewConfigFactory(client, "some-scheduler-name", api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains).CreateFromConfig(policy); err != nil {
|
||||
t.Errorf("%s: Error constructing: %v", v, err)
|
||||
continue
|
||||
}
|
||||
|
|
|
@@ -32,8 +32,10 @@ import (
|
|||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
// GCE instances can have up to 16 PD volumes attached.
|
||||
const DefaultMaxGCEPDVolumes = 16
|
||||
const (
|
||||
// GCE instances can have up to 16 PD volumes attached.
|
||||
DefaultMaxGCEPDVolumes = 16
|
||||
)
|
||||
|
||||
// getMaxVols checks the max PD volumes environment variable, otherwise returning a default value
|
||||
func getMaxVols(defaultVal int) int {
|
||||
|
@@ -71,7 +73,7 @@ func init() {
|
|||
},
|
||||
)
|
||||
// PodFitsPorts has been replaced by PodFitsHostPorts for better user understanding.
|
||||
// For backwards compatibility with 1.0, PodFitsPorts is regitered as well.
|
||||
// For backwards compatibility with 1.0, PodFitsPorts is registered as well.
|
||||
factory.RegisterFitPredicate("PodFitsPorts", predicates.PodFitsHostPorts)
|
||||
// ImageLocalityPriority prioritizes nodes based on locality of images requested by a pod. Nodes with larger size
|
||||
// of already-installed packages required by the pod will be preferred over nodes with no already-installed
|
||||
|
@@ -125,6 +127,13 @@ func defaultPredicates() sets.String {
|
|||
// GeneralPredicates are the predicates that are enforced by all Kubernetes components
|
||||
// (e.g. kubelet and all schedulers)
|
||||
factory.RegisterFitPredicate("GeneralPredicates", predicates.GeneralPredicates),
|
||||
// Fit is determined by inter-pod affinity.
|
||||
factory.RegisterFitPredicateFactory(
|
||||
"MatchInterPodAffinity",
|
||||
func(args factory.PluginFactoryArgs) algorithm.FitPredicate {
|
||||
return predicates.NewPodAffinityPredicate(args.NodeInfo, args.PodLister, args.FailureDomains)
|
||||
},
|
||||
),
|
||||
)
|
||||
}
|
||||
|
||||
|
@@ -153,5 +162,16 @@ func defaultPriorities() sets.String {
|
|||
Weight: 1,
|
||||
},
|
||||
),
|
||||
// Pods should be placed in the same topological domain (e.g. same node, same rack, same zone, same power domain, etc.)
|
||||
// as some other pods, or, conversely, should not be placed in the same topological domain as some other pods.
|
||||
factory.RegisterPriorityConfigFactory(
|
||||
"InterPodAffinityPriority",
|
||||
factory.PriorityConfigFactory{
|
||||
Function: func(args factory.PluginFactoryArgs) algorithm.PriorityFunction {
|
||||
return priorities.NewInterPodAffinityPriority(args.NodeInfo, args.NodeLister, args.PodLister, args.HardPodAffinitySymmetricWeight, args.FailureDomains)
|
||||
},
|
||||
Weight: 1,
|
||||
},
|
||||
),
|
||||
)
|
||||
}
|
||||
|
|
|
@@ -21,6 +21,7 @@ package factory
|
|||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
@@ -34,6 +35,7 @@ import (
|
|||
"k8s.io/kubernetes/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/util/runtime"
|
||||
"k8s.io/kubernetes/pkg/util/sets"
|
||||
utilvalidation "k8s.io/kubernetes/pkg/util/validation"
|
||||
"k8s.io/kubernetes/plugin/pkg/scheduler"
|
||||
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
|
||||
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates"
|
||||
|
@@ -83,10 +85,18 @@ type ConfigFactory struct {
|
|||
// processed by this scheduler, based on pods's annotation key:
|
||||
// 'scheduler.alpha.kubernetes.io/name'
|
||||
SchedulerName string
|
||||
|
||||
// RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule
|
||||
// corresponding to every RequiredDuringScheduling affinity rule.
|
||||
// HardPodAffinitySymmetricWeight represents the weight of implicit PreferredDuringScheduling affinity rule, in the range 0-100.
|
||||
HardPodAffinitySymmetricWeight int
|
||||
|
||||
// Indicate the "all topologies" set for empty topologyKey when it's used for PreferredDuringScheduling pod anti-affinity.
|
||||
FailureDomains string
|
||||
}
|
||||
|
||||
// Initializes the factory.
|
||||
func NewConfigFactory(client *client.Client, schedulerName string) *ConfigFactory {
|
||||
func NewConfigFactory(client *client.Client, schedulerName string, hardPodAffinitySymmetricWeight int, failureDomains string) *ConfigFactory {
|
||||
stopEverything := make(chan struct{})
|
||||
schedulerCache := schedulercache.New(30*time.Second, stopEverything)
|
||||
|
||||
|
@@ -95,15 +105,17 @@ func NewConfigFactory(client *client.Client, schedulerName string) *ConfigFactor
|
|||
PodQueue: cache.NewFIFO(cache.MetaNamespaceKeyFunc),
|
||||
ScheduledPodLister: &cache.StoreToPodLister{},
|
||||
// Only nodes in the "Ready" condition with status == "True" are schedulable
|
||||
NodeLister: &cache.StoreToNodeLister{},
|
||||
PVLister: &cache.StoreToPVFetcher{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)},
|
||||
PVCLister: &cache.StoreToPVCFetcher{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)},
|
||||
ServiceLister: &cache.StoreToServiceLister{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)},
|
||||
ControllerLister: &cache.StoreToReplicationControllerLister{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)},
|
||||
ReplicaSetLister: &cache.StoreToReplicaSetLister{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)},
|
||||
schedulerCache: schedulerCache,
|
||||
StopEverything: stopEverything,
|
||||
SchedulerName: schedulerName,
|
||||
NodeLister: &cache.StoreToNodeLister{},
|
||||
PVLister: &cache.StoreToPVFetcher{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)},
|
||||
PVCLister: &cache.StoreToPVCFetcher{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)},
|
||||
ServiceLister: &cache.StoreToServiceLister{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)},
|
||||
ControllerLister: &cache.StoreToReplicationControllerLister{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)},
|
||||
ReplicaSetLister: &cache.StoreToReplicaSetLister{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)},
|
||||
schedulerCache: schedulerCache,
|
||||
StopEverything: stopEverything,
|
||||
SchedulerName: schedulerName,
|
||||
HardPodAffinitySymmetricWeight: hardPodAffinitySymmetricWeight,
|
||||
FailureDomains: failureDomains,
|
||||
}
|
||||
|
||||
c.PodLister = schedulerCache
|
||||
|
@@ -287,6 +299,18 @@ func (f *ConfigFactory) CreateFromConfig(policy schedulerapi.Policy) (*scheduler
|
|||
// Creates a scheduler from a set of registered fit predicate keys and priority keys.
|
||||
func (f *ConfigFactory) CreateFromKeys(predicateKeys, priorityKeys sets.String, extenders []algorithm.SchedulerExtender) (*scheduler.Config, error) {
|
||||
glog.V(2).Infof("creating scheduler with fit predicates '%v' and priority functions '%v", predicateKeys, priorityKeys)
|
||||
|
||||
if f.HardPodAffinitySymmetricWeight < 0 || f.HardPodAffinitySymmetricWeight > 100 {
|
||||
return nil, fmt.Errorf("invalid hardPodAffinitySymmetricWeight: %d, must be in the range 0-100", f.HardPodAffinitySymmetricWeight)
|
||||
}
|
||||
|
||||
failureDomainArgs := strings.Split(f.FailureDomains, ",")
|
||||
for _, failureDomain := range failureDomainArgs {
|
||||
if !utilvalidation.IsQualifiedName(failureDomain) {
|
||||
return nil, fmt.Errorf("invalid failure domain: %s", failureDomain)
|
||||
}
|
||||
}
|
||||
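// Illustrative sketch (hypothetical client "cl", not part of this change): what the checks
// above accept and reject when the scheduler config is later built via Create/CreateFromKeys.
//
//	NewConfigFactory(cl, api.DefaultSchedulerName, 50, api.DefaultFailureDomains)  // accepted: weight in 0-100, valid keys
//	NewConfigFactory(cl, api.DefaultSchedulerName, 101, api.DefaultFailureDomains) // rejected here: weight out of range
//	NewConfigFactory(cl, api.DefaultSchedulerName, 1, "not a valid key!")          // rejected here: not a qualified name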
|
||||
pluginArgs := PluginFactoryArgs{
|
||||
PodLister: f.PodLister,
|
||||
ServiceLister: f.ServiceLister,
|
||||
|
@@ -297,6 +321,8 @@ func (f *ConfigFactory) CreateFromKeys(predicateKeys, priorityKeys sets.String,
|
|||
NodeInfo: &predicates.CachedNodeInfo{StoreToNodeLister: f.NodeLister},
|
||||
PVInfo: f.PVLister,
|
||||
PVCInfo: f.PVCLister,
|
||||
HardPodAffinitySymmetricWeight: f.HardPodAffinitySymmetricWeight,
|
||||
FailureDomains: sets.NewString(failureDomainArgs...).List(),
|
||||
}
|
||||
predicateFuncs, err := getFitPredicateFunctions(predicateKeys, pluginArgs)
|
||||
if err != nil {
|
||||
|
|
|
@@ -47,7 +47,7 @@ func TestCreate(t *testing.T) {
|
|||
server := httptest.NewServer(&handler)
|
||||
defer server.Close()
|
||||
client := client.NewOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
|
||||
factory := NewConfigFactory(client, api.DefaultSchedulerName)
|
||||
factory := NewConfigFactory(client, api.DefaultSchedulerName, api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains)
|
||||
factory.Create()
|
||||
}
|
||||
|
||||
|
@@ -65,7 +65,7 @@ func TestCreateFromConfig(t *testing.T) {
|
|||
server := httptest.NewServer(&handler)
|
||||
defer server.Close()
|
||||
client := client.NewOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
|
||||
factory := NewConfigFactory(client, api.DefaultSchedulerName)
|
||||
factory := NewConfigFactory(client, api.DefaultSchedulerName, api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains)
|
||||
|
||||
// Pre-register some predicate and priority functions
|
||||
RegisterFitPredicate("PredicateOne", PredicateOne)
|
||||
|
@@ -106,7 +106,7 @@ func TestCreateFromEmptyConfig(t *testing.T) {
|
|||
server := httptest.NewServer(&handler)
|
||||
defer server.Close()
|
||||
client := client.NewOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
|
||||
factory := NewConfigFactory(client, api.DefaultSchedulerName)
|
||||
factory := NewConfigFactory(client, api.DefaultSchedulerName, api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains)
|
||||
|
||||
configData = []byte(`{}`)
|
||||
if err := runtime.DecodeInto(latestschedulerapi.Codec, configData, &policy); err != nil {
|
||||
|
@@ -148,7 +148,7 @@ func TestDefaultErrorFunc(t *testing.T) {
|
|||
mux.Handle(testapi.Default.ResourcePath("pods", "bar", "foo"), &handler)
|
||||
server := httptest.NewServer(mux)
|
||||
defer server.Close()
|
||||
factory := NewConfigFactory(client.NewOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}), api.DefaultSchedulerName)
|
||||
factory := NewConfigFactory(client.NewOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}), api.DefaultSchedulerName, api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains)
|
||||
queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
|
||||
podBackoff := podBackoff{
|
||||
perPodBackoff: map[types.NamespacedName]*backoffEntry{},
|
||||
|
@@ -318,9 +318,9 @@ func TestResponsibleForPod(t *testing.T) {
|
|||
defer server.Close()
|
||||
client := client.NewOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
|
||||
// factory of "default-scheduler"
|
||||
factoryDefaultScheduler := NewConfigFactory(client, api.DefaultSchedulerName)
|
||||
factoryDefaultScheduler := NewConfigFactory(client, api.DefaultSchedulerName, api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains)
|
||||
// factory of "foo-scheduler"
|
||||
factoryFooScheduler := NewConfigFactory(client, "foo-scheduler")
|
||||
factoryFooScheduler := NewConfigFactory(client, "foo-scheduler", api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains)
|
||||
// scheduler annotations to be tested
|
||||
schedulerAnnotationFitsDefault := map[string]string{"scheduler.alpha.kubernetes.io/name": "default-scheduler"}
|
||||
schedulerAnnotationFitsFoo := map[string]string{"scheduler.alpha.kubernetes.io/name": "foo-scheduler"}
|
||||
|
@@ -370,3 +370,63 @@ func TestResponsibleForPod(t *testing.T) {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestInvalidHardPodAffinitySymmetricWeight(t *testing.T) {
|
||||
handler := utiltesting.FakeHandler{
|
||||
StatusCode: 500,
|
||||
ResponseBody: "",
|
||||
T: t,
|
||||
}
|
||||
server := httptest.NewServer(&handler)
|
||||
// TODO: Uncomment when fix #19254
|
||||
// defer server.Close()
|
||||
client := client.NewOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
|
||||
// factory of "default-scheduler"
|
||||
factory := NewConfigFactory(client, api.DefaultSchedulerName, -1, api.DefaultFailureDomains)
|
||||
_, err := factory.Create()
|
||||
if err == nil {
|
||||
t.Errorf("expected err: invalid hardPodAffinitySymmetricWeight, got nothing")
|
||||
}
|
||||
}
|
||||
|
||||
func TestInvalidFactoryArgs(t *testing.T) {
|
||||
handler := utiltesting.FakeHandler{
|
||||
StatusCode: 500,
|
||||
ResponseBody: "",
|
||||
T: t,
|
||||
}
|
||||
server := httptest.NewServer(&handler)
|
||||
defer server.Close()
|
||||
client := client.NewOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
|
||||
|
||||
testCases := []struct {
|
||||
hardPodAffinitySymmetricWeight int
|
||||
failureDomains string
|
||||
expectErr string
|
||||
}{
|
||||
{
|
||||
hardPodAffinitySymmetricWeight: -1,
|
||||
failureDomains: api.DefaultFailureDomains,
|
||||
expectErr: "invalid hardPodAffinitySymmetricWeight: -1, must be in the range 0-100",
|
||||
},
|
||||
{
|
||||
hardPodAffinitySymmetricWeight: 101,
|
||||
failureDomains: api.DefaultFailureDomains,
|
||||
expectErr: "invalid hardPodAffinitySymmetricWeight: 101, must be in the range 0-100",
|
||||
},
|
||||
{
|
||||
hardPodAffinitySymmetricWeight: 0,
|
||||
failureDomains: "INVALID_FAILURE_DOMAINS",
|
||||
expectErr: "invalid failure domain: INVALID_FAILURE_DOMAINS",
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
factory := NewConfigFactory(client, api.DefaultSchedulerName, test.hardPodAffinitySymmetricWeight, test.failureDomains)
|
||||
_, err := factory.Create()
|
||||
if err == nil {
|
||||
t.Errorf("expected err: %s, got nothing", test.expectErr)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@@ -33,14 +33,16 @@ import (
|
|||
|
||||
// PluginFactoryArgs are passed to all plugin factory functions.
|
||||
type PluginFactoryArgs struct {
|
||||
PodLister algorithm.PodLister
|
||||
ServiceLister algorithm.ServiceLister
|
||||
ControllerLister algorithm.ControllerLister
|
||||
ReplicaSetLister algorithm.ReplicaSetLister
|
||||
NodeLister algorithm.NodeLister
|
||||
NodeInfo predicates.NodeInfo
|
||||
PVInfo predicates.PersistentVolumeInfo
|
||||
PVCInfo predicates.PersistentVolumeClaimInfo
|
||||
PodLister algorithm.PodLister
|
||||
ServiceLister algorithm.ServiceLister
|
||||
ControllerLister algorithm.ControllerLister
|
||||
ReplicaSetLister algorithm.ReplicaSetLister
|
||||
NodeLister algorithm.NodeLister
|
||||
NodeInfo predicates.NodeInfo
|
||||
PVInfo predicates.PersistentVolumeInfo
|
||||
PVCInfo predicates.PersistentVolumeClaimInfo
|
||||
HardPodAffinitySymmetricWeight int
|
||||
FailureDomains []string
|
||||
}
|
||||
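// The two new fields are filled in by ConfigFactory.CreateFromKeys and consumed by the
// MatchInterPodAffinity predicate (FailureDomains) and by the InterPodAffinityPriority
// function (both fields), as registered in the default algorithm provider elsewhere in
// this change.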
|
||||
// A FitPredicateFactory produces a FitPredicate from the given args.
|
||||
|
|
|
@@ -62,7 +62,7 @@ func mustSetupScheduler() (schedulerConfigFactory *factory.ConfigFactory, destro
|
|||
Burst: 5000,
|
||||
})
|
||||
|
||||
schedulerConfigFactory = factory.NewConfigFactory(c, api.DefaultSchedulerName)
|
||||
schedulerConfigFactory = factory.NewConfigFactory(c, api.DefaultSchedulerName, api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains)
|
||||
schedulerConfig, err := schedulerConfigFactory.Create()
|
||||
if err != nil {
|
||||
panic("Couldn't create scheduler config")
|
||||
|
|
|
@@ -0,0 +1,37 @@
|
|||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: with-newlabels
|
||||
annotations:
|
||||
scheduler.alpha.kubernetes.io/affinity: >
|
||||
{
|
||||
"podAffinity": {
|
||||
"requiredDuringSchedulingIgnoredDuringExecution": [{
|
||||
"labelSelector": {
|
||||
"matchExpressions": [{
|
||||
"key": "security",
|
||||
"operator": "In",
|
||||
"values":["S1"]
|
||||
}]
|
||||
},
|
||||
"topologyKey": "kubernetes.io/hostname"
|
||||
}]
|
||||
},
|
||||
"podAntiAffinity": {
|
||||
"requiredDuringSchedulingIgnoredDuringExecution": [{
|
||||
"labelSelector": {
|
||||
"matchExpressions": [{
|
||||
"key": "security",
|
||||
"operator": "In",
|
||||
"values":["S2"]
|
||||
}]
|
||||
},
|
||||
"topologyKey": "kubernetes.io/hostname"
|
||||
}]
|
||||
}
|
||||
}
|
||||
another-annotation-key: another-annotation-value
|
||||
spec:
|
||||
containers:
|
||||
- name: with-newlabels
|
||||
image: gcr.io/google_containers/pause:2.0
|
|
@@ -393,7 +393,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
|
|||
cleanupPods(c, ns)
|
||||
})
|
||||
|
||||
It("validates that a pod with an invalid NodeAffinity is rejected [Feature:NodeAffinity]", func() {
|
||||
It("validates that a pod with an invalid NodeAffinity is rejected", func() {
|
||||
|
||||
By("Trying to launch a pod with an invalid Affinity data.")
|
||||
podName := "without-label"
|
||||
|
@@ -517,7 +517,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
|
|||
|
||||
// Test Nodes does not have any label, hence it should be impossible to schedule Pod with
|
||||
// non-nil NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.
|
||||
It("validates that NodeAffinity is respected if not matching [Feature:NodeAffinity]", func() {
|
||||
It("validates that NodeAffinity is respected if not matching", func() {
|
||||
By("Trying to schedule Pod with nonempty NodeSelector.")
|
||||
podName := "restricted-pod"
|
||||
|
||||
|
@@ -573,7 +573,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
|
|||
|
||||
// Keep the same steps with the test on NodeSelector,
|
||||
// but specify Affinity in Pod.Annotations, instead of NodeSelector.
|
||||
It("validates that required NodeAffinity setting is respected if matching [Feature:NodeAffinity]", func() {
|
||||
It("validates that required NodeAffinity setting is respected if matching", func() {
|
||||
// launch a pod to find a node which can launch a pod. We intentionally do
|
||||
// not just take the node list and choose the first of them. Depending on the
|
||||
// cluster and the scheduler it might be that a "normal" pod cannot be
|
||||
|
@@ -667,7 +667,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
|
|||
})
|
||||
|
||||
// Verify that an escaped JSON string of NodeAffinity in a YAML PodSpec works.
|
||||
It("validates that embedding the JSON NodeAffinity setting as a string in the annotation value work [Feature:NodeAffinity]", func() {
|
||||
It("validates that embedding the JSON NodeAffinity setting as a string in the annotation value work", func() {
|
||||
// launch a pod to find a node which can launch a pod. We intentionally do
|
||||
// not just take the node list and choose the first of them. Depending on the
|
||||
// cluster and the scheduler it might be that a "normal" pod cannot be
|
||||
|
@@ -712,7 +712,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
|
|||
|
||||
By("Trying to launch a pod that with NodeAffinity setting as embedded JSON string in the annotation value.")
|
||||
labelPodName := "with-labels"
|
||||
nodeSelectionRoot := filepath.Join(framework.TestContext.RepoRoot, "docs/user-guide/node-selection")
|
||||
nodeSelectionRoot := filepath.Join(framework.TestContext.RepoRoot, "test/e2e/node-selection")
|
||||
testPodPath := filepath.Join(nodeSelectionRoot, "pod-with-node-affinity.yaml")
|
||||
framework.RunKubectlOrDie("create", "-f", testPodPath, fmt.Sprintf("--namespace=%v", ns))
|
||||
defer c.Pods(ns).Delete(labelPodName, api.NewDeleteOptions(0))
|
||||
|
@@ -727,4 +727,551 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
|
|||
framework.ExpectNoError(err)
|
||||
Expect(labelPod.Spec.NodeName).To(Equal(nodeName))
|
||||
})
|
||||
|
||||
// The labelSelector operator is DoesNotExist but values are supplied in the requiredDuringSchedulingIgnoredDuringExecution
|
||||
// part of podAffinity, so validation fails.
|
||||
It("validates that a pod with an invalid podAffinity is rejected because of the LabelSelectorRequirement is invalid", func() {
|
||||
By("Trying to launch a pod with an invalid pod Affinity data.")
|
||||
podName := "without-label-" + string(util.NewUUID())
|
||||
_, err := c.Pods(ns).Create(&api.Pod{
|
||||
TypeMeta: unversioned.TypeMeta{
|
||||
Kind: "Pod",
|
||||
},
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Name: podName,
|
||||
Labels: map[string]string{"name": "without-label"},
|
||||
Annotations: map[string]string{
|
||||
"scheduler.alpha.kubernetes.io/affinity": `
|
||||
{"podAffinity": {
|
||||
"requiredDuringSchedulingIgnoredDuringExecution": [{
|
||||
"weight": 0,
|
||||
"podAffinityTerm": {
|
||||
"labelSelector": {
|
||||
"matchExpressions": [{
|
||||
"key": "service",
|
||||
"operator": "DoesNotExist",
|
||||
"values":["securityscan"]
|
||||
}]
|
||||
},
|
||||
"namespaces": [],
|
||||
"topologyKey": "kubernetes.io/hostname"
|
||||
}
|
||||
}]
|
||||
}}`,
|
||||
},
|
||||
},
|
||||
Spec: api.PodSpec{
|
||||
Containers: []api.Container{
|
||||
{
|
||||
Name: podName,
|
||||
Image: "gcr.io/google_containers/pause:2.0",
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
if err == nil || !errors.IsInvalid(err) {
|
||||
framework.Failf("Expect error of invalid, got : %v", err)
|
||||
}
|
||||
|
||||
// Wait a bit to allow scheduler to do its thing if the pod is not rejected.
|
||||
// TODO: this is brittle; there's no guarantee the scheduler will have run in 10 seconds.
|
||||
framework.Logf("Sleeping 10 seconds and crossing our fingers that scheduler will run in that time.")
|
||||
time.Sleep(10 * time.Second)
|
||||
|
||||
cleanupPods(c, ns)
|
||||
})
|
||||
|
||||
// Test Nodes does not have any pod, hence it should be impossible to schedule a Pod with pod affinity.
|
||||
It("validates that Inter-pod-Affinity is respected if not matching", func() {
|
||||
By("Trying to schedule Pod with nonempty Pod Affinity.")
|
||||
podName := "without-label-" + string(util.NewUUID())
|
||||
|
||||
waitForStableCluster(c)
|
||||
|
||||
_, err := c.Pods(ns).Create(&api.Pod{
|
||||
TypeMeta: unversioned.TypeMeta{
|
||||
Kind: "Pod",
|
||||
},
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Name: podName,
|
||||
Annotations: map[string]string{
|
||||
"scheduler.alpha.kubernetes.io/affinity": `
|
||||
{"podAffinity": {
|
||||
"requiredDuringSchedulingIgnoredDuringExecution": [{
|
||||
"labelSelector":{
|
||||
"matchExpressions": [{
|
||||
"key": "service",
|
||||
"operator": "In",
|
||||
"values": ["securityscan", "value2"]
|
||||
}]
|
||||
},
|
||||
"topologyKey": "kubernetes.io/hostname"
|
||||
}]
|
||||
}}`,
|
||||
},
|
||||
},
|
||||
Spec: api.PodSpec{
|
||||
Containers: []api.Container{
|
||||
{
|
||||
Name: podName,
|
||||
Image: "gcr.io/google_containers/pause:2.0",
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
framework.ExpectNoError(err)
|
||||
// Wait a bit to allow scheduler to do its thing
|
||||
// TODO: this is brittle; there's no guarantee the scheduler will have run in 10 seconds.
|
||||
framework.Logf("Sleeping 10 seconds and crossing our fingers that scheduler will run in that time.")
|
||||
time.Sleep(10 * time.Second)
|
||||
|
||||
verifyResult(c, podName, ns)
|
||||
cleanupPods(c, ns)
|
||||
})
|
||||
|
||||
// test the pod affinity successful matching scenario.
|
||||
It("validates that InterPodAffinity is respected if matching", func() {
|
||||
// launch a pod to find a node which can launch a pod. We intentionally do
|
||||
// not just take the node list and choose the first of them. Depending on the
|
||||
// cluster and the scheduler it might be that a "normal" pod cannot be
|
||||
// scheduled onto it.
|
||||
By("Trying to launch a pod with a label to get a node which can launch it.")
|
||||
podName := "with-label-" + string(util.NewUUID())
|
||||
_, err := c.Pods(ns).Create(&api.Pod{
|
||||
TypeMeta: unversioned.TypeMeta{
|
||||
Kind: "Pod",
|
||||
},
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Name: podName,
|
||||
Labels: map[string]string{"security": "S1"},
|
||||
},
|
||||
Spec: api.PodSpec{
|
||||
Containers: []api.Container{
|
||||
{
|
||||
Name: podName,
|
||||
Image: "gcr.io/google_containers/pause:2.0",
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
framework.ExpectNoError(err)
|
||||
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, podName, ns))
|
||||
pod, err := c.Pods(ns).Get(podName)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
nodeName := pod.Spec.NodeName
|
||||
defer c.Pods(ns).Delete(podName, api.NewDeleteOptions(0))
|
||||
|
||||
By("Trying to apply a random label on the found node.")
|
||||
k := "e2e.inter-pod-affinity.kubernetes.io/zone"
|
||||
v := "china-e2etest"
|
||||
patch := fmt.Sprintf(`{"metadata":{"labels":{"%s":"%s"}}}`, k, v)
|
||||
err = c.Patch(api.MergePatchType).Resource("nodes").Name(nodeName).Body([]byte(patch)).Do().Error()
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
node, err := c.Nodes().Get(nodeName)
|
||||
framework.ExpectNoError(err)
|
||||
Expect(node.Labels[k]).To(Equal(v))
|
||||
|
||||
By("Trying to launch the pod, now with podAffinity.")
|
||||
labelPodName := "with-podaffinity-" + string(util.NewUUID())
|
||||
_, err = c.Pods(ns).Create(&api.Pod{
|
||||
TypeMeta: unversioned.TypeMeta{
|
||||
Kind: "Pod",
|
||||
},
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Name: labelPodName,
|
||||
Annotations: map[string]string{
|
||||
"scheduler.alpha.kubernetes.io/affinity": `
|
||||
{"podAffinity": {
|
||||
"requiredDuringSchedulingIgnoredDuringExecution": [{
|
||||
"labelSelector":{
|
||||
"matchExpressions": [{
|
||||
"key": "security",
|
||||
"operator": "In",
|
||||
"values": ["S1", "value2"]
|
||||
}]
|
||||
},
|
||||
"topologyKey": "` + k + `",
|
||||
"namespaces":["` + ns + `"]
|
||||
}]
|
||||
}}`,
|
||||
},
|
||||
},
|
||||
Spec: api.PodSpec{
|
||||
Containers: []api.Container{
|
||||
{
|
||||
Name: labelPodName,
|
||||
Image: "gcr.io/google_containers/pause:2.0",
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
framework.ExpectNoError(err)
|
||||
defer c.Pods(ns).Delete(labelPodName, api.NewDeleteOptions(0))
|
||||
|
||||
// check that pod got scheduled. We intentionally DO NOT check that the
|
||||
// pod is running because this will create a race condition with the
|
||||
// kubelet and the scheduler: the scheduler might have scheduled a pod
|
||||
// already when the kubelet does not know about its new label yet. The
|
||||
// kubelet will then refuse to launch the pod.
|
||||
framework.ExpectNoError(framework.WaitForPodNotPending(c, ns, labelPodName))
|
||||
labelPod, err := c.Pods(ns).Get(labelPodName)
|
||||
framework.ExpectNoError(err)
|
||||
Expect(labelPod.Spec.NodeName).To(Equal(nodeName))
|
||||
})
|
||||
|
||||
// Test that when the pod anti-affinity rule is not satisfied, the pod stays pending.
|
||||
It("validates that InterPodAntiAffinity is respected if matching", func() {
|
||||
// launch a pod to find a node which can launch a pod. We intentionally do
|
||||
// not just take the node list and choose the first of them. Depending on the
|
||||
// cluster and the scheduler it might be that a "normal" pod cannot be
|
||||
// scheduled onto it.
|
||||
By("Trying to launch a pod with a label to get a node which can launch it.")
|
||||
podName := "with-label-" + string(util.NewUUID())
|
||||
_, err := c.Pods(ns).Create(&api.Pod{
|
||||
TypeMeta: unversioned.TypeMeta{
|
||||
Kind: "Pod",
|
||||
},
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Name: podName,
|
||||
Labels: map[string]string{"service": "S1"},
|
||||
},
|
||||
Spec: api.PodSpec{
|
||||
Containers: []api.Container{
|
||||
{
|
||||
Name: podName,
|
||||
Image: "gcr.io/google_containers/pause:2.0",
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
framework.ExpectNoError(err)
|
||||
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, podName, ns))
|
||||
pod, err := c.Pods(ns).Get(podName)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
nodeName := pod.Spec.NodeName
|
||||
|
||||
By("Trying to apply a random label on the found node.")
|
||||
k := "e2e.inter-pod-affinity.kubernetes.io/zone"
|
||||
v := "china-e2etest"
|
||||
patch := fmt.Sprintf(`{"metadata":{"labels":{"%s":"%s"}}}`, k, v)
|
||||
err = c.Patch(api.MergePatchType).Resource("nodes").Name(nodeName).Body([]byte(patch)).Do().Error()
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
node, err := c.Nodes().Get(nodeName)
|
||||
framework.ExpectNoError(err)
|
||||
Expect(node.Labels[k]).To(Equal(v))
|
||||
|
||||
By("Trying to launch the pod, now with podAffinity with same Labels.")
|
||||
labelPodName := "with-podaffinity-" + string(util.NewUUID())
|
||||
_, err = c.Pods(ns).Create(&api.Pod{
|
||||
TypeMeta: unversioned.TypeMeta{
|
||||
Kind: "Pod",
|
||||
},
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Name: labelPodName,
|
||||
Labels: map[string]string{"service": "Diff"},
|
||||
Annotations: map[string]string{
|
||||
"scheduler.alpha.kubernetes.io/affinity": `
|
||||
{"podAntiAffinity": {
|
||||
"requiredDuringSchedulingIgnoredDuringExecution": [{
|
||||
"labelSelector":{
|
||||
"matchExpressions": [{
|
||||
"key": "service",
|
||||
"operator": "In",
|
||||
"values": ["S1", "value2"]
|
||||
}]
|
||||
},
|
||||
"topologyKey": "` + k + `",
|
||||
"namespaces": ["` + ns + `"]
|
||||
}]
|
||||
}}`,
|
||||
},
|
||||
},
|
||||
Spec: api.PodSpec{
|
||||
Containers: []api.Container{
|
||||
{
|
||||
Name: labelPodName,
|
||||
Image: "gcr.io/google_containers/pause:2.0",
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
framework.ExpectNoError(err)
|
||||
// Wait a bit to allow scheduler to do its thing
|
||||
// TODO: this is brittle; there's no guarantee the scheduler will have run in 10 seconds.
|
||||
framework.Logf("Sleeping 10 seconds and crossing our fingers that scheduler will run in that time.")
|
||||
time.Sleep(10 * time.Second)
|
||||
|
||||
verifyResult(c, labelPodName, ns)
|
||||
cleanupPods(c, ns)
|
||||
})
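The fixed 10-second sleep in the test above is acknowledged as brittle. A polling-based alternative is sketched below; the import paths and the unversioned client type are assumptions based on the surrounding test code, so treat this as a sketch rather than a drop-in replacement.

```go
// Sketch only: verify that a pod never leaves Pending within a bounded window,
// instead of sleeping for a fixed 10 seconds.
package e2esketch

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/api"
	client "k8s.io/kubernetes/pkg/client/unversioned"
	"k8s.io/kubernetes/pkg/util/wait"
)

// expectPodStaysPending polls the pod every two seconds and fails fast if it
// ever leaves Pending; reaching the timeout means it stayed Pending for the
// whole window, which is what the anti-affinity test expects.
func expectPodStaysPending(c *client.Client, ns, podName string, timeout time.Duration) error {
	err := wait.Poll(2*time.Second, timeout, func() (bool, error) {
		pod, err := c.Pods(ns).Get(podName)
		if err != nil {
			return false, err
		}
		if pod.Status.Phase != api.PodPending {
			return false, fmt.Errorf("pod %s/%s unexpectedly left Pending: %s", ns, podName, pod.Status.Phase)
		}
		return false, nil // keep polling until the timeout elapses
	})
	if err == wait.ErrWaitTimeout {
		return nil
	}
	return err
}
```

Polling trades the fixed sleep for an upper bound: the check still takes the full timeout when the pod correctly stays Pending, but it fails immediately if the scheduler places the pod despite the anti-affinity rule.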

// Test the successful pod affinity matching scenario with multiple label operators.
It("validates that InterPodAffinity is respected if matching with multiple Affinities", func() {
// launch a pod to find a node which can launch a pod. We intentionally do
// not just take the node list and choose the first of them. Depending on the
// cluster and the scheduler it might be that a "normal" pod cannot be
// scheduled onto it.
By("Trying to launch a pod with a label to get a node which can launch it.")
podName := "with-label-" + string(util.NewUUID())
_, err := c.Pods(ns).Create(&api.Pod{
	TypeMeta: unversioned.TypeMeta{
		Kind: "Pod",
	},
	ObjectMeta: api.ObjectMeta{
		Name: podName,
		Labels: map[string]string{"security": "S1"},
	},
	Spec: api.PodSpec{
		Containers: []api.Container{
			{
				Name: podName,
				Image: "gcr.io/google_containers/pause:2.0",
			},
		},
	},
})
framework.ExpectNoError(err)
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, podName, ns))
pod, err := c.Pods(ns).Get(podName)
framework.ExpectNoError(err)

nodeName := pod.Spec.NodeName
defer c.Pods(ns).Delete(podName, api.NewDeleteOptions(0))

By("Trying to apply a random label on the found node.")
k := "e2e.inter-pod-affinity.kubernetes.io/zone"
v := "kubernetes-e2e"
patch := fmt.Sprintf(`{"metadata":{"labels":{"%s":"%s"}}}`, k, v)
err = c.Patch(api.MergePatchType).Resource("nodes").Name(nodeName).Body([]byte(patch)).Do().Error()
framework.ExpectNoError(err)

node, err := c.Nodes().Get(nodeName)
framework.ExpectNoError(err)
Expect(node.Labels[k]).To(Equal(v))

By("Trying to launch the pod, now with multiple pod affinities with diff LabelOperators.")
labelPodName := "with-podaffinity-" + string(util.NewUUID())
_, err = c.Pods(ns).Create(&api.Pod{
	TypeMeta: unversioned.TypeMeta{
		Kind: "Pod",
	},
	ObjectMeta: api.ObjectMeta{
		Name: labelPodName,
		Annotations: map[string]string{
			"scheduler.alpha.kubernetes.io/affinity": `
			{"podAffinity": {
				"requiredDuringSchedulingIgnoredDuringExecution": [{
					"labelSelector":{
						"matchExpressions": [{
							"key": "security",
							"operator": "In",
							"values": ["S1", "value2"]
						},
						{
							"key": "security",
							"operator": "NotIn",
							"values": ["S2"]
						},
						{
							"key": "security",
							"operator":"Exists"
						}]
					},
					"topologyKey": "` + k + `"
				}]
			}}`,
		},
	},
	Spec: api.PodSpec{
		Containers: []api.Container{
			{
				Name: labelPodName,
				Image: "gcr.io/google_containers/pause:2.0",
			},
		},
	},
})
framework.ExpectNoError(err)
defer c.Pods(ns).Delete(labelPodName, api.NewDeleteOptions(0))

// check that pod got scheduled. We intentionally DO NOT check that the
// pod is running because this will create a race condition with the
// kubelet and the scheduler: the scheduler might have scheduled a pod
// already when the kubelet does not know about its new label yet. The
// kubelet will then refuse to launch the pod.
framework.ExpectNoError(framework.WaitForPodNotPending(c, ns, labelPodName))
labelPod, err := c.Pods(ns).Get(labelPodName)
framework.ExpectNoError(err)
Expect(labelPod.Spec.NodeName).To(Equal(nodeName))
})
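The test above stacks three matchExpressions (In, NotIn, Exists) on the same key; every expression must hold at once for the selector to match an existing pod's labels. The standalone sketch below illustrates that AND semantics with a hand-rolled matcher; it is not the scheduler's or the labels package's actual implementation.

```go
package main

import "fmt"

type expr struct {
	Key      string
	Operator string // "In", "NotIn", or "Exists", as in the annotation above
	Values   []string
}

// matches applies all expressions with AND semantics, the way a required
// labelSelector is evaluated against an existing pod's labels.
func matches(podLabels map[string]string, exprs []expr) bool {
	for _, e := range exprs {
		v, ok := podLabels[e.Key]
		switch e.Operator {
		case "Exists":
			if !ok {
				return false
			}
		case "In":
			if !ok || !contains(e.Values, v) {
				return false
			}
		case "NotIn":
			if ok && contains(e.Values, v) {
				return false
			}
		}
	}
	return true
}

func contains(vals []string, v string) bool {
	for _, x := range vals {
		if x == v {
			return true
		}
	}
	return false
}

func main() {
	exprs := []expr{
		{Key: "security", Operator: "In", Values: []string{"S1", "value2"}},
		{Key: "security", Operator: "NotIn", Values: []string{"S2"}},
		{Key: "security", Operator: "Exists"},
	}
	// The first pod launched by the test carries security=S1, so all three hold.
	fmt.Println(matches(map[string]string{"security": "S1"}, exprs)) // true
	fmt.Println(matches(map[string]string{"security": "S2"}, exprs)) // false
}
```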

// Test the scenario where pod affinity and pod anti-affinity both match successfully.
It("validates that InterPod Affinity and AntiAffinity is respected if matching", func() {
// launch a pod to find a node which can launch a pod. We intentionally do
// not just take the node list and choose the first of them. Depending on the
// cluster and the scheduler it might be that a "normal" pod cannot be
// scheduled onto it.
By("Trying to launch a pod with a label to get a node which can launch it.")
podName := "with-label-" + string(util.NewUUID())
_, err := c.Pods(ns).Create(&api.Pod{
	TypeMeta: unversioned.TypeMeta{
		Kind: "Pod",
	},
	ObjectMeta: api.ObjectMeta{
		Name: podName,
		Labels: map[string]string{"security": "S1"},
	},
	Spec: api.PodSpec{
		Containers: []api.Container{
			{
				Name: podName,
				Image: "gcr.io/google_containers/pause:2.0",
			},
		},
	},
})
framework.ExpectNoError(err)
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, podName, ns))
pod, err := c.Pods(ns).Get(podName)
framework.ExpectNoError(err)

nodeName := pod.Spec.NodeName
defer c.Pods(ns).Delete(podName, api.NewDeleteOptions(0))

By("Trying to apply a random label on the found node.")
k := "e2e.inter-pod-affinity.kubernetes.io/zone"
v := "e2e-testing"
patch := fmt.Sprintf(`{"metadata":{"labels":{"%s":"%s"}}}`, k, v)
err = c.Patch(api.MergePatchType).Resource("nodes").Name(nodeName).Body([]byte(patch)).Do().Error()
framework.ExpectNoError(err)

node, err := c.Nodes().Get(nodeName)
framework.ExpectNoError(err)
Expect(node.Labels[k]).To(Equal(v))

By("Trying to launch the pod, now with Pod affinity and anti affinity.")
labelPodName := "with-podantiaffinity-" + string(util.NewUUID())
_, err = c.Pods(ns).Create(&api.Pod{
	TypeMeta: unversioned.TypeMeta{
		Kind: "Pod",
	},
	ObjectMeta: api.ObjectMeta{
		Name: labelPodName,
		Annotations: map[string]string{
			"scheduler.alpha.kubernetes.io/affinity": `
			{"podAffinity": {
				"requiredDuringSchedulingIgnoredDuringExecution": [{
					"labelSelector": {
						"matchExpressions": [{
							"key": "security",
							"operator": "In",
							"values":["S1"]
						}]
					},
					"topologyKey": "` + k + `"
				}]
			},
			"podAntiAffinity": {
				"requiredDuringSchedulingIgnoredDuringExecution": [{
					"labelSelector": {
						"matchExpressions": [{
							"key": "security",
							"operator": "In",
							"values":["S2"]
						}]
					},
					"topologyKey": "` + k + `"
				}]
			}}`,
		},
	},
	Spec: api.PodSpec{
		Containers: []api.Container{
			{
				Name: labelPodName,
				Image: "gcr.io/google_containers/pause:2.0",
			},
		},
	},
})
framework.ExpectNoError(err)
defer c.Pods(ns).Delete(labelPodName, api.NewDeleteOptions(0))

// check that pod got scheduled. We intentionally DO NOT check that the
// pod is running because this will create a race condition with the
// kubelet and the scheduler: the scheduler might have scheduled a pod
// already when the kubelet does not know about its new label yet. The
// kubelet will then refuse to launch the pod.
framework.ExpectNoError(framework.WaitForPodNotPending(c, ns, labelPodName))
labelPod, err := c.Pods(ns).Get(labelPodName)
framework.ExpectNoError(err)
Expect(labelPod.Spec.NodeName).To(Equal(nodeName))
})
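Hand-splicing the topology key into JSON fragments, as these tests do, is easy to get subtly wrong. The sketch below builds an equivalent combined podAffinity/podAntiAffinity annotation value with json.Marshal instead; the field names mirror the annotation shape used above, and the maps here are illustrative stand-ins rather than the real api.Affinity type.

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	k := "e2e.inter-pod-affinity.kubernetes.io/zone" // same topology key the test patches onto the node

	// One required term with a single In expression on the "security" key.
	term := func(values []string) map[string]interface{} {
		return map[string]interface{}{
			"labelSelector": map[string]interface{}{
				"matchExpressions": []map[string]interface{}{
					{"key": "security", "operator": "In", "values": values},
				},
			},
			"topologyKey": k,
		}
	}

	// Affinity toward security=S1, anti-affinity against security=S2, as in the test above.
	affinity := map[string]interface{}{
		"podAffinity": map[string]interface{}{
			"requiredDuringSchedulingIgnoredDuringExecution": []interface{}{term([]string{"S1"})},
		},
		"podAntiAffinity": map[string]interface{}{
			"requiredDuringSchedulingIgnoredDuringExecution": []interface{}{term([]string{"S2"})},
		},
	}

	b, err := json.Marshal(affinity)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // candidate value for the scheduler.alpha.kubernetes.io/affinity annotation
}
```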

// Verify that an escaped JSON string of pod affinity and pod anti affinity in a YAML PodSpec works.
It("validates that embedding the JSON PodAffinity and PodAntiAffinity setting as a string in the annotation value work", func() {
// launch a pod to find a node which can launch a pod. We intentionally do
// not just take the node list and choose the first of them. Depending on the
// cluster and the scheduler it might be that a "normal" pod cannot be
// scheduled onto it.
By("Trying to launch a pod with label to get a node which can launch it.")
podName := "with-label-" + string(util.NewUUID())
_, err := c.Pods(ns).Create(&api.Pod{
	TypeMeta: unversioned.TypeMeta{
		Kind: "Pod",
	},
	ObjectMeta: api.ObjectMeta{
		Name: podName,
		Labels: map[string]string{"security": "S1"},
	},
	Spec: api.PodSpec{
		Containers: []api.Container{
			{
				Name: podName,
				Image: "gcr.io/google_containers/pause:2.0",
			},
		},
	},
})
framework.ExpectNoError(err)
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, podName, ns))
pod, err := c.Pods(ns).Get(podName)
framework.ExpectNoError(err)

nodeName := pod.Spec.NodeName
defer c.Pods(ns).Delete(podName, api.NewDeleteOptions(0))

By("Trying to apply a label with fake az info on the found node.")
k := "e2e.inter-pod-affinity.kubernetes.io/zone"
v := "e2e-az1"
patch := fmt.Sprintf(`{"metadata":{"labels":{"%s":"%s"}}}`, k, v)
err = c.Patch(api.MergePatchType).Resource("nodes").Name(nodeName).Body([]byte(patch)).Do().Error()
framework.ExpectNoError(err)

node, err := c.Nodes().Get(nodeName)
framework.ExpectNoError(err)
Expect(node.Labels[k]).To(Equal(v))

By("Trying to launch a pod that with PodAffinity & PodAntiAffinity setting as embedded JSON string in the annotation value.")
labelPodName := "with-newlabels"
nodeSelectionRoot := filepath.Join(framework.TestContext.RepoRoot, "test/e2e/node-selection")
testPodPath := filepath.Join(nodeSelectionRoot, "pod-with-pod-affinity.yaml")
framework.RunKubectlOrDie("create", "-f", testPodPath, fmt.Sprintf("--namespace=%v", ns))
defer c.Pods(ns).Delete(labelPodName, api.NewDeleteOptions(0))

// check that pod got scheduled. We intentionally DO NOT check that the
// pod is running because this will create a race condition with the
// kubelet and the scheduler: the scheduler might have scheduled a pod
// already when the kubelet does not know about its new label yet. The
// kubelet will then refuse to launch the pod.
framework.ExpectNoError(framework.WaitForPodNotPending(c, ns, labelPodName))
labelPod, err := c.Pods(ns).Get(labelPodName)
framework.ExpectNoError(err)
Expect(labelPod.Spec.NodeName).To(Equal(nodeName))
})
})

@@ -241,7 +241,7 @@ func TestSchedulerExtender(t *testing.T) {
	}
	policy.APIVersion = testapi.Default.GroupVersion().String()

	schedulerConfigFactory := factory.NewConfigFactory(restClient, api.DefaultSchedulerName)
	schedulerConfigFactory := factory.NewConfigFactory(restClient, api.DefaultSchedulerName, api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains)
	schedulerConfig, err := schedulerConfigFactory.CreateFromConfig(policy)
	if err != nil {
		t.Fatalf("Couldn't create scheduler config: %v", err)

@@ -68,7 +68,7 @@ func TestUnschedulableNodes(t *testing.T) {

	restClient := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})

	schedulerConfigFactory := factory.NewConfigFactory(restClient, api.DefaultSchedulerName)
	schedulerConfigFactory := factory.NewConfigFactory(restClient, api.DefaultSchedulerName, api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains)
	schedulerConfig, err := schedulerConfigFactory.Create()
	if err != nil {
		t.Fatalf("Couldn't create scheduler config: %v", err)

@@ -315,7 +315,7 @@ func TestMultiScheduler(t *testing.T) {
	// 1. create and start default-scheduler
	restClient := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})

	schedulerConfigFactory := factory.NewConfigFactory(restClient, api.DefaultSchedulerName)
	schedulerConfigFactory := factory.NewConfigFactory(restClient, api.DefaultSchedulerName, api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains)
	schedulerConfig, err := schedulerConfigFactory.Create()
	if err != nil {
		t.Fatalf("Couldn't create scheduler config: %v", err)

@@ -386,7 +386,7 @@ func TestMultiScheduler(t *testing.T) {
	// 5. create and start a scheduler with name "foo-scheduler"
	restClient2 := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})

	schedulerConfigFactory2 := factory.NewConfigFactory(restClient2, "foo-scheduler")
	schedulerConfigFactory2 := factory.NewConfigFactory(restClient2, "foo-scheduler", api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains)
	schedulerConfig2, err := schedulerConfigFactory2.Create()
	if err != nil {
		t.Errorf("Couldn't create scheduler config: %v", err)

@@ -484,7 +484,7 @@ func TestAllocatable(t *testing.T) {
	// 1. create and start default-scheduler
	restClient := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})

	schedulerConfigFactory := factory.NewConfigFactory(restClient, api.DefaultSchedulerName)
	schedulerConfigFactory := factory.NewConfigFactory(restClient, api.DefaultSchedulerName, api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains)
	schedulerConfig, err := schedulerConfigFactory.Create()
	if err != nil {
		t.Fatalf("Couldn't create scheduler config: %v", err)