mirror of https://github.com/k3s-io/k3s
Fix golint errors in `pkg/scheduler` based on golint check
parent: a4e49d19f9
commit: 3fdacfead5
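The commit applies the standard golint conventions throughout `pkg/scheduler`. As a quick orientation before the diff, the sketch below is a hypothetical Go file (the `lintdemo` package and all of its names are invented for illustration and are not part of this commit) showing the kinds of fixes that recur in the hunks that follow: doc comments on exported identifiers, dropping redundant types from `var` declarations, lower-case error strings without trailing punctuation, and underscore-free names with upper-case initialisms.

```go
// Package lintdemo is a hypothetical illustration of the golint rules applied
// in this commit; none of these identifiers exist in pkg/scheduler.
package lintdemo

import "fmt"

// ErrDiskConflict is used for the NoDiskConflict predicate error.
// Rule: every exported identifier carries a comment that starts with its name.
var ErrDiskConflict = fmt.Errorf("node(s) had no available disk")

// producers shows the redundant-type fix: golint flags
// `var m map[string]int = make(map[string]int)`, so the type is left inferred.
var producers = make(map[string]int)

// LookupVolume shows the error-string and naming rules: messages start
// lower-case with no trailing period, and initialisms stay upper-case in
// identifiers (pvID rather than pvId).
func LookupVolume(pvID string) error {
	if pvID == "" {
		return fmt.Errorf("invalid volume id %q", pvID)
	}
	producers[pvID]++
	return nil
}
```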
@@ -14,6 +14,6 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-// Package scheduler contains a generic Scheduler interface and several
+// Package algorithm contains a generic Scheduler interface and several
 // implementations.
 package algorithm // import "k8s.io/kubernetes/pkg/scheduler/algorithm"
@@ -30,27 +30,50 @@ var (
 // be made to pass by removing pods, or you change an existing predicate so that
 // it can never be made to pass by removing pods, you need to add the predicate
 // failure error in nodesWherePreemptionMightHelp() in scheduler/core/generic_scheduler.go
 
+// ErrDiskConflict is used for NoDiskConflict predicate error.
 ErrDiskConflict = newPredicateFailureError("NoDiskConflict", "node(s) had no available disk")
+// ErrVolumeZoneConflict is used for NoVolumeZoneConflict predicate error.
 ErrVolumeZoneConflict = newPredicateFailureError("NoVolumeZoneConflict", "node(s) had no available volume zone")
+// ErrNodeSelectorNotMatch is used for MatchNodeSelector predicate error.
 ErrNodeSelectorNotMatch = newPredicateFailureError("MatchNodeSelector", "node(s) didn't match node selector")
+// ErrPodAffinityNotMatch is used for MatchInterPodAffinity predicate error.
 ErrPodAffinityNotMatch = newPredicateFailureError("MatchInterPodAffinity", "node(s) didn't match pod affinity/anti-affinity")
+// ErrPodAffinityRulesNotMatch is used for PodAffinityRulesNotMatch predicate error.
 ErrPodAffinityRulesNotMatch = newPredicateFailureError("PodAffinityRulesNotMatch", "node(s) didn't match pod affinity rules")
+// ErrPodAntiAffinityRulesNotMatch is used for PodAntiAffinityRulesNotMatch predicate error.
 ErrPodAntiAffinityRulesNotMatch = newPredicateFailureError("PodAntiAffinityRulesNotMatch", "node(s) didn't match pod anti-affinity rules")
+// ErrExistingPodsAntiAffinityRulesNotMatch is used for ExistingPodsAntiAffinityRulesNotMatch predicate error.
 ErrExistingPodsAntiAffinityRulesNotMatch = newPredicateFailureError("ExistingPodsAntiAffinityRulesNotMatch", "node(s) didn't satisfy existing pods anti-affinity rules")
+// ErrTaintsTolerationsNotMatch is used for PodToleratesNodeTaints predicate error.
 ErrTaintsTolerationsNotMatch = newPredicateFailureError("PodToleratesNodeTaints", "node(s) had taints that the pod didn't tolerate")
+// ErrPodNotMatchHostName is used for HostName predicate error.
 ErrPodNotMatchHostName = newPredicateFailureError("HostName", "node(s) didn't match the requested hostname")
+// ErrPodNotFitsHostPorts is used for PodFitsHostPorts predicate error.
 ErrPodNotFitsHostPorts = newPredicateFailureError("PodFitsHostPorts", "node(s) didn't have free ports for the requested pod ports")
+// ErrNodeLabelPresenceViolated is used for CheckNodeLabelPresence predicate error.
 ErrNodeLabelPresenceViolated = newPredicateFailureError("CheckNodeLabelPresence", "node(s) didn't have the requested labels")
+// ErrServiceAffinityViolated is used for CheckServiceAffinity predicate error.
 ErrServiceAffinityViolated = newPredicateFailureError("CheckServiceAffinity", "node(s) didn't match service affinity")
+// ErrMaxVolumeCountExceeded is used for MaxVolumeCount predicate error.
 ErrMaxVolumeCountExceeded = newPredicateFailureError("MaxVolumeCount", "node(s) exceed max volume count")
+// ErrNodeUnderMemoryPressure is used for NodeUnderMemoryPressure predicate error.
 ErrNodeUnderMemoryPressure = newPredicateFailureError("NodeUnderMemoryPressure", "node(s) had memory pressure")
+// ErrNodeUnderDiskPressure is used for NodeUnderDiskPressure predicate error.
 ErrNodeUnderDiskPressure = newPredicateFailureError("NodeUnderDiskPressure", "node(s) had disk pressure")
+// ErrNodeOutOfDisk is used for NodeOutOfDisk predicate error.
 ErrNodeOutOfDisk = newPredicateFailureError("NodeOutOfDisk", "node(s) were out of disk space")
+// ErrNodeNotReady is used for NodeNotReady predicate error.
 ErrNodeNotReady = newPredicateFailureError("NodeNotReady", "node(s) were not ready")
+// ErrNodeNetworkUnavailable is used for NodeNetworkUnavailable predicate error.
 ErrNodeNetworkUnavailable = newPredicateFailureError("NodeNetworkUnavailable", "node(s) had unavailable network")
+// ErrNodeUnschedulable is used for NodeUnschedulable predicate error.
 ErrNodeUnschedulable = newPredicateFailureError("NodeUnschedulable", "node(s) were unschedulable")
+// ErrNodeUnknownCondition is used for NodeUnknownCondition predicate error.
 ErrNodeUnknownCondition = newPredicateFailureError("NodeUnknownCondition", "node(s) had unknown conditions")
+// ErrVolumeNodeConflict is used for VolumeNodeAffinityConflict predicate error.
 ErrVolumeNodeConflict = newPredicateFailureError("VolumeNodeAffinityConflict", "node(s) had volume node affinity conflict")
+// ErrVolumeBindConflict is used for VolumeBindingNoMatch predicate error.
 ErrVolumeBindConflict = newPredicateFailureError("VolumeBindingNoMatch", "node(s) didn't find available persistent volumes to bind")
 // ErrFakePredicate is used for test only. The fake predicates returning false also returns error
 // as ErrFakePredicate.
@@ -67,6 +90,7 @@ type InsufficientResourceError struct {
 capacity int64
 }
 
+// NewInsufficientResourceError returns an InsufficientResourceError.
 func NewInsufficientResourceError(resourceName v1.ResourceName, requested, used, capacity int64) *InsufficientResourceError {
 return &InsufficientResourceError{
 ResourceName: resourceName,
@@ -81,14 +105,17 @@ func (e *InsufficientResourceError) Error() string {
 e.ResourceName, e.requested, e.used, e.capacity)
 }
 
+// GetReason returns the reason of the InsufficientResourceError.
 func (e *InsufficientResourceError) GetReason() string {
 return fmt.Sprintf("Insufficient %v", e.ResourceName)
 }
 
+// GetInsufficientAmount returns the amount of the insufficient resource of the error.
 func (e *InsufficientResourceError) GetInsufficientAmount() int64 {
 return e.requested - (e.capacity - e.used)
 }
 
+// PredicateFailureError describes a failure error of predicate.
 type PredicateFailureError struct {
 PredicateName string
 PredicateDesc string
@@ -102,18 +129,22 @@ func (e *PredicateFailureError) Error() string {
 return fmt.Sprintf("Predicate %s failed", e.PredicateName)
 }
 
+// GetReason returns the reason of the PredicateFailureError.
 func (e *PredicateFailureError) GetReason() string {
 return e.PredicateDesc
 }
 
+// FailureReason describes a failure reason.
 type FailureReason struct {
 reason string
 }
 
+// NewFailureReason creates a FailureReason with message.
 func NewFailureReason(msg string) *FailureReason {
 return &FailureReason{reason: msg}
 }
 
+// GetReason returns the reason of the FailureReason.
 func (e *FailureReason) GetReason() string {
 return e.reason
 }
@@ -29,6 +29,7 @@ import (
 "github.com/golang/glog"
 )
 
+// PredicateMetadataFactory defines a factory of predicate metadata.
 type PredicateMetadataFactory struct {
 podLister algorithm.PodLister
 }
@@ -57,18 +58,20 @@ type predicateMetadata struct {
 // Ensure that predicateMetadata implements algorithm.PredicateMetadata.
 var _ algorithm.PredicateMetadata = &predicateMetadata{}
 
-// PredicateMetadataProducer: Helper types/variables...
+// PredicateMetadataProducer function produces predicate metadata.
 type PredicateMetadataProducer func(pm *predicateMetadata)
 
 var predicateMetaProducerRegisterLock sync.Mutex
-var predicateMetadataProducers map[string]PredicateMetadataProducer = make(map[string]PredicateMetadataProducer)
+var predicateMetadataProducers = make(map[string]PredicateMetadataProducer)
 
+// RegisterPredicateMetadataProducer registers a PredicateMetadataProducer.
 func RegisterPredicateMetadataProducer(predicateName string, precomp PredicateMetadataProducer) {
 predicateMetaProducerRegisterLock.Lock()
 defer predicateMetaProducerRegisterLock.Unlock()
 predicateMetadataProducers[predicateName] = precomp
 }
 
+// NewPredicateMetadataFactory creates a PredicateMetadataFactory.
 func NewPredicateMetadataFactory(podLister algorithm.PodLister) algorithm.PredicateMetadataProducer {
 factory := &PredicateMetadataFactory{
 podLister,
@@ -105,7 +108,7 @@ func (pfactory *PredicateMetadataFactory) GetMetadata(pod *v1.Pod, nodeNameToInf
 func (meta *predicateMetadata) RemovePod(deletedPod *v1.Pod) error {
 deletedPodFullName := schedutil.GetPodFullName(deletedPod)
 if deletedPodFullName == schedutil.GetPodFullName(meta.pod) {
-return fmt.Errorf("deletedPod and meta.pod must not be the same.")
+return fmt.Errorf("deletedPod and meta.pod must not be the same")
 }
 // Delete any anti-affinity rule from the deletedPod.
 delete(meta.matchingAntiAffinityTerms, deletedPodFullName)
@@ -132,10 +135,10 @@ func (meta *predicateMetadata) RemovePod(deletedPod *v1.Pod) error {
 func (meta *predicateMetadata) AddPod(addedPod *v1.Pod, nodeInfo *schedulercache.NodeInfo) error {
 addedPodFullName := schedutil.GetPodFullName(addedPod)
 if addedPodFullName == schedutil.GetPodFullName(meta.pod) {
-return fmt.Errorf("addedPod and meta.pod must not be the same.")
+return fmt.Errorf("addedPod and meta.pod must not be the same")
 }
 if nodeInfo.Node() == nil {
-return fmt.Errorf("Invalid node in nodeInfo.")
+return fmt.Errorf("invalid node in nodeInfo")
 }
 // Add matching anti-affinity terms of the addedPod to the map.
 podMatchingTerms, err := getMatchingAntiAffinityTermsOfExistingPod(meta.pod, addedPod, nodeInfo.Node())
@@ -92,24 +92,24 @@ var _ = sort.Interface(&sortableServices{})
 // Note: this function does not compare podRequest.
 func predicateMetadataEquivalent(meta1, meta2 *predicateMetadata) error {
 if !reflect.DeepEqual(meta1.pod, meta2.pod) {
-return fmt.Errorf("pods are not the same.")
+return fmt.Errorf("pods are not the same")
 }
 if meta1.podBestEffort != meta2.podBestEffort {
-return fmt.Errorf("podBestEfforts are not equal.")
+return fmt.Errorf("podBestEfforts are not equal")
 }
 if meta1.serviceAffinityInUse != meta1.serviceAffinityInUse {
-return fmt.Errorf("serviceAffinityInUses are not equal.")
+return fmt.Errorf("serviceAffinityInUses are not equal")
 }
 if len(meta1.podPorts) != len(meta2.podPorts) {
-return fmt.Errorf("podPorts are not equal.")
+return fmt.Errorf("podPorts are not equal")
 }
 for !reflect.DeepEqual(meta1.podPorts, meta2.podPorts) {
-return fmt.Errorf("podPorts are not equal.")
+return fmt.Errorf("podPorts are not equal")
 }
 sortAntiAffinityTerms(meta1.matchingAntiAffinityTerms)
 sortAntiAffinityTerms(meta2.matchingAntiAffinityTerms)
 if !reflect.DeepEqual(meta1.matchingAntiAffinityTerms, meta2.matchingAntiAffinityTerms) {
-return fmt.Errorf("matchingAntiAffinityTerms are not euqal.")
+return fmt.Errorf("matchingAntiAffinityTerms are not euqal")
 }
 if meta1.serviceAffinityInUse {
 sortablePods1 := sortablePods(meta1.serviceAffinityMatchingPodList)
@@ -117,7 +117,7 @@ func predicateMetadataEquivalent(meta1, meta2 *predicateMetadata) error {
 sortablePods2 := sortablePods(meta2.serviceAffinityMatchingPodList)
 sort.Sort(sortablePods2)
 if !reflect.DeepEqual(sortablePods1, sortablePods2) {
-return fmt.Errorf("serviceAffinityMatchingPodLists are not euqal.")
+return fmt.Errorf("serviceAffinityMatchingPodLists are not euqal")
 }
 
 sortableServices1 := sortableServices(meta1.serviceAffinityMatchingPodServices)
@@ -125,7 +125,7 @@ func predicateMetadataEquivalent(meta1, meta2 *predicateMetadata) error {
 sortableServices2 := sortableServices(meta2.serviceAffinityMatchingPodServices)
 sort.Sort(sortableServices2)
 if !reflect.DeepEqual(sortableServices1, sortableServices2) {
-return fmt.Errorf("serviceAffinityMatchingPodServices are not euqal.")
+return fmt.Errorf("serviceAffinityMatchingPodServices are not euqal")
 }
 }
 return nil
@@ -48,24 +48,43 @@ import (
 )
 
 const (
+// MatchInterPodAffinityPred defines the name of predicate MatchInterPodAffinity.
 MatchInterPodAffinityPred = "MatchInterPodAffinity"
+// CheckVolumeBindingPred defines the name of predicate CheckVolumeBinding.
 CheckVolumeBindingPred = "CheckVolumeBinding"
+// CheckNodeConditionPred defines the name of predicate CheckNodeCondition.
 CheckNodeConditionPred = "CheckNodeCondition"
+// GeneralPred defines the name of predicate GeneralPredicates.
 GeneralPred = "GeneralPredicates"
+// HostNamePred defines the name of predicate HostName.
 HostNamePred = "HostName"
+// PodFitsHostPortsPred defines the name of predicate PodFitsHostPorts.
 PodFitsHostPortsPred = "PodFitsHostPorts"
+// MatchNodeSelectorPred defines the name of predicate MatchNodeSelector.
 MatchNodeSelectorPred = "MatchNodeSelector"
+// PodFitsResourcesPred defines the name of predicate PodFitsResources.
 PodFitsResourcesPred = "PodFitsResources"
+// NoDiskConflictPred defines the name of predicate NoDiskConflict.
 NoDiskConflictPred = "NoDiskConflict"
+// PodToleratesNodeTaintsPred defines the name of predicate PodToleratesNodeTaints.
 PodToleratesNodeTaintsPred = "PodToleratesNodeTaints"
+// PodToleratesNodeNoExecuteTaintsPred defines the name of predicate PodToleratesNodeNoExecuteTaints.
 PodToleratesNodeNoExecuteTaintsPred = "PodToleratesNodeNoExecuteTaints"
+// CheckNodeLabelPresencePred defines the name of predicate CheckNodeLabelPresence.
 CheckNodeLabelPresencePred = "CheckNodeLabelPresence"
+// checkServiceAffinityPred defines the name of predicate checkServiceAffinity.
 checkServiceAffinityPred = "checkServiceAffinity"
+// MaxEBSVolumeCountPred defines the name of predicate MaxEBSVolumeCount.
 MaxEBSVolumeCountPred = "MaxEBSVolumeCount"
+// MaxGCEPDVolumeCountPred defines the name of predicate MaxGCEPDVolumeCount.
 MaxGCEPDVolumeCountPred = "MaxGCEPDVolumeCount"
+// MaxAzureDiskVolumeCountPred defines the name of predicate MaxAzureDiskVolumeCount.
 MaxAzureDiskVolumeCountPred = "MaxAzureDiskVolumeCount"
+// NoVolumeZoneConflictPred defines the name of predicate NoVolumeZoneConflict.
 NoVolumeZoneConflictPred = "NoVolumeZoneConflict"
+// CheckNodeMemoryPressurePred defines the name of predicate CheckNodeMemoryPressure.
 CheckNodeMemoryPressurePred = "CheckNodeMemoryPressure"
+// CheckNodeDiskPressurePred defines the name of predicate CheckNodeDiskPressure.
 CheckNodeDiskPressurePred = "CheckNodeDiskPressure"
 
 // DefaultMaxEBSVolumes is the limit for volumes attached to an instance.
|
||||||
// KubeMaxPDVols defines the maximum number of PD Volumes per kubelet
|
// KubeMaxPDVols defines the maximum number of PD Volumes per kubelet
|
||||||
KubeMaxPDVols = "KUBE_MAX_PD_VOLS"
|
KubeMaxPDVols = "KUBE_MAX_PD_VOLS"
|
||||||
|
|
||||||
// for EBSVolumeFilter
|
// EBSVolumeFilterType defines the filter name for EBSVolumeFilter.
|
||||||
EBSVolumeFilterType = "EBS"
|
EBSVolumeFilterType = "EBS"
|
||||||
// for GCEPDVolumeFilter
|
// GCEPDVolumeFilterType defines the filter name for GCEPDVolumeFilter.
|
||||||
GCEPDVolumeFilterType = "GCE"
|
GCEPDVolumeFilterType = "GCE"
|
||||||
// for AzureDiskVolumeFilter
|
// AzureDiskVolumeFilterType defines the filter name for AzureDiskVolumeFilter.
|
||||||
AzureDiskVolumeFilterType = "AzureDisk"
|
AzureDiskVolumeFilterType = "AzureDisk"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@@ -114,11 +133,12 @@ var (
 CheckNodeMemoryPressurePred, CheckNodeDiskPressurePred, MatchInterPodAffinityPred}
 )
 
-// NodeInfo: Other types for predicate functions...
+// NodeInfo interface represents anything that can get node object from node ID.
 type NodeInfo interface {
 GetNodeInfo(nodeID string) (*v1.Node, error)
 }
 
+// PersistentVolumeInfo interface represents anything that can get persistent volume object by PV ID.
 type PersistentVolumeInfo interface {
 GetPersistentVolumeInfo(pvID string) (*v1.PersistentVolume, error)
 }
@@ -128,18 +148,23 @@ type CachedPersistentVolumeInfo struct {
 corelisters.PersistentVolumeLister
 }
 
-func PredicatesOrdering() []string {
+// Ordering returns the ordering of predicates.
+func Ordering() []string {
 return predicatesOrdering
 }
 
+// SetPredicatesOrdering sets the ordering of predicates.
 func SetPredicatesOrdering(names []string) {
 predicatesOrdering = names
 }
 
+// GetPersistentVolumeInfo returns a persistent volume object by PV ID.
 func (c *CachedPersistentVolumeInfo) GetPersistentVolumeInfo(pvID string) (*v1.PersistentVolume, error) {
 return c.Get(pvID)
 }
 
+// PersistentVolumeClaimInfo interface represents anything that can get a PVC object in
+// specified namespace with specified name.
 type PersistentVolumeClaimInfo interface {
 GetPersistentVolumeClaimInfo(namespace string, name string) (*v1.PersistentVolumeClaim, error)
 }
@@ -154,6 +179,7 @@ func (c *CachedPersistentVolumeClaimInfo) GetPersistentVolumeClaimInfo(namespace
 return c.PersistentVolumeClaims(namespace).Get(name)
 }
 
+// CachedNodeInfo implements NodeInfo
 type CachedNodeInfo struct {
 corelisters.NodeLister
 }
@@ -173,6 +199,7 @@ func (c *CachedNodeInfo) GetNodeInfo(id string) (*v1.Node, error) {
 return node, nil
 }
 
+// StorageClassInfo interface represents anything that can get a storage class object by class name.
 type StorageClassInfo interface {
 GetStorageClassInfo(className string) (*storagev1.StorageClass, error)
 }
@@ -182,6 +209,7 @@ type CachedStorageClassInfo struct {
 storagelisters.StorageClassLister
 }
 
+// GetStorageClassInfo get StorageClass by class name.
 func (c *CachedStorageClassInfo) GetStorageClassInfo(className string) (*storagev1.StorageClass, error) {
 return c.Get(className)
 }
@@ -253,6 +281,7 @@ func NoDiskConflict(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *sch
 return true, nil, nil
 }
 
+// MaxPDVolumeCountChecker contains information to check the max number of volumes for a predicate.
 type MaxPDVolumeCountChecker struct {
 filter VolumeFilter
 maxVolumes int
@@ -341,13 +370,13 @@ func (c *MaxPDVolumeCountChecker) filterVolumes(volumes []v1.Volume, namespace s
 // Until we know real ID of the volume use namespace/pvcName as substitute
 // with a random prefix (calculated and stored inside 'c' during initialization)
 // to avoid conflicts with existing volume IDs.
-pvId := fmt.Sprintf("%s-%s/%s", c.randomVolumeIDPrefix, namespace, pvcName)
+pvID := fmt.Sprintf("%s-%s/%s", c.randomVolumeIDPrefix, namespace, pvcName)
 
 pvc, err := c.pvcInfo.GetPersistentVolumeClaimInfo(namespace, pvcName)
 if err != nil || pvc == nil {
 // if the PVC is not found, log the error and count the PV towards the PV limit
 glog.V(4).Infof("Unable to look up PVC info for %s/%s, assuming PVC matches predicate when counting limits: %v", namespace, pvcName, err)
-filteredVolumes[pvId] = true
+filteredVolumes[pvID] = true
 continue
 }
 
@@ -358,7 +387,7 @@ func (c *MaxPDVolumeCountChecker) filterVolumes(volumes []v1.Volume, namespace s
 // original PV where it was bound to -> log the error and count
 // the PV towards the PV limit
 glog.V(4).Infof("PVC %s/%s is not bound, assuming PVC matches predicate when counting limits", namespace, pvcName)
-filteredVolumes[pvId] = true
+filteredVolumes[pvID] = true
 continue
 }
 
@@ -367,7 +396,7 @@ func (c *MaxPDVolumeCountChecker) filterVolumes(volumes []v1.Volume, namespace s
 // if the PV is not found, log the error
 // and count the PV towards the PV limit
 glog.V(4).Infof("Unable to look up PV info for %s/%s/%s, assuming PV matches predicate when counting limits: %v", namespace, pvcName, pvName, err)
-filteredVolumes[pvId] = true
+filteredVolumes[pvID] = true
 continue
 }
 
@@ -424,7 +453,7 @@ func (c *MaxPDVolumeCountChecker) predicate(pod *v1.Pod, meta algorithm.Predicat
 }
 
 // EBSVolumeFilter is a VolumeFilter for filtering AWS ElasticBlockStore Volumes
-var EBSVolumeFilter VolumeFilter = VolumeFilter{
+var EBSVolumeFilter = VolumeFilter{
 FilterVolume: func(vol *v1.Volume) (string, bool) {
 if vol.AWSElasticBlockStore != nil {
 return vol.AWSElasticBlockStore.VolumeID, true
@@ -441,7 +470,7 @@ var EBSVolumeFilter VolumeFilter = VolumeFilter{
 }
 
 // GCEPDVolumeFilter is a VolumeFilter for filtering GCE PersistentDisk Volumes
-var GCEPDVolumeFilter VolumeFilter = VolumeFilter{
+var GCEPDVolumeFilter = VolumeFilter{
 FilterVolume: func(vol *v1.Volume) (string, bool) {
 if vol.GCEPersistentDisk != nil {
 return vol.GCEPersistentDisk.PDName, true
@@ -458,7 +487,7 @@ var GCEPDVolumeFilter VolumeFilter = VolumeFilter{
 }
 
 // AzureDiskVolumeFilter is a VolumeFilter for filtering Azure Disk Volumes
-var AzureDiskVolumeFilter VolumeFilter = VolumeFilter{
+var AzureDiskVolumeFilter = VolumeFilter{
 FilterVolume: func(vol *v1.Volume) (string, bool) {
 if vol.AzureDisk != nil {
 return vol.AzureDisk.DiskName, true
@@ -474,6 +503,7 @@ var AzureDiskVolumeFilter VolumeFilter = VolumeFilter{
 },
 }
 
+// VolumeZoneChecker contains information to check the volume zone for a predicate.
 type VolumeZoneChecker struct {
 pvInfo PersistentVolumeInfo
 pvcInfo PersistentVolumeClaimInfo
@@ -818,11 +848,14 @@ func PodFitsHost(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedu
 return false, []algorithm.PredicateFailureReason{ErrPodNotMatchHostName}, nil
 }
 
+// NodeLabelChecker contains information to check node labels for a predicate.
 type NodeLabelChecker struct {
 labels []string
 presence bool
 }
 
+// NewNodeLabelPredicate creates a predicate which evaluates whether a pod can fit based on the
+// node labels which match a filter that it requests.
 func NewNodeLabelPredicate(labels []string, presence bool) algorithm.FitPredicate {
 labelChecker := &NodeLabelChecker{
 labels: labels,
@@ -860,6 +893,7 @@ func (n *NodeLabelChecker) CheckNodeLabelPresence(pod *v1.Pod, meta algorithm.Pr
 return true, nil, nil
 }
 
+// ServiceAffinity defines a struct used for create service affinity predicates.
 type ServiceAffinity struct {
 podLister algorithm.PodLister
 serviceLister algorithm.ServiceLister
@@ -889,6 +923,7 @@ func (s *ServiceAffinity) serviceAffinityMetadataProducer(pm *predicateMetadata)
 pm.serviceAffinityMatchingPodList = FilterPodsByNamespace(allMatches, pm.pod.Namespace)
 }
 
+// NewServiceAffinityPredicate creates a ServiceAffinity.
 func NewServiceAffinityPredicate(podLister algorithm.PodLister, serviceLister algorithm.ServiceLister, nodeInfo NodeInfo, labels []string) (algorithm.FitPredicate, PredicateMetadataProducer) {
 affinity := &ServiceAffinity{
 podLister: podLister,
@@ -1071,11 +1106,13 @@ func EssentialPredicates(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo
 return len(predicateFails) == 0, predicateFails, nil
 }
 
+// PodAffinityChecker contains information to check pod affinity.
 type PodAffinityChecker struct {
 info NodeInfo
 podLister algorithm.PodLister
 }
 
+// NewPodAffinityPredicate creates a PodAffinityChecker.
 func NewPodAffinityPredicate(info NodeInfo, podLister algorithm.PodLister) algorithm.FitPredicate {
 checker := &PodAffinityChecker{
 info: info,
@@ -1151,6 +1188,7 @@ func (c *PodAffinityChecker) anyPodMatchesPodAffinityTerm(pod *v1.Pod, pods []*v
 return false, matchingPodExists, nil
 }
 
+// GetPodAffinityTerms gets pod affinity terms by a pod affinity object.
 func GetPodAffinityTerms(podAffinity *v1.PodAffinity) (terms []v1.PodAffinityTerm) {
 if podAffinity != nil {
 if len(podAffinity.RequiredDuringSchedulingIgnoredDuringExecution) != 0 {
@@ -1164,6 +1202,7 @@ func GetPodAffinityTerms(podAffinity *v1.PodAffinity) (terms []v1.PodAffinityTer
 return terms
 }
 
+// GetPodAntiAffinityTerms gets pod affinity terms by a pod anti-affinity.
 func GetPodAntiAffinityTerms(podAntiAffinity *v1.PodAntiAffinity) (terms []v1.PodAffinityTerm) {
 if podAntiAffinity != nil {
 if len(podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution) != 0 {
@@ -1496,6 +1535,7 @@ func CheckNodeConditionPredicate(pod *v1.Pod, meta algorithm.PredicateMetadata,
 return len(reasons) == 0, reasons, nil
 }
 
+// VolumeBindingChecker contains information to check a volume binding.
 type VolumeBindingChecker struct {
 binder *volumebinder.VolumeBinder
 }
@@ -3145,7 +3145,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
 nodeInfo.SetNode(&node)
 nodeInfoMap := map[string]*schedulercache.NodeInfo{node.Name: nodeInfo}
 
-var meta algorithm.PredicateMetadata = nil
+var meta algorithm.PredicateMetadata
 
 if !test.nometa {
 meta = PredicateMetadata(test.pod, nodeInfoMap)
@@ -23,8 +23,10 @@ import (
 storagev1 "k8s.io/api/storage/v1"
 )
 
+// FakePersistentVolumeClaimInfo declares a []v1.PersistentVolumeClaim type for testing.
 type FakePersistentVolumeClaimInfo []v1.PersistentVolumeClaim
 
+// GetPersistentVolumeClaimInfo gets PVC matching the namespace and PVC ID.
 func (pvcs FakePersistentVolumeClaimInfo) GetPersistentVolumeClaimInfo(namespace string, pvcID string) (*v1.PersistentVolumeClaim, error) {
 for _, pvc := range pvcs {
 if pvc.Name == pvcID && pvc.Namespace == namespace {
@@ -34,15 +36,19 @@ func (pvcs FakePersistentVolumeClaimInfo) GetPersistentVolumeClaimInfo(namespace
 return nil, fmt.Errorf("Unable to find persistent volume claim: %s/%s", namespace, pvcID)
 }
 
+// FakeNodeInfo declares a v1.Node type for testing.
 type FakeNodeInfo v1.Node
 
+// GetNodeInfo return a fake node info object.
 func (n FakeNodeInfo) GetNodeInfo(nodeName string) (*v1.Node, error) {
 node := v1.Node(n)
 return &node, nil
 }
 
+// FakeNodeListInfo declares a []v1.Node type for testing.
 type FakeNodeListInfo []v1.Node
 
+// GetNodeInfo returns a fake node object in the fake nodes.
 func (nodes FakeNodeListInfo) GetNodeInfo(nodeName string) (*v1.Node, error) {
 for _, node := range nodes {
 if node.Name == nodeName {
@@ -52,8 +58,10 @@ func (nodes FakeNodeListInfo) GetNodeInfo(nodeName string) (*v1.Node, error) {
 return nil, fmt.Errorf("Unable to find node: %s", nodeName)
 }
 
+// FakePersistentVolumeInfo declares a []v1.PersistentVolume type for testing.
 type FakePersistentVolumeInfo []v1.PersistentVolume
 
+// GetPersistentVolumeInfo returns a fake PV object in the fake PVs by PV ID.
 func (pvs FakePersistentVolumeInfo) GetPersistentVolumeInfo(pvID string) (*v1.PersistentVolume, error) {
 for _, pv := range pvs {
 if pv.Name == pvID {
@@ -63,8 +71,10 @@ func (pvs FakePersistentVolumeInfo) GetPersistentVolumeInfo(pvID string) (*v1.Pe
 return nil, fmt.Errorf("Unable to find persistent volume: %s", pvID)
 }
 
+// FakeStorageClassInfo declares a []storagev1.StorageClass type for testing.
 type FakeStorageClassInfo []storagev1.StorageClass
 
+// GetStorageClassInfo returns a fake storage class object in the fake storage classes by name.
 func (classes FakeStorageClassInfo) GetStorageClassInfo(name string) (*storagev1.StorageClass, error) {
 for _, sc := range classes {
 if sc.Name == name {
@@ -92,19 +92,20 @@ func (e *EquivalencePodGenerator) getEquivalencePod(pod *v1.Pod) interface{} {
 // to be equivalent
 for _, ref := range pod.OwnerReferences {
 if ref.Controller != nil && *ref.Controller {
-if pvcSet, err := e.getPVCSet(pod); err == nil {
+pvcSet, err := e.getPVCSet(pod)
+if err == nil {
 // A pod can only belongs to one controller, so let's return.
 return &EquivalencePod{
 ControllerRef: ref,
 PVCSet: pvcSet,
 }
-} else {
+}
 
 // If error encountered, log warning and return nil (i.e. no equivalent pod found)
 glog.Warningf("[EquivalencePodGenerator] for pod: %v failed due to: %v", pod.GetName(), err)
 return nil
 }
 }
-}
 return nil
 }
@@ -28,7 +28,7 @@ import (
 )
 
 func TestImageLocalityPriority(t *testing.T) {
-test_40_250 := v1.PodSpec{
+test40250 := v1.PodSpec{
 Containers: []v1.Container{
 {
 Image: "gcr.io/40",
@@ -39,7 +39,7 @@ func TestImageLocalityPriority(t *testing.T) {
 },
 }
 
-test_40_140 := v1.PodSpec{
+test40140 := v1.PodSpec{
 Containers: []v1.Container{
 {
 Image: "gcr.io/40",
@@ -50,7 +50,7 @@ func TestImageLocalityPriority(t *testing.T) {
 },
 }
 
-test_min_max := v1.PodSpec{
+testMinMax := v1.PodSpec{
 Containers: []v1.Container{
 {
 Image: "gcr.io/10",
@@ -61,7 +61,7 @@ func TestImageLocalityPriority(t *testing.T) {
 },
 }
 
-node_40_140_2000 := v1.NodeStatus{
+node401402000 := v1.NodeStatus{
 Images: []v1.ContainerImage{
 {
 Names: []string{
@@ -87,7 +87,7 @@ func TestImageLocalityPriority(t *testing.T) {
 },
 }
 
-node_250_10 := v1.NodeStatus{
+node25010 := v1.NodeStatus{
 Images: []v1.ContainerImage{
 {
 Names: []string{
@@ -122,8 +122,8 @@ func TestImageLocalityPriority(t *testing.T) {
 // Node2
 // Image: gcr.io/250 250MB
 // Score: (250M-23M)/97.7M + 1 = 3
-pod: &v1.Pod{Spec: test_40_250},
-nodes: []*v1.Node{makeImageNode("machine1", node_40_140_2000), makeImageNode("machine2", node_250_10)},
+pod: &v1.Pod{Spec: test40250},
+nodes: []*v1.Node{makeImageNode("machine1", node401402000), makeImageNode("machine2", node25010)},
 expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 1}, {Host: "machine2", Score: 3}},
 test: "two images spread on two nodes, prefer the larger image one",
 },
@@ -137,8 +137,8 @@ func TestImageLocalityPriority(t *testing.T) {
 // Node2
 // Image: not present
 // Score: 0
-pod: &v1.Pod{Spec: test_40_140},
-nodes: []*v1.Node{makeImageNode("machine1", node_40_140_2000), makeImageNode("machine2", node_250_10)},
+pod: &v1.Pod{Spec: test40140},
+nodes: []*v1.Node{makeImageNode("machine1", node401402000), makeImageNode("machine2", node25010)},
 expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 2}, {Host: "machine2", Score: 0}},
 test: "two images on one node, prefer this node",
 },
@@ -152,8 +152,8 @@ func TestImageLocalityPriority(t *testing.T) {
 // Node2
 // Image: gcr.io/10 10MB
 // Score: 10 < min score = 0
-pod: &v1.Pod{Spec: test_min_max},
-nodes: []*v1.Node{makeImageNode("machine1", node_40_140_2000), makeImageNode("machine2", node_250_10)},
+pod: &v1.Pod{Spec: testMinMax},
+nodes: []*v1.Node{makeImageNode("machine1", node401402000), makeImageNode("machine2", node25010)},
 expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}},
 test: "if exceed limit, use limit",
 },
@@ -32,6 +32,7 @@ import (
 "github.com/golang/glog"
 )
 
+// InterPodAffinity contains information to calculate inter pod affinity.
 type InterPodAffinity struct {
 info predicates.NodeInfo
 nodeLister algorithm.NodeLister
@@ -39,6 +40,7 @@ type InterPodAffinity struct {
 hardPodAffinityWeight int32
 }
 
+// NewInterPodAffinityPriority creates an InterPodAffinity.
 func NewInterPodAffinityPriority(
 info predicates.NodeInfo,
 nodeLister algorithm.NodeLister,
@@ -24,7 +24,7 @@ import (
 var (
 leastResourcePriority = &ResourceAllocationPriority{"LeastResourceAllocation", leastResourceScorer}
 
-// LeastRequestedPriority is a priority function that favors nodes with fewer requested resources.
+// LeastRequestedPriorityMap is a priority function that favors nodes with fewer requested resources.
 // It calculates the percentage of memory and CPU requested by pods scheduled on the node, and
 // prioritizes based on the minimum of the average of the fraction of requested to capacity.
 //
@@ -25,6 +25,7 @@ import (
 "k8s.io/kubernetes/pkg/scheduler/schedulercache"
 )
 
+// PriorityMetadataFactory is a factory to produce PriorityMetadata.
 type PriorityMetadataFactory struct {
 serviceLister algorithm.ServiceLister
 controllerLister algorithm.ControllerLister
@@ -32,6 +33,7 @@ type PriorityMetadataFactory struct {
 statefulSetLister algorithm.StatefulSetLister
 }
 
+// NewPriorityMetadataFactory creates a PriorityMetadataFactory.
 func NewPriorityMetadataFactory(serviceLister algorithm.ServiceLister, controllerLister algorithm.ControllerLister, replicaSetLister algorithm.ReplicaSetLister, statefulSetLister algorithm.StatefulSetLister) algorithm.PriorityMetadataProducer {
 factory := &PriorityMetadataFactory{
 serviceLister: serviceLister,
@@ -32,7 +32,7 @@ import (
 
 func TestPriorityMetadata(t *testing.T) {
 nonZeroReqs := &schedulercache.Resource{}
-nonZeroReqs.MilliCPU = priorityutil.DefaultMilliCpuRequest
+nonZeroReqs.MilliCPU = priorityutil.DefaultMilliCPURequest
 nonZeroReqs.Memory = priorityutil.DefaultMemoryRequest
 
 specifiedReqs := &schedulercache.Resource{}
@@ -24,7 +24,7 @@ import (
 var (
 mostResourcePriority = &ResourceAllocationPriority{"MostResourceAllocation", mostResourceScorer}
 
-// MostRequestedPriority is a priority function that favors nodes with most requested resources.
+// MostRequestedPriorityMap is a priority function that favors nodes with most requested resources.
 // It calculates the percentage of memory and CPU requested by pods scheduled on the node, and prioritizes
 // based on the maximum of the average of the fraction of requested to capacity.
 // Details: (cpu(10 * sum(requested) / capacity) + memory(10 * sum(requested) / capacity)) / 2
@@ -83,7 +83,7 @@ func TestMostRequested(t *testing.T) {
 },
 },
 }
-bigCpuAndMemory := v1.PodSpec{
+bigCPUAndMemory := v1.PodSpec{
 NodeName: "machine1",
 Containers: []v1.Container{
 {
@@ -201,7 +201,7 @@ func TestMostRequested(t *testing.T) {
 Memory Score: 9000 > 8000 return 0
 Node2 Score: (5 + 0) / 2 = 2
 */
-pod: &v1.Pod{Spec: bigCpuAndMemory},
+pod: &v1.Pod{Spec: bigCPUAndMemory},
 nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 10000, 8000)},
 expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 4}, {Host: "machine2", Score: 2}},
 test: "resources requested with more than the node, pods scheduled with resources",
@@ -26,7 +26,7 @@ import (
 "k8s.io/kubernetes/pkg/scheduler/schedulercache"
 )
 
-// CalculateNodeAffinityPriority prioritizes nodes according to node affinity scheduling preferences
+// CalculateNodeAffinityPriorityMap prioritizes nodes according to node affinity scheduling preferences
 // indicated in PreferredDuringSchedulingIgnoredDuringExecution. Each time a node match a preferredSchedulingTerm,
 // it will a get an add of preferredSchedulingTerm.Weight. Thus, the more preferredSchedulingTerms
 // the node satisfies and the more the preferredSchedulingTerm that is satisfied weights, the higher
@ -74,4 +74,5 @@ func CalculateNodeAffinityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *s
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// CalculateNodeAffinityPriorityReduce is a reduce function for node affinity priority calculation.
|
||||||
var CalculateNodeAffinityPriorityReduce = NormalizeReduce(schedulerapi.MaxPriority, false)
|
var CalculateNodeAffinityPriorityReduce = NormalizeReduce(schedulerapi.MaxPriority, false)
|
||||||
|
|
|
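Note: the Map/Reduce split touched above works in two stages: the map function emits a raw per-node count (here, the sum of the weights of matched preferredSchedulingTerms) and a reduce step such as NormalizeReduce rescales those counts into the 0..MaxPriority range. The sketch below only illustrates that normalization idea with invented names (hostPriority, normalize); it is not the scheduler's NormalizeReduce, and it assumes reverse=false as in the line above.

package main

import "fmt"

type hostPriority struct {
	Host  string
	Score int
}

// normalize rescales raw scores so the best node gets maxPriority and the rest
// are scaled proportionally, which is what the reduce stage is for.
func normalize(maxPriority int, result []hostPriority) {
	maxScore := 0
	for _, hp := range result {
		if hp.Score > maxScore {
			maxScore = hp.Score
		}
	}
	if maxScore == 0 {
		return // nothing matched anywhere; every node keeps 0
	}
	for i := range result {
		result[i].Score = result[i].Score * maxPriority / maxScore
	}
}

func main() {
	raw := []hostPriority{{"machine1", 6}, {"machine2", 3}}
	normalize(10, raw) // MaxPriority is 10 in this scheduler API
	fmt.Println(raw)   // [{machine1 10} {machine2 5}]
}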
@@ -26,11 +26,13 @@ import (
 "k8s.io/kubernetes/pkg/scheduler/schedulercache"
 )

+// NodeLabelPrioritizer contains information to calculate node label priority.
 type NodeLabelPrioritizer struct {
 label string
 presence bool
 }

+// NewNodeLabelPriority creates a NodeLabelPrioritizer.
 func NewNodeLabelPriority(label string, presence bool) (algorithm.PriorityMapFunction, algorithm.PriorityReduceFunction) {
 labelPrioritizer := &NodeLabelPrioritizer{
 label: label,

@@ -39,7 +41,7 @@ func NewNodeLabelPriority(label string, presence bool) (algorithm.PriorityMapFun
 return labelPrioritizer.CalculateNodeLabelPriorityMap, nil
 }

-// CalculateNodeLabelPriority checks whether a particular label exists on a node or not, regardless of its value.
+// CalculateNodeLabelPriorityMap checks whether a particular label exists on a node or not, regardless of its value.
 // If presence is true, prioritizes nodes that have the specified label, regardless of value.
 // If presence is false, prioritizes nodes that do not have the specified label.
 func (n *NodeLabelPrioritizer) CalculateNodeLabelPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {

@@ -27,6 +27,8 @@ import (
 "k8s.io/kubernetes/pkg/scheduler/schedulercache"
 )

+// CalculateNodePreferAvoidPodsPriorityMap priorities nodes according to the node annotation
+// "scheduler.alpha.kubernetes.io/preferAvoidPods".
 func CalculateNodePreferAvoidPodsPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
 node := nodeInfo.Node()
 if node == nil {

@@ -26,11 +26,14 @@ import (
 "k8s.io/kubernetes/pkg/scheduler/schedulercache"
 )

+// ResourceAllocationPriority contains information to calculate resource allocation priority.
 type ResourceAllocationPriority struct {
 Name string
 scorer func(requested, allocable *schedulercache.Resource) int64
 }

+// PriorityMap priorities nodes according to the resource allocations on the node.
+// It will use `scorer` function to calculate the score.
 func (r *ResourceAllocationPriority) PriorityMap(
 pod *v1.Pod,
 meta interface{},

@@ -33,6 +33,7 @@ import (
 // TODO: Any way to justify this weighting?
 const zoneWeighting float64 = 2.0 / 3.0

+// SelectorSpread contains information to calculate selector spread priority.
 type SelectorSpread struct {
 serviceLister algorithm.ServiceLister
 controllerLister algorithm.ControllerLister

@@ -40,6 +41,7 @@ type SelectorSpread struct {
 statefulSetLister algorithm.StatefulSetLister
 }

+// NewSelectorSpreadPriority creates a SelectorSpread.
 func NewSelectorSpreadPriority(
 serviceLister algorithm.ServiceLister,
 controllerLister algorithm.ControllerLister,

@@ -125,16 +127,16 @@ func (s *SelectorSpread) CalculateSpreadPriorityReduce(pod *v1.Pod, meta interfa
 if result[i].Score > maxCountByNodeName {
 maxCountByNodeName = result[i].Score
 }
-zoneId := utilnode.GetZoneKey(nodeNameToInfo[result[i].Host].Node())
-if zoneId == "" {
+zoneID := utilnode.GetZoneKey(nodeNameToInfo[result[i].Host].Node())
+if zoneID == "" {
 continue
 }
-countsByZone[zoneId] += result[i].Score
+countsByZone[zoneID] += result[i].Score
 }

-for zoneId := range countsByZone {
-if countsByZone[zoneId] > maxCountByZone {
-maxCountByZone = countsByZone[zoneId]
+for zoneID := range countsByZone {
+if countsByZone[zoneID] > maxCountByZone {
+maxCountByZone = countsByZone[zoneID]
 }
 }

@@ -152,11 +154,11 @@ func (s *SelectorSpread) CalculateSpreadPriorityReduce(pod *v1.Pod, meta interfa
 }
 // If there is zone information present, incorporate it
 if haveZones {
-zoneId := utilnode.GetZoneKey(nodeNameToInfo[result[i].Host].Node())
-if zoneId != "" {
+zoneID := utilnode.GetZoneKey(nodeNameToInfo[result[i].Host].Node())
+if zoneID != "" {
 zoneScore := MaxPriorityFloat64
 if maxCountByZone > 0 {
-zoneScore = MaxPriorityFloat64 * (float64(maxCountByZone-countsByZone[zoneId]) / maxCountByZoneFloat64)
+zoneScore = MaxPriorityFloat64 * (float64(maxCountByZone-countsByZone[zoneID]) / maxCountByZoneFloat64)
 }
 fScore = (fScore * (1.0 - zoneWeighting)) + (zoneWeighting * zoneScore)
 }
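Note: the zoneID renames above live inside the spreading reduce step, where a node's own spreading score is blended with its zone's score using zoneWeighting = 2/3 (the constant shown earlier in this diff). A small worked example of that blend, with made-up scores and an invented helper name:

package main

import "fmt"

const zoneWeighting = 2.0 / 3.0

// blend mixes one third of the per-node score with two thirds of the per-zone
// score, matching the fScore line in the hunk above.
func blend(nodeScore, zoneScore float64) float64 {
	return nodeScore*(1.0-zoneWeighting) + zoneWeighting*zoneScore
}

func main() {
	// A lightly loaded node (9) in a crowded zone (3) lands closer to the zone score.
	fmt.Printf("%.1f\n", blend(9, 3)) // 5.0
}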
@@ -171,12 +173,14 @@ func (s *SelectorSpread) CalculateSpreadPriorityReduce(pod *v1.Pod, meta interfa
 return nil
 }

+// ServiceAntiAffinity contains information to calculate service anti-affinity priority.
 type ServiceAntiAffinity struct {
 podLister algorithm.PodLister
 serviceLister algorithm.ServiceLister
 label string
 }

+// NewServiceAntiAffinityPriority creates a ServiceAntiAffinity.
 func NewServiceAntiAffinityPriority(podLister algorithm.PodLister, serviceLister algorithm.ServiceLister, label string) (algorithm.PriorityMapFunction, algorithm.PriorityReduceFunction) {
 antiAffinity := &ServiceAntiAffinity{
 podLister: podLister,

@@ -26,7 +26,10 @@ import "k8s.io/api/core/v1"
 // consuming no resources whatsoever. We chose these values to be similar to the
 // resources that we give to cluster addon pods (#10653). But they are pretty arbitrary.
 // As described in #11713, we use request instead of limit to deal with resource requirements.
-const DefaultMilliCpuRequest int64 = 100 // 0.1 core
+
+// DefaultMilliCPURequest defines default milli cpu request number.
+const DefaultMilliCPURequest int64 = 100 // 0.1 core
+// DefaultMemoryRequest defines default memory request size.
 const DefaultMemoryRequest int64 = 200 * 1024 * 1024 // 200 MB

 // GetNonzeroRequests returns the default resource request if none is found or what is provided on the request

@@ -36,7 +39,7 @@ func GetNonzeroRequests(requests *v1.ResourceList) (int64, int64) {
 var outMilliCPU, outMemory int64
 // Override if un-set, but not if explicitly set to zero
 if _, found := (*requests)[v1.ResourceCPU]; !found {
-outMilliCPU = DefaultMilliCpuRequest
+outMilliCPU = DefaultMilliCPURequest
 } else {
 outMilliCPU = requests.Cpu().MilliValue()
 }
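Note: to make the defaulting rule in GetNonzeroRequests concrete: a missing CPU or memory request falls back to the defaults introduced above (100 milli-CPU, 200 MB), while any explicit request, including an explicit zero, is kept. The sketch below imitates that rule with a plain map instead of v1.ResourceList, so the helper name and types are assumptions for this note, not the upstream code.

package main

import "fmt"

const (
	defaultMilliCPURequest int64 = 100               // 0.1 core
	defaultMemoryRequest   int64 = 200 * 1024 * 1024 // 200 MB
)

// nonzeroRequests applies "override if un-set, but not if explicitly set to zero".
func nonzeroRequests(requests map[string]int64) (milliCPU, memory int64) {
	milliCPU, memory = defaultMilliCPURequest, defaultMemoryRequest
	if v, found := requests["cpu"]; found {
		milliCPU = v
	}
	if v, found := requests["memory"]; found {
		memory = v
	}
	return milliCPU, memory
}

func main() {
	cpu, mem := nonzeroRequests(map[string]int64{"memory": 400 * 1024 * 1024})
	fmt.Println(cpu, mem) // 100 419430400: CPU defaulted, memory kept as given
}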
@@ -35,7 +35,7 @@ func TestGetNonzeroRequests(t *testing.T) {
 {
 "cpu_and_memory_not_found",
 v1.ResourceList{},
-DefaultMilliCpuRequest,
+DefaultMilliCPURequest,
 DefaultMemoryRequest,
 },
 {

@@ -51,7 +51,7 @@ func TestGetNonzeroRequests(t *testing.T) {
 v1.ResourceList{
 v1.ResourceMemory: resource.MustParse("400Mi"),
 },
-DefaultMilliCpuRequest,
+DefaultMilliCPURequest,
 400 * 1024 * 1024,
 },
 {

@@ -70,6 +70,7 @@ func NodesHaveSameTopologyKey(nodeA, nodeB *v1.Node, topologyKey string) bool {
 return false
 }

+// Topologies contains topologies information of nodes.
 type Topologies struct {
 DefaultKeys []string
 }

@@ -21,6 +21,7 @@ import (
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )

+// GetControllerRef gets pod's owner controller reference from a pod object.
 func GetControllerRef(pod *v1.Pod) *metav1.OwnerReference {
 if len(pod.OwnerReferences) == 0 {
 return nil

@@ -47,10 +47,12 @@ type PredicateMetadataProducer func(pod *v1.Pod, nodeNameToInfo map[string]*sche
 // is now used for only for priority functions. For predicates please use PredicateMetadataProducer.
 type PriorityMetadataProducer func(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo) interface{}

+// PriorityFunction is a function that computes scores for all nodes.
 // DEPRECATED
 // Use Map-Reduce pattern for priority functions.
 type PriorityFunction func(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error)

+// PriorityConfig is a config used for a priority function.
 type PriorityConfig struct {
 Name string
 Map PriorityMapFunction

@@ -71,10 +73,12 @@ func EmptyPriorityMetadataProducer(pod *v1.Pod, nodeNameToInfo map[string]*sched
 return nil
 }

+// PredicateFailureReason interface represents the failure reason of a predicate.
 type PredicateFailureReason interface {
 GetReason() string
 }

+// GetEquivalencePodFunc is a function that gets a EquivalencePod from a pod.
 type GetEquivalencePodFunc func(pod *v1.Pod) interface{}

 // NodeLister interface represents anything that can list nodes for a scheduler.

@@ -157,6 +161,7 @@ func (f EmptyStatefulSetLister) GetPodStatefulSets(pod *v1.Pod) (sss []*apps.Sta
 return nil, nil
 }

+// PredicateMetadata interface represents anything that can access a predicate metadata.
 type PredicateMetadata interface {
 ShallowCopy() PredicateMetadata
 AddPod(addedPod *v1.Pod, nodeInfo *schedulercache.NodeInfo) error

@@ -17,18 +17,18 @@ limitations under the License.
 package algorithm

 const (
-// When feature-gate for TaintBasedEvictions=true flag is enabled,
-// TaintNodeNotReady would be automatically added by node controller
-// when node is not ready, and removed when node becomes ready.
+// TaintNodeNotReady will be added when node is not ready
+// and feature-gate for TaintBasedEvictions flag is enabled,
+// and removed when node becomes ready.
 TaintNodeNotReady = "node.kubernetes.io/not-ready"

 // DeprecatedTaintNodeNotReady is the deprecated version of TaintNodeNotReady.
 // It is deprecated since 1.9
 DeprecatedTaintNodeNotReady = "node.alpha.kubernetes.io/notReady"

-// When feature-gate for TaintBasedEvictions=true flag is enabled,
-// TaintNodeUnreachable would be automatically added by node controller
-// when node becomes unreachable (corresponding to NodeReady status ConditionUnknown)
+// TaintNodeUnreachable will be added when node becomes unreachable
+// (corresponding to NodeReady status ConditionUnknown)
+// and feature-gate for TaintBasedEvictions flag is enabled,
 // and removed when node becomes reachable (NodeReady status ConditionTrue).
 TaintNodeUnreachable = "node.kubernetes.io/unreachable"

@@ -36,28 +36,28 @@ const (
 // It is deprecated since 1.9
 DeprecatedTaintNodeUnreachable = "node.alpha.kubernetes.io/unreachable"

-// When feature-gate for TaintBasedEvictions=true flag is enabled,
-// TaintNodeOutOfDisk would be automatically added by node controller
-// when node becomes out of disk, and removed when node has enough disk.
+// TaintNodeOutOfDisk will be added when node becomes out of disk
+// and feature-gate for TaintBasedEvictions flag is enabled,
+// and removed when node has enough disk.
 TaintNodeOutOfDisk = "node.kubernetes.io/out-of-disk"

-// When feature-gate for TaintBasedEvictions=true flag is enabled,
-// TaintNodeMemoryPressure would be automatically added by node controller
-// when node has memory pressure, and removed when node has enough memory.
+// TaintNodeMemoryPressure will be added when node has memory pressure
+// and feature-gate for TaintBasedEvictions flag is enabled,
+// and removed when node has enough memory.
 TaintNodeMemoryPressure = "node.kubernetes.io/memory-pressure"

-// When feature-gate for TaintBasedEvictions=true flag is enabled,
-// TaintNodeDiskPressure would be automatically added by node controller
-// when node has disk pressure, and removed when node has enough disk.
+// TaintNodeDiskPressure will be added when node has disk pressure
+// and feature-gate for TaintBasedEvictions flag is enabled,
+// and removed when node has enough disk.
 TaintNodeDiskPressure = "node.kubernetes.io/disk-pressure"

-// When feature-gate for TaintBasedEvictions=true flag is enabled,
-// TaintNodeNetworkUnavailable would be automatically added by node controller
-// when node's network is unavailable, and removed when network becomes ready.
+// TaintNodeNetworkUnavailable will be added when node's network is unavailable
+// and feature-gate for TaintBasedEvictions flag is enabled,
+// and removed when network becomes ready.
 TaintNodeNetworkUnavailable = "node.kubernetes.io/network-unavailable"

-// When kubelet is started with the "external" cloud provider, then
-// it sets this taint on a node to mark it as unusable, until a controller
+// TaintExternalCloudProvider sets this taint on a node to mark it as unusable,
+// when kubelet is started with the "external" cloud provider, until a controller
 // from the cloud-controller-manager intitializes this node, and then removes
 // the taint
 TaintExternalCloudProvider = "node.cloudprovider.kubernetes.io/uninitialized"

@@ -22,6 +22,7 @@ import (
 "k8s.io/apimachinery/pkg/runtime/serializer/json"
 "k8s.io/apimachinery/pkg/runtime/serializer/versioning"
 schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
+// Init the api v1 package
 _ "k8s.io/kubernetes/pkg/scheduler/api/v1"
 )

@@ -31,7 +31,9 @@ var Scheme = runtime.NewScheme()
 var SchemeGroupVersion = schema.GroupVersion{Group: "", Version: runtime.APIVersionInternal}

 var (
+// SchemeBuilder defines a SchemeBuilder object.
 SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+// AddToScheme is used to add stored functions to scheme.
 AddToScheme = SchemeBuilder.AddToScheme
 )

@@ -26,15 +26,21 @@ import (
 )

 const (
+// MaxUint defines the max unsigned int value.
 MaxUint = ^uint(0)
+// MaxInt defines the max signed int value.
 MaxInt = int(MaxUint >> 1)
+// MaxTotalPriority defines the max total priority value.
 MaxTotalPriority = MaxInt
+// MaxPriority defines the max priority value.
 MaxPriority = 10
+// MaxWeight defines the max weight value.
 MaxWeight = MaxInt / MaxPriority
 )

 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

+// Policy describes a struct of a policy resource in api.
 type Policy struct {
 metav1.TypeMeta
 // Holds the information to configure the fit predicate functions.
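Note: the MaxUint and MaxInt constants documented above rely on a standard bit trick: complementing a zero unsigned int sets every bit, giving the largest uint, and shifting that right once clears what would be the sign bit so the value fits in a signed int. A tiny self-contained check of the trick and of the derived MaxWeight = MaxInt / MaxPriority relationship (illustrative only):

package main

import "fmt"

func main() {
	maxUint := ^uint(0)         // all bits set: the largest unsigned value
	maxInt := int(maxUint >> 1) // drop the top bit: the largest signed value
	const maxPriority = 10
	maxWeight := maxInt / maxPriority

	// On a 64-bit platform this prints 9223372036854775807 922337203685477580.
	fmt.Println(maxInt, maxWeight)
}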
@@ -60,6 +66,7 @@ type Policy struct {
 AlwaysCheckAllPredicates bool
 }

+// PredicatePolicy describes a struct of a predicate policy.
 type PredicatePolicy struct {
 // Identifier of the predicate policy
 // For a custom predicate, the name can be user-defined

@@ -69,6 +76,7 @@ type PredicatePolicy struct {
 Argument *PredicateArgument
 }

+// PriorityPolicy describes a struct of a priority policy.
 type PriorityPolicy struct {
 // Identifier of the priority policy
 // For a custom priority, the name can be user-defined

@@ -151,8 +159,8 @@ type ExtenderConfig struct {
 // If this method is implemented by the extender, it is the extender's responsibility to bind the pod to apiserver. Only one extender
 // can implement this function.
 BindVerb string
-// EnableHttps specifies whether https should be used to communicate with the extender
-EnableHttps bool
+// EnableHTTPS specifies whether https should be used to communicate with the extender
+EnableHTTPS bool
 // TLSConfig specifies the transport layer security config
 TLSConfig *restclient.TLSClientConfig
 // HTTPTimeout specifies the timeout duration for a call to the extender. Filter timeout fails the scheduling of the pod. Prioritize

@@ -220,6 +228,7 @@ type HostPriority struct {
 Score int
 }

+// HostPriorityList declares a []HostPriority type.
 type HostPriorityList []HostPriority

 func (h HostPriorityList) Len() int {

@@ -36,8 +36,11 @@ func init() {
 var (
 // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
 // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.

+// SchemeBuilder is a v1 api scheme builder.
 SchemeBuilder runtime.SchemeBuilder
 localSchemeBuilder = &SchemeBuilder
+// AddToScheme is used to add stored functions to scheme.
 AddToScheme = localSchemeBuilder.AddToScheme
 )

@@ -27,6 +27,7 @@ import (

 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

+// Policy describes a struct for a policy resource used in api.
 type Policy struct {
 metav1.TypeMeta `json:",inline"`
 // Holds the information to configure the fit predicate functions

@@ -47,6 +48,7 @@ type Policy struct {
 AlwaysCheckAllPredicates bool `json:"alwaysCheckAllPredicates"`
 }

+// PredicatePolicy describes a struct of a predicate policy.
 type PredicatePolicy struct {
 // Identifier of the predicate policy
 // For a custom predicate, the name can be user-defined

@@ -56,6 +58,7 @@ type PredicatePolicy struct {
 Argument *PredicateArgument `json:"argument"`
 }

+// PriorityPolicy describes a struct of a priority policy.
 type PriorityPolicy struct {
 // Identifier of the priority policy
 // For a custom priority, the name can be user-defined

@@ -138,8 +141,8 @@ type ExtenderConfig struct {
 // If this method is implemented by the extender, it is the extender's responsibility to bind the pod to apiserver. Only one extender
 // can implement this function.
 BindVerb string
-// EnableHttps specifies whether https should be used to communicate with the extender
-EnableHttps bool `json:"enableHttps,omitempty"`
+// EnableHTTPS specifies whether https should be used to communicate with the extender
+EnableHTTPS bool `json:"enableHttps,omitempty"`
 // TLSConfig specifies the transport layer security config
 TLSConfig *restclient.TLSClientConfig `json:"tlsConfig,omitempty"`
 // HTTPTimeout specifies the timeout duration for a call to the extender. Filter timeout fails the scheduling of the pod. Prioritize

@@ -207,6 +210,7 @@ type HostPriority struct {
 Score int `json:"score"`
 }

+// HostPriorityList declares a []HostPriority type.
 type HostPriorityList []HostPriority

 func (h HostPriorityList) Len() int {

@@ -62,6 +62,7 @@ func newAlgorithmCache() AlgorithmCache {
 }
 }

+// NewEquivalenceCache creates a EquivalenceCache object.
 func NewEquivalenceCache(getEquivalencePodFunc algorithm.GetEquivalencePodFunc) *EquivalenceCache {
 return &EquivalenceCache{
 getEquivalencePod: getEquivalencePodFunc,

@@ -119,15 +120,13 @@ func (ec *EquivalenceCache) PredicateWithECache(
 if hostPredicate, ok := predicateMap[equivalenceHash]; ok {
 if hostPredicate.Fit {
 return true, []algorithm.PredicateFailureReason{}, false
-} else {
+}
 return false, hostPredicate.FailReasons, false
 }
-} else {
 // is invalid
 return false, []algorithm.PredicateFailureReason{}, true
 }
 }
-}
 return false, []algorithm.PredicateFailureReason{}, true
 }
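Note: the restructured lookup above keeps the same three-way contract: a cache hit returns the stored fit result and its failure reasons, while a miss reports the entry as invalid so the caller re-runs the real predicate. A minimal sketch of that contract with invented types and names (cachedResult, lookup), not the upstream EquivalenceCache:

package main

import "fmt"

type cachedResult struct {
	Fit         bool
	FailReasons []string
}

// lookup returns (fit, reasons, invalid); invalid=true means "no usable entry,
// evaluate the predicate for real", mirroring the hunk above.
func lookup(cache map[uint64]cachedResult, hash uint64) (bool, []string, bool) {
	if res, ok := cache[hash]; ok {
		if res.Fit {
			return true, nil, false
		}
		return false, res.FailReasons, false
	}
	return false, nil, true
}

func main() {
	cache := map[uint64]cachedResult{
		42: {Fit: false, FailReasons: []string{"example failure reason"}},
	}
	fmt.Println(lookup(cache, 42)) // false [example failure reason] false
	fmt.Println(lookup(cache, 7))  // false [] true  (miss, so the entry is invalid)
}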
@@ -33,6 +33,7 @@ import (
 )

 const (
+// DefaultExtenderTimeout defines the default extender timeout in second.
 DefaultExtenderTimeout = 5 * time.Second
 )

@@ -52,7 +53,7 @@ func makeTransport(config *schedulerapi.ExtenderConfig) (http.RoundTripper, erro
 if config.TLSConfig != nil {
 cfg.TLSClientConfig = *config.TLSConfig
 }
-if config.EnableHttps {
+if config.EnableHTTPS {
 hasCA := len(cfg.CAFile) > 0 || len(cfg.CAData) > 0
 if !hasCA {
 cfg.Insecure = true

@@ -70,6 +71,7 @@ func makeTransport(config *schedulerapi.ExtenderConfig) (http.RoundTripper, erro
 return utilnet.SetTransportDefaults(&http.Transport{}), nil
 }

+// NewHTTPExtender creates an HTTPExtender object.
 func NewHTTPExtender(config *schedulerapi.ExtenderConfig) (algorithm.SchedulerExtender, error) {
 if config.HTTPTimeout.Nanoseconds() == 0 {
 config.HTTPTimeout = time.Duration(DefaultExtenderTimeout)

@@ -44,22 +44,27 @@ import (
 "k8s.io/kubernetes/pkg/scheduler/volumebinder"
 )

+// FailedPredicateMap declares a map[string][]algorithm.PredicateFailureReason type.
 type FailedPredicateMap map[string][]algorithm.PredicateFailureReason

+// FitError describes a fit error of a pod.
 type FitError struct {
 Pod *v1.Pod
 NumAllNodes int
 FailedPredicates FailedPredicateMap
 }

+// Victims describes pod victims.
 type Victims struct {
 pods []*v1.Pod
 numPDBViolations int
 }

+// ErrNoNodesAvailable defines an error of no nodes available.
 var ErrNoNodesAvailable = fmt.Errorf("no nodes available to schedule pods")

 const (
+// NoNodeAvailableMsg is used to format message when no nodes available.
 NoNodeAvailableMsg = "0/%v nodes are available"
 )

@@ -68,7 +73,7 @@ func (f *FitError) Error() string {
 reasons := make(map[string]int)
 for _, predicates := range f.FailedPredicates {
 for _, pred := range predicates {
-reasons[pred.GetReason()] += 1
+reasons[pred.GetReason()]++
 }
 }
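Note: the increment cleanup above sits in the code that counts predicate failures per reason and formats them behind NoNodeAvailableMsg ("0/%v nodes are available"). A rough sketch of assembling such a summary from the reasons map; the formatting details here are illustrative and do not copy FitError.Error:

package main

import (
	"fmt"
	"sort"
	"strings"
)

// summarize turns per-reason failure counts into a single scheduler-style message.
func summarize(numAllNodes int, reasons map[string]int) string {
	parts := make([]string, 0, len(reasons))
	for reason, count := range reasons {
		parts = append(parts, fmt.Sprintf("%d %s", count, reason))
	}
	sort.Strings(parts) // deterministic order for the example
	return fmt.Sprintf("0/%v nodes are available: %s.", numAllNodes, strings.Join(parts, ", "))
}

func main() {
	reasons := map[string]int{"example reason A": 2, "example reason B": 1}
	fmt.Println(summarize(3, reasons))
	// 0/3 nodes are available: 1 example reason B, 2 example reason A.
}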
@@ -383,7 +388,7 @@ func addNominatedPods(podPriority int32, meta algorithm.PredicateMetadata,
 if nominatedPods == nil || len(nominatedPods) == 0 {
 return false, meta, nodeInfo
 }
-var metaOut algorithm.PredicateMetadata = nil
+var metaOut algorithm.PredicateMetadata
 if meta != nil {
 metaOut = meta.ShallowCopy()
 }

@@ -460,7 +465,7 @@ func podFitsOnNode(
 // TODO(bsalamat): consider using eCache and adding proper eCache invalidations
 // when pods are nominated or their nominations change.
 eCacheAvailable = equivCacheInfo != nil && !podsAdded
-for _, predicateKey := range predicates.PredicatesOrdering() {
+for _, predicateKey := range predicates.Ordering() {
 //TODO (yastij) : compute average predicate restrictiveness to export it as Prometheus metric
 if predicate, exist := predicateFuncs[predicateKey]; exist {
 if eCacheAvailable {

@@ -513,7 +518,7 @@ func podFitsOnNode(
 return len(failedPredicates) == 0, failedPredicates, nil
 }

-// Prioritizes the nodes by running the individual priority functions in parallel.
+// PrioritizeNodes prioritizes the nodes by running the individual priority functions in parallel.
 // Each priority function is expected to set a score of 0-10
 // 0 is the lowest priority score (least preferred node) and 10 is the highest
 // Each priority function can also have its own weight

@@ -652,7 +657,7 @@ func PrioritizeNodes(
 return result, nil
 }

-// EqualPriority is a prioritizer function that gives an equal weight of one to all nodes
+// EqualPriorityMap is a prioritizer function that gives an equal weight of one to all nodes
 func EqualPriorityMap(_ *v1.Pod, _ interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
 node := nodeInfo.Node()
 if node == nil {

@@ -1050,6 +1055,7 @@ func podPassesBasicChecks(pod *v1.Pod, pvcLister corelisters.PersistentVolumeCla
 return nil
 }

+// NewGenericScheduler creates a genericScheduler object.
 func NewGenericScheduler(
 cache schedulercache.Cache,
 eCache *EquivalenceCache,

@@ -546,7 +546,7 @@ func TestZeroRequest(t *testing.T) {
 Resources: v1.ResourceRequirements{
 Requests: v1.ResourceList{
 v1.ResourceCPU: resource.MustParse(
-strconv.FormatInt(priorityutil.DefaultMilliCpuRequest, 10) + "m"),
+strconv.FormatInt(priorityutil.DefaultMilliCPURequest, 10) + "m"),
 v1.ResourceMemory: resource.MustParse(
 strconv.FormatInt(priorityutil.DefaultMemoryRequest, 10)),
 },

@@ -563,7 +563,7 @@ func TestZeroRequest(t *testing.T) {
 Resources: v1.ResourceRequirements{
 Requests: v1.ResourceList{
 v1.ResourceCPU: resource.MustParse(
-strconv.FormatInt(priorityutil.DefaultMilliCpuRequest*3, 10) + "m"),
+strconv.FormatInt(priorityutil.DefaultMilliCPURequest*3, 10) + "m"),
 v1.ResourceMemory: resource.MustParse(
 strconv.FormatInt(priorityutil.DefaultMemoryRequest*3, 10)),
 },
@@ -716,7 +716,7 @@ var smallContainers = []v1.Container{
 Resources: v1.ResourceRequirements{
 Requests: v1.ResourceList{
 "cpu": resource.MustParse(
-strconv.FormatInt(priorityutil.DefaultMilliCpuRequest, 10) + "m"),
+strconv.FormatInt(priorityutil.DefaultMilliCPURequest, 10) + "m"),
 "memory": resource.MustParse(
 strconv.FormatInt(priorityutil.DefaultMemoryRequest, 10)),
 },

@@ -728,7 +728,7 @@ var mediumContainers = []v1.Container{
 Resources: v1.ResourceRequirements{
 Requests: v1.ResourceList{
 "cpu": resource.MustParse(
-strconv.FormatInt(priorityutil.DefaultMilliCpuRequest*2, 10) + "m"),
+strconv.FormatInt(priorityutil.DefaultMilliCPURequest*2, 10) + "m"),
 "memory": resource.MustParse(
 strconv.FormatInt(priorityutil.DefaultMemoryRequest*2, 10)),
 },

@@ -740,7 +740,7 @@ var largeContainers = []v1.Container{
 Resources: v1.ResourceRequirements{
 Requests: v1.ResourceList{
 "cpu": resource.MustParse(
-strconv.FormatInt(priorityutil.DefaultMilliCpuRequest*3, 10) + "m"),
+strconv.FormatInt(priorityutil.DefaultMilliCPURequest*3, 10) + "m"),
 "memory": resource.MustParse(
 strconv.FormatInt(priorityutil.DefaultMemoryRequest*3, 10)),
 },

@@ -752,7 +752,7 @@ var veryLargeContainers = []v1.Container{
 Resources: v1.ResourceRequirements{
 Requests: v1.ResourceList{
 "cpu": resource.MustParse(
-strconv.FormatInt(priorityutil.DefaultMilliCpuRequest*5, 10) + "m"),
+strconv.FormatInt(priorityutil.DefaultMilliCPURequest*5, 10) + "m"),
 "memory": resource.MustParse(
 strconv.FormatInt(priorityutil.DefaultMemoryRequest*5, 10)),
 },

@@ -883,7 +883,7 @@ func TestSelectNodesForPreemption(t *testing.T) {
 for _, test := range tests {
 nodes := []*v1.Node{}
 for _, n := range test.nodes {
-node := makeNode(n, priorityutil.DefaultMilliCpuRequest*5, priorityutil.DefaultMemoryRequest*5)
+node := makeNode(n, priorityutil.DefaultMilliCPURequest*5, priorityutil.DefaultMemoryRequest*5)
 node.ObjectMeta.Labels = map[string]string{"hostname": node.Name}
 nodes = append(nodes, node)
 }

@@ -1046,7 +1046,7 @@ func TestPickOneNodeForPreemption(t *testing.T) {
 for _, test := range tests {
 nodes := []*v1.Node{}
 for _, n := range test.nodes {
-nodes = append(nodes, makeNode(n, priorityutil.DefaultMilliCpuRequest*5, priorityutil.DefaultMemoryRequest*5))
+nodes = append(nodes, makeNode(n, priorityutil.DefaultMilliCPURequest*5, priorityutil.DefaultMemoryRequest*5))
 }
 nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, nodes)
 candidateNodes, _ := selectNodesForPreemption(test.pod, nodeNameToInfo, nodes, test.predicates, PredicateMetadata, nil, nil)

@@ -1285,7 +1285,7 @@ func TestPreempt(t *testing.T) {
 cache.AddPod(pod)
 }
 for _, name := range nodeNames {
-cache.AddNode(makeNode(name, priorityutil.DefaultMilliCpuRequest*5, priorityutil.DefaultMemoryRequest*5))
+cache.AddNode(makeNode(name, priorityutil.DefaultMilliCPURequest*5, priorityutil.DefaultMemoryRequest*5))
 }
 extenders := []algorithm.SchedulerExtender{}
 for _, extender := range test.extenders {
@@ -76,10 +76,12 @@ type FIFO struct {

 var _ = SchedulingQueue(&FIFO{}) // Making sure that FIFO implements SchedulingQueue.

+// Add adds a pod to the FIFO.
 func (f *FIFO) Add(pod *v1.Pod) error {
 return f.FIFO.Add(pod)
 }

+// AddIfNotPresent adds a pod to the FIFO if it is absent in the FIFO.
 func (f *FIFO) AddIfNotPresent(pod *v1.Pod) error {
 return f.FIFO.AddIfNotPresent(pod)
 }

@@ -90,10 +92,12 @@ func (f *FIFO) AddUnschedulableIfNotPresent(pod *v1.Pod) error {
 return f.FIFO.AddIfNotPresent(pod)
 }

+// Update updates a pod in the FIFO.
 func (f *FIFO) Update(pod *v1.Pod) error {
 return f.FIFO.Update(pod)
 }

+// Delete deletes a pod in the FIFO.
 func (f *FIFO) Delete(pod *v1.Pod) error {
 return f.FIFO.Delete(pod)
 }

@@ -114,7 +118,11 @@ func (f *FIFO) Pop() (*v1.Pod, error) {

 // FIFO does not need to react to events, as all pods are always in the active
 // scheduling queue anyway.

+// AssignedPodAdded does nothing here.
 func (f *FIFO) AssignedPodAdded(pod *v1.Pod) {}

+// AssignedPodUpdated does nothing here.
 func (f *FIFO) AssignedPodUpdated(pod *v1.Pod) {}

 // MoveAllToActiveQueue does nothing in FIFO as all pods are always in the active queue.

@@ -126,6 +134,7 @@ func (f *FIFO) WaitingPodsForNode(nodeName string) []*v1.Pod {
 return nil
 }

+// NewFIFO creates a FIFO object.
 func NewFIFO() *FIFO {
 return &FIFO{FIFO: cache.NewFIFO(cache.MetaNamespaceKeyFunc)}
 }

@@ -169,6 +178,7 @@ type PriorityQueue struct {
 // Making sure that PriorityQueue implements SchedulingQueue.
 var _ = SchedulingQueue(&PriorityQueue{})

+// NewPriorityQueue creates a PriorityQueue object.
 func NewPriorityQueue() *PriorityQueue {
 pq := &PriorityQueue{
 activeQ: newHeap(cache.MetaNamespaceKeyFunc, util.HigherPriorityPod),

@@ -294,11 +304,10 @@ func (p *PriorityQueue) Update(pod *v1.Pod) error {
 p.cond.Broadcast()
 }
 return err
-} else {
+}
 p.unschedulableQ.Update(pod)
 return nil
 }
-}
 // If pod is not in any of the two queue, we put it in the active queue.
 err := p.activeQ.Add(pod)
 if err == nil {

@@ -417,6 +426,7 @@ type UnschedulablePodsMap struct {

 var _ = UnschedulablePods(&UnschedulablePodsMap{})

+// NominatedNodeName returns the nominated node name of a pod.
 func NominatedNodeName(pod *v1.Pod) string {
 return pod.Status.NominatedNodeName
 }

@@ -517,7 +527,10 @@ func newUnschedulablePodsMap() *UnschedulablePodsMap {
 // as cache.heap, however, this heap does not perform synchronization. It leaves
 // synchronization to the SchedulingQueue.

+// LessFunc is a function type to compare two objects.
 type LessFunc func(interface{}, interface{}) bool

+// KeyFunc is a function type to get the key from an object.
 type KeyFunc func(obj interface{}) (string, error)

 type heapItem struct {

@@ -681,9 +694,8 @@ func (h *Heap) Pop() (interface{}, error) {
 obj := heap.Pop(h.data)
 if obj != nil {
 return obj, nil
-} else {
-return nil, fmt.Errorf("object was removed from heap data")
 }
+return nil, fmt.Errorf("object was removed from heap data")
 }

 // Get returns the requested item, or sets exists=false.
@ -190,10 +190,9 @@ func NewConfigFactory(
|
||||||
case cache.DeletedFinalStateUnknown:
|
case cache.DeletedFinalStateUnknown:
|
||||||
if pod, ok := t.Obj.(*v1.Pod); ok {
|
if pod, ok := t.Obj.(*v1.Pod); ok {
|
||||||
return assignedNonTerminatedPod(pod)
|
return assignedNonTerminatedPod(pod)
|
||||||
} else {
|
}
|
||||||
runtime.HandleError(fmt.Errorf("unable to convert object %T to *v1.Pod in %T", obj, c))
|
runtime.HandleError(fmt.Errorf("unable to convert object %T to *v1.Pod in %T", obj, c))
|
||||||
return false
|
return false
|
||||||
}
|
|
||||||
default:
|
default:
|
||||||
runtime.HandleError(fmt.Errorf("unable to handle object in %T: %T", c, obj))
|
runtime.HandleError(fmt.Errorf("unable to handle object in %T: %T", c, obj))
|
||||||
return false
|
return false
|
||||||
|
@ -216,10 +215,9 @@ func NewConfigFactory(
|
||||||
case cache.DeletedFinalStateUnknown:
|
case cache.DeletedFinalStateUnknown:
|
||||||
if pod, ok := t.Obj.(*v1.Pod); ok {
|
if pod, ok := t.Obj.(*v1.Pod); ok {
|
||||||
return unassignedNonTerminatedPod(pod)
|
return unassignedNonTerminatedPod(pod)
|
||||||
} else {
|
}
|
||||||
runtime.HandleError(fmt.Errorf("unable to convert object %T to *v1.Pod in %T", obj, c))
|
runtime.HandleError(fmt.Errorf("unable to convert object %T to *v1.Pod in %T", obj, c))
|
||||||
return false
|
return false
|
||||||
}
|
|
||||||
default:
|
default:
|
||||||
runtime.HandleError(fmt.Errorf("unable to handle object in %T: %T", c, obj))
|
runtime.HandleError(fmt.Errorf("unable to handle object in %T: %T", c, obj))
|
||||||
return false
|
return false
|
||||||
|
@ -531,13 +529,13 @@ func (c *configFactory) GetHardPodAffinitySymmetricWeight() int32 {
|
||||||
return c.hardPodAffinitySymmetricWeight
|
return c.hardPodAffinitySymmetricWeight
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *configFactory) GetSchedulerName() string {
|
func (c *configFactory) GetSchedulerName() string {
|
||||||
return f.schedulerName
|
return c.schedulerName
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetClient provides a kubernetes client, mostly internal use, but may also be called by mock-tests.
|
// GetClient provides a kubernetes client, mostly internal use, but may also be called by mock-tests.
|
||||||
func (f *configFactory) GetClient() clientset.Interface {
|
func (c *configFactory) GetClient() clientset.Interface {
|
||||||
return f.client
|
return c.client
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetScheduledPodListerIndexer provides a pod lister, mostly internal use, but may also be called by mock-tests.
|
// GetScheduledPodListerIndexer provides a pod lister, mostly internal use, but may also be called by mock-tests.
|
||||||
|
@ -865,23 +863,23 @@ func (c *configFactory) deletePDBFromCache(obj interface{}) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create creates a scheduler with the default algorithm provider.
|
// Create creates a scheduler with the default algorithm provider.
|
||||||
func (f *configFactory) Create() (*scheduler.Config, error) {
|
func (c *configFactory) Create() (*scheduler.Config, error) {
|
||||||
return f.CreateFromProvider(DefaultProvider)
|
return c.CreateFromProvider(DefaultProvider)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Creates a scheduler from the name of a registered algorithm provider.
|
// Creates a scheduler from the name of a registered algorithm provider.
|
||||||
func (f *configFactory) CreateFromProvider(providerName string) (*scheduler.Config, error) {
|
func (c *configFactory) CreateFromProvider(providerName string) (*scheduler.Config, error) {
|
||||||
glog.V(2).Infof("Creating scheduler from algorithm provider '%v'", providerName)
|
glog.V(2).Infof("Creating scheduler from algorithm provider '%v'", providerName)
|
||||||
provider, err := GetAlgorithmProvider(providerName)
|
provider, err := GetAlgorithmProvider(providerName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
return f.CreateFromKeys(provider.FitPredicateKeys, provider.PriorityFunctionKeys, []algorithm.SchedulerExtender{})
|
return c.CreateFromKeys(provider.FitPredicateKeys, provider.PriorityFunctionKeys, []algorithm.SchedulerExtender{})
|
||||||
}
|
}
|
||||||
|
|
||||||
// Creates a scheduler from the configuration file
|
// Creates a scheduler from the configuration file
|
||||||
func (f *configFactory) CreateFromConfig(policy schedulerapi.Policy) (*scheduler.Config, error) {
|
func (c *configFactory) CreateFromConfig(policy schedulerapi.Policy) (*scheduler.Config, error) {
|
||||||
glog.V(2).Infof("Creating scheduler from configuration: %v", policy)
|
glog.V(2).Infof("Creating scheduler from configuration: %v", policy)
|
||||||
|
|
||||||
// validate the policy configuration
|
// validate the policy configuration
|
||||||
|
@@ -923,98 +921,98 @@ func (f *configFactory) CreateFromConfig(policy schedulerapi.Policy) (*scheduler
	if len(policy.ExtenderConfigs) != 0 {
		for ii := range policy.ExtenderConfigs {
			glog.V(2).Infof("Creating extender with config %+v", policy.ExtenderConfigs[ii])
-			if extender, err := core.NewHTTPExtender(&policy.ExtenderConfigs[ii]); err != nil {
+			extender, err := core.NewHTTPExtender(&policy.ExtenderConfigs[ii])
+			if err != nil {
				return nil, err
-			} else {
-				extenders = append(extenders, extender)
			}
+			extenders = append(extenders, extender)
		}
	}
	// Providing HardPodAffinitySymmetricWeight in the policy config is the new and preferred way of providing the value.
	// Give it higher precedence than scheduler CLI configuration when it is provided.
	if policy.HardPodAffinitySymmetricWeight != 0 {
-		f.hardPodAffinitySymmetricWeight = policy.HardPodAffinitySymmetricWeight
+		c.hardPodAffinitySymmetricWeight = policy.HardPodAffinitySymmetricWeight
	}
	// When AlwaysCheckAllPredicates is set to true, scheduler checks all the configured
	// predicates even after one or more of them fails.
	if policy.AlwaysCheckAllPredicates {
-		f.alwaysCheckAllPredicates = policy.AlwaysCheckAllPredicates
+		c.alwaysCheckAllPredicates = policy.AlwaysCheckAllPredicates
	}

-	return f.CreateFromKeys(predicateKeys, priorityKeys, extenders)
+	return c.CreateFromKeys(predicateKeys, priorityKeys, extenders)
}

// getBinder returns an extender that supports bind or a default binder.
-func (f *configFactory) getBinder(extenders []algorithm.SchedulerExtender) scheduler.Binder {
+func (c *configFactory) getBinder(extenders []algorithm.SchedulerExtender) scheduler.Binder {
	for i := range extenders {
		if extenders[i].IsBinder() {
			return extenders[i]
		}
	}
-	return &binder{f.client}
+	return &binder{c.client}
}

// Creates a scheduler from a set of registered fit predicate keys and priority keys.
-func (f *configFactory) CreateFromKeys(predicateKeys, priorityKeys sets.String, extenders []algorithm.SchedulerExtender) (*scheduler.Config, error) {
+func (c *configFactory) CreateFromKeys(predicateKeys, priorityKeys sets.String, extenders []algorithm.SchedulerExtender) (*scheduler.Config, error) {
	glog.V(2).Infof("Creating scheduler with fit predicates '%v' and priority functions '%v'", predicateKeys, priorityKeys)

-	if f.GetHardPodAffinitySymmetricWeight() < 1 || f.GetHardPodAffinitySymmetricWeight() > 100 {
-		return nil, fmt.Errorf("invalid hardPodAffinitySymmetricWeight: %d, must be in the range 1-100", f.GetHardPodAffinitySymmetricWeight())
+	if c.GetHardPodAffinitySymmetricWeight() < 1 || c.GetHardPodAffinitySymmetricWeight() > 100 {
+		return nil, fmt.Errorf("invalid hardPodAffinitySymmetricWeight: %d, must be in the range 1-100", c.GetHardPodAffinitySymmetricWeight())
	}

-	predicateFuncs, err := f.GetPredicates(predicateKeys)
+	predicateFuncs, err := c.GetPredicates(predicateKeys)
	if err != nil {
		return nil, err
	}

-	priorityConfigs, err := f.GetPriorityFunctionConfigs(priorityKeys)
+	priorityConfigs, err := c.GetPriorityFunctionConfigs(priorityKeys)
	if err != nil {
		return nil, err
	}

-	priorityMetaProducer, err := f.GetPriorityMetadataProducer()
+	priorityMetaProducer, err := c.GetPriorityMetadataProducer()
	if err != nil {
		return nil, err
	}

-	predicateMetaProducer, err := f.GetPredicateMetadataProducer()
+	predicateMetaProducer, err := c.GetPredicateMetadataProducer()
	if err != nil {
		return nil, err
	}

	// Init equivalence class cache
-	if f.enableEquivalenceClassCache && getEquivalencePodFuncFactory != nil {
-		pluginArgs, err := f.getPluginArgs()
+	if c.enableEquivalenceClassCache && getEquivalencePodFuncFactory != nil {
+		pluginArgs, err := c.getPluginArgs()
		if err != nil {
			return nil, err
		}
-		f.equivalencePodCache = core.NewEquivalenceCache(
+		c.equivalencePodCache = core.NewEquivalenceCache(
			getEquivalencePodFuncFactory(*pluginArgs),
		)
		glog.Info("Created equivalence class cache")
	}

-	algo := core.NewGenericScheduler(f.schedulerCache, f.equivalencePodCache, f.podQueue, predicateFuncs, predicateMetaProducer, priorityConfigs, priorityMetaProducer, extenders, f.volumeBinder, f.pVCLister, f.alwaysCheckAllPredicates)
+	algo := core.NewGenericScheduler(c.schedulerCache, c.equivalencePodCache, c.podQueue, predicateFuncs, predicateMetaProducer, priorityConfigs, priorityMetaProducer, extenders, c.volumeBinder, c.pVCLister, c.alwaysCheckAllPredicates)

	podBackoff := util.CreateDefaultPodBackoff()
	return &scheduler.Config{
-		SchedulerCache: f.schedulerCache,
-		Ecache:         f.equivalencePodCache,
+		SchedulerCache: c.schedulerCache,
+		Ecache:         c.equivalencePodCache,
		// The scheduler only needs to consider schedulable nodes.
-		NodeLister:          &nodeLister{f.nodeLister},
+		NodeLister:          &nodeLister{c.nodeLister},
		Algorithm:           algo,
-		Binder:              f.getBinder(extenders),
-		PodConditionUpdater: &podConditionUpdater{f.client},
-		PodPreemptor:        &podPreemptor{f.client},
+		Binder:              c.getBinder(extenders),
+		PodConditionUpdater: &podConditionUpdater{c.client},
+		PodPreemptor:        &podPreemptor{c.client},
		WaitForCacheSync: func() bool {
-			return cache.WaitForCacheSync(f.StopEverything, f.scheduledPodsHasSynced)
+			return cache.WaitForCacheSync(c.StopEverything, c.scheduledPodsHasSynced)
		},
		NextPod: func() *v1.Pod {
-			return f.getNextPod()
+			return c.getNextPod()
		},
-		Error:          f.MakeDefaultErrorFunc(podBackoff, f.podQueue),
-		StopEverything: f.StopEverything,
-		VolumeBinder:   f.volumeBinder,
+		Error:          c.MakeDefaultErrorFunc(podBackoff, c.podQueue),
+		StopEverything: c.StopEverything,
+		VolumeBinder:   c.volumeBinder,
	}, nil
}
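
Editor's note: the wholesale rename of the receiver from f to c across configFactory follows golint's rule that every method on a type should use the same receiver name. A minimal, hypothetical sketch of the rule (the counter type below is illustrative, not from this package):

package main

import "fmt"

type counter struct{ n int }

// golint flags mixed receiver names on the same type (one method using `c`,
// another using `cnt`); keeping a single short name everywhere is the fix.
func (c *counter) inc() { c.n++ }

// Consistent: the same receiver name `c` is reused here.
func (c *counter) value() int { return c.n }

func main() {
	c := &counter{}
	c.inc()
	fmt.Println(c.value()) // prints 1
}
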
@@ -1026,8 +1024,8 @@ func (n *nodeLister) List() ([]*v1.Node, error) {
	return n.NodeLister.List(labels.Everything())
}

-func (f *configFactory) GetPriorityFunctionConfigs(priorityKeys sets.String) ([]algorithm.PriorityConfig, error) {
-	pluginArgs, err := f.getPluginArgs()
+func (c *configFactory) GetPriorityFunctionConfigs(priorityKeys sets.String) ([]algorithm.PriorityConfig, error) {
+	pluginArgs, err := c.getPluginArgs()
	if err != nil {
		return nil, err
	}
@@ -1035,8 +1033,8 @@ func (f *configFactory) GetPriorityFunctionConfigs(priorityKeys sets.String) ([]
	return getPriorityFunctionConfigs(priorityKeys, *pluginArgs)
}

-func (f *configFactory) GetPriorityMetadataProducer() (algorithm.PriorityMetadataProducer, error) {
-	pluginArgs, err := f.getPluginArgs()
+func (c *configFactory) GetPriorityMetadataProducer() (algorithm.PriorityMetadataProducer, error) {
+	pluginArgs, err := c.getPluginArgs()
	if err != nil {
		return nil, err
	}
@@ -1044,16 +1042,16 @@ func (f *configFactory) GetPriorityMetadataProducer() (algorithm.PriorityMetadat
	return getPriorityMetadataProducer(*pluginArgs)
}

-func (f *configFactory) GetPredicateMetadataProducer() (algorithm.PredicateMetadataProducer, error) {
-	pluginArgs, err := f.getPluginArgs()
+func (c *configFactory) GetPredicateMetadataProducer() (algorithm.PredicateMetadataProducer, error) {
+	pluginArgs, err := c.getPluginArgs()
	if err != nil {
		return nil, err
	}
	return getPredicateMetadataProducer(*pluginArgs)
}

-func (f *configFactory) GetPredicates(predicateKeys sets.String) (map[string]algorithm.FitPredicate, error) {
-	pluginArgs, err := f.getPluginArgs()
+func (c *configFactory) GetPredicates(predicateKeys sets.String) (map[string]algorithm.FitPredicate, error) {
+	pluginArgs, err := c.getPluginArgs()
	if err != nil {
		return nil, err
	}
@@ -1061,32 +1059,32 @@ func (f *configFactory) GetPredicates(predicateKeys sets.String) (map[string]alg
	return getFitPredicateFunctions(predicateKeys, *pluginArgs)
}

-func (f *configFactory) getPluginArgs() (*PluginFactoryArgs, error) {
+func (c *configFactory) getPluginArgs() (*PluginFactoryArgs, error) {
	return &PluginFactoryArgs{
-		PodLister:         f.podLister,
-		ServiceLister:     f.serviceLister,
-		ControllerLister:  f.controllerLister,
-		ReplicaSetLister:  f.replicaSetLister,
-		StatefulSetLister: f.statefulSetLister,
-		NodeLister:        &nodeLister{f.nodeLister},
-		NodeInfo:          &predicates.CachedNodeInfo{NodeLister: f.nodeLister},
-		PVInfo:            &predicates.CachedPersistentVolumeInfo{PersistentVolumeLister: f.pVLister},
-		PVCInfo:           &predicates.CachedPersistentVolumeClaimInfo{PersistentVolumeClaimLister: f.pVCLister},
-		StorageClassInfo:  &predicates.CachedStorageClassInfo{StorageClassLister: f.storageClassLister},
-		VolumeBinder:      f.volumeBinder,
-		HardPodAffinitySymmetricWeight: f.hardPodAffinitySymmetricWeight,
+		PodLister:         c.podLister,
+		ServiceLister:     c.serviceLister,
+		ControllerLister:  c.controllerLister,
+		ReplicaSetLister:  c.replicaSetLister,
+		StatefulSetLister: c.statefulSetLister,
+		NodeLister:        &nodeLister{c.nodeLister},
+		NodeInfo:          &predicates.CachedNodeInfo{NodeLister: c.nodeLister},
+		PVInfo:            &predicates.CachedPersistentVolumeInfo{PersistentVolumeLister: c.pVLister},
+		PVCInfo:           &predicates.CachedPersistentVolumeClaimInfo{PersistentVolumeClaimLister: c.pVCLister},
+		StorageClassInfo:  &predicates.CachedStorageClassInfo{StorageClassLister: c.storageClassLister},
+		VolumeBinder:      c.volumeBinder,
+		HardPodAffinitySymmetricWeight: c.hardPodAffinitySymmetricWeight,
	}, nil
}

-func (f *configFactory) getNextPod() *v1.Pod {
-	if pod, err := f.podQueue.Pop(); err == nil {
+func (c *configFactory) getNextPod() *v1.Pod {
+	pod, err := c.podQueue.Pop()
+	if err == nil {
		glog.V(4).Infof("About to try and schedule pod %v", pod.Name)
		return pod
-	} else {
-		glog.Errorf("Error while retrieving next pod from scheduling queue: %v", err)
-		return nil
	}
-}
+	glog.Errorf("Error while retrieving next pod from scheduling queue: %v", err)
+	return nil
+}

// unassignedNonTerminatedPod selects pods that are unassigned and non-terminal.
func unassignedNonTerminatedPod(pod *v1.Pod) bool {
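
Editor's note: the getNextPod and extender rewrites above apply golint's "if block ends with a return statement, so drop this else" advice. A hedged sketch of the before/after shape, using a stand-in queue type rather than the real scheduling queue:

package main

import (
	"errors"
	"fmt"
)

type queue struct{ items []string }

func (q *queue) pop() (string, error) {
	if len(q.items) == 0 {
		return "", errors.New("empty queue")
	}
	item := q.items[0]
	q.items = q.items[1:]
	return item, nil
}

// next mirrors the refactored getNextPod: handle the success path first,
// then fall through to the error path instead of nesting it in an else.
func (q *queue) next() string {
	item, err := q.pop()
	if err == nil {
		return item
	}
	fmt.Println("error while retrieving next item:", err)
	return ""
}

func main() {
	q := &queue{items: []string{"pod-a"}}
	fmt.Println(q.next()) // "pod-a"
	fmt.Println(q.next()) // logs the error, prints ""
}
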
@@ -1193,7 +1191,7 @@ func NewPodInformer(client clientset.Interface, resyncPeriod time.Duration, sche
	}
}

-func (factory *configFactory) MakeDefaultErrorFunc(backoff *util.PodBackoff, podQueue core.SchedulingQueue) func(pod *v1.Pod, err error) {
+func (c *configFactory) MakeDefaultErrorFunc(backoff *util.PodBackoff, podQueue core.SchedulingQueue) func(pod *v1.Pod, err error) {
	return func(pod *v1.Pod, err error) {
		if err == core.ErrNoNodesAvailable {
			glog.V(4).Infof("Unable to schedule %v %v: no nodes are registered to the cluster; waiting", pod.Namespace, pod.Name)
@@ -1205,13 +1203,13 @@ func (factory *configFactory) MakeDefaultErrorFunc(backoff *util.PodBackoff, pod
				nodeName := errStatus.Status().Details.Name
				// when node is not found, We do not remove the node right away. Trying again to get
				// the node and if the node is still not found, then remove it from the scheduler cache.
-				_, err := factory.client.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
+				_, err := c.client.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
				if err != nil && errors.IsNotFound(err) {
					node := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: nodeName}}
-					factory.schedulerCache.RemoveNode(&node)
+					c.schedulerCache.RemoveNode(&node)
					// invalidate cached predicate for the node
-					if factory.enableEquivalenceClassCache {
-						factory.equivalencePodCache.InvalidateAllCachedPredicateItemOfNode(nodeName)
+					if c.enableEquivalenceClassCache {
+						c.equivalencePodCache.InvalidateAllCachedPredicateItemOfNode(nodeName)
					}
				}
			}
@@ -1245,14 +1243,14 @@ func (factory *configFactory) MakeDefaultErrorFunc(backoff *util.PodBackoff, pod
			// Get the pod again; it may have changed/been scheduled already.
			getBackoff := initialGetBackoff
			for {
-				pod, err := factory.client.CoreV1().Pods(podID.Namespace).Get(podID.Name, metav1.GetOptions{})
+				pod, err := c.client.CoreV1().Pods(podID.Namespace).Get(podID.Name, metav1.GetOptions{})
				if err == nil {
					if len(pod.Spec.NodeName) == 0 {
						podQueue.AddUnschedulableIfNotPresent(pod)
					} else {
-						if factory.volumeBinder != nil {
+						if c.volumeBinder != nil {
							// Volume binder only wants to keep unassigned pods
-							factory.volumeBinder.DeletePodBindings(pod)
+							c.volumeBinder.DeletePodBindings(pod)
						}
					}
					break
@@ -1260,9 +1258,9 @@ func (factory *configFactory) MakeDefaultErrorFunc(backoff *util.PodBackoff, pod
				if errors.IsNotFound(err) {
					glog.Warningf("A pod %v no longer exists", podID)

-					if factory.volumeBinder != nil {
+					if c.volumeBinder != nil {
						// Volume binder only wants to keep unassigned pods
-						factory.volumeBinder.DeletePodBindings(origPod)
+						c.volumeBinder.DeletePodBindings(origPod)
					}
					return
				}
@@ -55,20 +55,20 @@ type PriorityMetadataProducerFactory func(PluginFactoryArgs) algorithm.PriorityM
// PredicateMetadataProducerFactory produces PredicateMetadataProducer from the given args.
type PredicateMetadataProducerFactory func(PluginFactoryArgs) algorithm.PredicateMetadataProducer

-// A FitPredicateFactory produces a FitPredicate from the given args.
+// FitPredicateFactory produces a FitPredicate from the given args.
type FitPredicateFactory func(PluginFactoryArgs) algorithm.FitPredicate

+// PriorityFunctionFactory produces a PriorityConfig from the given args.
// DEPRECATED
// Use Map-Reduce pattern for priority functions.
-// A PriorityFunctionFactory produces a PriorityConfig from the given args.
type PriorityFunctionFactory func(PluginFactoryArgs) algorithm.PriorityFunction

-// A PriorityFunctionFactory produces map & reduce priority functions
+// PriorityFunctionFactory2 produces map & reduce priority functions
// from a given args.
// FIXME: Rename to PriorityFunctionFactory.
type PriorityFunctionFactory2 func(PluginFactoryArgs) (algorithm.PriorityMapFunction, algorithm.PriorityReduceFunction)

-// A PriorityConfigFactory produces a PriorityConfig from the given function and weight
+// PriorityConfigFactory produces a PriorityConfig from the given function and weight
type PriorityConfigFactory struct {
	Function          PriorityFunctionFactory
	MapReduceFunction PriorityFunctionFactory2
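
Editor's note: the comment rewrites above satisfy golint's rule that a doc comment on an exported identifier must begin with that identifier's name. A small illustrative sketch (the Widget type is hypothetical, not part of the scheduler):

package widgets

// Widget is an exported type, so golint expects its comment to start with
// "Widget ..." rather than "A Widget ..." or an unrelated sentence.
type Widget struct {
	Name string
}

// NewWidget returns a Widget with the given name; starting the comment with
// the function name keeps golint and the generated godoc output consistent.
func NewWidget(name string) *Widget {
	return &Widget{Name: name}
}
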
@@ -96,9 +96,11 @@ var (
)

const (
+	// DefaultProvider defines the default algorithm provider name.
	DefaultProvider = "DefaultProvider"
)

+// AlgorithmProviderConfig is used to store the configuration of algorithm providers.
type AlgorithmProviderConfig struct {
	FitPredicateKeys     sets.String
	PriorityFunctionKeys sets.String
@@ -244,22 +246,24 @@ func IsFitPredicateRegistered(name string) bool {
	return ok
}

+// RegisterPriorityMetadataProducerFactory registers a PriorityMetadataProducerFactory.
func RegisterPriorityMetadataProducerFactory(factory PriorityMetadataProducerFactory) {
	schedulerFactoryMutex.Lock()
	defer schedulerFactoryMutex.Unlock()
	priorityMetadataProducer = factory
}

+// RegisterPredicateMetadataProducerFactory registers a PredicateMetadataProducerFactory.
func RegisterPredicateMetadataProducerFactory(factory PredicateMetadataProducerFactory) {
	schedulerFactoryMutex.Lock()
	defer schedulerFactoryMutex.Unlock()
	predicateMetadataProducer = factory
}

+// RegisterPriorityFunction registers a priority function with the algorithm registry. Returns the name,
+// with which the function was registered.
// DEPRECATED
// Use Map-Reduce pattern for priority functions.
-// Registers a priority function with the algorithm registry. Returns the name,
-// with which the function was registered.
func RegisterPriorityFunction(name string, function algorithm.PriorityFunction, weight int) string {
	return RegisterPriorityConfigFactory(name, PriorityConfigFactory{
		Function: func(PluginFactoryArgs) algorithm.PriorityFunction {
@@ -285,6 +289,7 @@ func RegisterPriorityFunction2(
	})
}

+// RegisterPriorityConfigFactory registers a priority config factory with its name.
func RegisterPriorityConfigFactory(name string, pcf PriorityConfigFactory) string {
	schedulerFactoryMutex.Lock()
	defer schedulerFactoryMutex.Unlock()
@@ -506,6 +511,7 @@ func validatePriorityOrDie(priority schedulerapi.PriorityPolicy) {
	}
}

+// ListRegisteredFitPredicates returns the registered fit predicates.
func ListRegisteredFitPredicates() []string {
	schedulerFactoryMutex.Lock()
	defer schedulerFactoryMutex.Unlock()
@@ -517,6 +523,7 @@ func ListRegisteredFitPredicates() []string {
	return names
}

+// ListRegisteredPriorityFunctions returns the registered priority functions.
func ListRegisteredPriorityFunctions() []string {
	schedulerFactoryMutex.Lock()
	defer schedulerFactoryMutex.Unlock()
@@ -107,7 +107,7 @@ func Register() {
	})
}

-// Gets the time since the specified start in microseconds.
+// SinceInMicroseconds gets the time since the specified start in microseconds.
func SinceInMicroseconds(start time.Time) float64 {
	return float64(time.Since(start).Nanoseconds() / time.Microsecond.Nanoseconds())
}
@@ -257,7 +257,7 @@ func (cache *schedulerCache) AddPod(pod *v1.Pod) error {
		}
		cache.podStates[key] = ps
	default:
-		return fmt.Errorf("pod %v was already in added state.", key)
+		return fmt.Errorf("pod %v was already in added state", key)
	}
	return nil
}
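
Editor's note: dropping the trailing period satisfies the Go error-string convention that golint/go vet check: error strings should not be capitalized or end with punctuation, because they are routinely wrapped into longer messages. A hedged sketch with made-up pod keys:

package main

import "fmt"

func addPod(key string, added map[string]bool) error {
	if added[key] {
		// Lowercase, no trailing period: reads cleanly when wrapped below.
		return fmt.Errorf("pod %v was already in added state", key)
	}
	added[key] = true
	return nil
}

func main() {
	added := map[string]bool{"default/nginx": true}
	if err := addPod("default/nginx", added); err != nil {
		// Wrapping shows why trailing punctuation in error strings is awkward.
		fmt.Printf("sync failed: %v; will retry\n", err)
	}
}
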
@@ -125,7 +125,7 @@ func TestAssumePodScheduled(t *testing.T) {
			Memory:   0,
		},
		nonzeroRequest: &Resource{
-			MilliCPU: priorityutil.DefaultMilliCpuRequest,
+			MilliCPU: priorityutil.DefaultMilliCPURequest,
			Memory:   priorityutil.DefaultMemoryRequest,
		},
		allocatableResource: &Resource{},
@@ -710,10 +710,10 @@ func buildNodeInfo(node *v1.Node, pods []*v1.Pod) *NodeInfo {
func TestNodeOperators(t *testing.T) {
	// Test datas
	nodeName := "test-node"
-	cpu_1 := resource.MustParse("1000m")
-	mem_100m := resource.MustParse("100m")
-	cpu_half := resource.MustParse("500m")
-	mem_50m := resource.MustParse("50m")
+	cpu1 := resource.MustParse("1000m")
+	mem100m := resource.MustParse("100m")
+	cpuHalf := resource.MustParse("500m")
+	mem50m := resource.MustParse("50m")
	resourceFooName := "example.com/foo"
	resourceFoo := resource.MustParse("1")
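
Editor's note: the renames above and below (cpu_1 to cpu1, non0_cpu to non0CPU, DefaultMilliCpuRequest to DefaultMilliCPURequest) follow Go's naming conventions: multiword names use mixedCaps rather than underscores, and initialisms such as CPU or ID keep a consistent case. A small illustrative sketch with made-up values:

package main

import "fmt"

// requestedCPU and requestedMemory use mixedCaps; golint would flag
// requested_cpu or requested_memory style names, and it prefers the
// initialism CPU over Cpu.
type resourceRequest struct {
	requestedCPU    int64 // millicores
	requestedMemory int64 // bytes
}

func main() {
	req := resourceRequest{requestedCPU: 500, requestedMemory: 50 << 20}
	fmt.Printf("cpu=%dm mem=%dMi\n", req.requestedCPU, req.requestedMemory>>20)
}
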
@@ -728,8 +728,8 @@ func TestNodeOperators(t *testing.T) {
		},
		Status: v1.NodeStatus{
			Allocatable: v1.ResourceList{
-				v1.ResourceCPU:                   cpu_1,
-				v1.ResourceMemory:                mem_100m,
+				v1.ResourceCPU:                   cpu1,
+				v1.ResourceMemory:                mem100m,
				v1.ResourceName(resourceFooName): resourceFoo,
			},
		},
@@ -754,8 +754,8 @@ func TestNodeOperators(t *testing.T) {
		{
			Resources: v1.ResourceRequirements{
				Requests: v1.ResourceList{
-					v1.ResourceCPU:    cpu_half,
-					v1.ResourceMemory: mem_50m,
+					v1.ResourceCPU:    cpuHalf,
+					v1.ResourceMemory: mem50m,
				},
			},
			Ports: []v1.ContainerPort{
@@ -778,8 +778,8 @@ func TestNodeOperators(t *testing.T) {
		},
		Status: v1.NodeStatus{
			Allocatable: v1.ResourceList{
-				v1.ResourceCPU:                   cpu_1,
-				v1.ResourceMemory:                mem_100m,
+				v1.ResourceCPU:                   cpu1,
+				v1.ResourceMemory:                mem100m,
				v1.ResourceName(resourceFooName): resourceFoo,
			},
		},
@@ -804,8 +804,8 @@ func TestNodeOperators(t *testing.T) {
		{
			Resources: v1.ResourceRequirements{
				Requests: v1.ResourceList{
-					v1.ResourceCPU:    cpu_half,
-					v1.ResourceMemory: mem_50m,
+					v1.ResourceCPU:    cpuHalf,
+					v1.ResourceMemory: mem50m,
				},
			},
		},
@@ -822,8 +822,8 @@ func TestNodeOperators(t *testing.T) {
		{
			Resources: v1.ResourceRequirements{
				Requests: v1.ResourceList{
-					v1.ResourceCPU:    cpu_half,
-					v1.ResourceMemory: mem_50m,
+					v1.ResourceCPU:    cpuHalf,
+					v1.ResourceMemory: mem50m,
				},
			},
		},
@@ -866,8 +866,8 @@ func TestNodeOperators(t *testing.T) {
	}

	// Case 3: update node attribute successfully.
-	node.Status.Allocatable[v1.ResourceMemory] = mem_50m
-	expected.allocatableResource.Memory = mem_50m.Value()
+	node.Status.Allocatable[v1.ResourceMemory] = mem50m
+	expected.allocatableResource.Memory = mem50m.Value()
	expected.generation++
	cache.UpdateNode(nil, node)
	got, found = cache.nodes[node.Name]
@@ -22,6 +22,7 @@ import (
	"k8s.io/apimachinery/pkg/labels"
)

+// PodFilter is a function to filter a pod. If pod passed return true else return false.
type PodFilter func(*v1.Pod) bool

// Cache collects pods' information and provides node-level aggregated information.
@@ -75,7 +75,7 @@ type Resource struct {
	ScalarResources map[v1.ResourceName]int64
}

-// New creates a Resource from ResourceList
+// NewResource creates a Resource from ResourceList
func NewResource(rl v1.ResourceList) *Resource {
	r := &Resource{}
	r.Add(rl)
@@ -108,6 +108,7 @@ func (r *Resource) Add(rl v1.ResourceList) {
	}
}

+// ResourceList returns a resource list of this resource.
func (r *Resource) ResourceList() v1.ResourceList {
	result := v1.ResourceList{
		v1.ResourceCPU: *resource.NewMilliQuantity(r.MilliCPU, resource.DecimalSI),
@@ -126,6 +127,7 @@ func (r *Resource) ResourceList() v1.ResourceList {
	return result
}

+// Clone returns a copy of this resource.
func (r *Resource) Clone() *Resource {
	res := &Resource{
		MilliCPU: r.MilliCPU,
@@ -143,10 +145,12 @@ func (r *Resource) Clone() *Resource {
	return res
}

+// AddScalar adds a resource by a scalar value of this resource.
func (r *Resource) AddScalar(name v1.ResourceName, quantity int64) {
	r.SetScalar(name, r.ScalarResources[name]+quantity)
}

+// SetScalar sets a resource by a scalar value of this resource.
func (r *Resource) SetScalar(name v1.ResourceName, quantity int64) {
	// Lazily allocate scalar resource map.
	if r.ScalarResources == nil {
@@ -172,7 +176,7 @@ func NewNodeInfo(pods ...*v1.Pod) *NodeInfo {
	return ni
}

-// Returns overall information about this node.
+// Node returns overall information about this node.
func (n *NodeInfo) Node() *v1.Node {
	if n == nil {
		return nil
@@ -188,6 +192,7 @@ func (n *NodeInfo) Pods() []*v1.Pod {
	return n.pods
}

+// UsedPorts returns used ports on this node.
func (n *NodeInfo) UsedPorts() util.HostPortInfo {
	if n == nil {
		return nil
@@ -203,6 +208,7 @@ func (n *NodeInfo) PodsWithAffinity() []*v1.Pod {
	return n.podsWithAffinity
}

+// AllowedPodNumber returns the number of the allowed pods on this node.
func (n *NodeInfo) AllowedPodNumber() int {
	if n == nil || n.allocatableResource == nil {
		return 0
@@ -210,6 +216,7 @@ func (n *NodeInfo) AllowedPodNumber() int {
	return n.allocatableResource.AllowedPodNumber
}

+// Taints returns the taints list on this node.
func (n *NodeInfo) Taints() ([]v1.Taint, error) {
	if n == nil {
		return nil, nil
@@ -217,6 +224,7 @@ func (n *NodeInfo) Taints() ([]v1.Taint, error) {
	return n.taints, n.taintsErr
}

+// MemoryPressureCondition returns the memory pressure condition status on this node.
func (n *NodeInfo) MemoryPressureCondition() v1.ConditionStatus {
	if n == nil {
		return v1.ConditionUnknown
@@ -224,6 +232,7 @@ func (n *NodeInfo) MemoryPressureCondition() v1.ConditionStatus {
	return n.memoryPressureCondition
}

+// DiskPressureCondition returns the disk pressure condition status on this node.
func (n *NodeInfo) DiskPressureCondition() v1.ConditionStatus {
	if n == nil {
		return v1.ConditionUnknown
@@ -260,6 +269,7 @@ func (n *NodeInfo) SetAllocatableResource(allocatableResource *Resource) {
	n.allocatableResource = allocatableResource
}

+// Clone returns a copy of this node.
func (n *NodeInfo) Clone() *NodeInfo {
	clone := &NodeInfo{
		node: n.node,
@@ -306,7 +316,7 @@ func hasPodAffinityConstraints(pod *v1.Pod) bool {

// AddPod adds pod information to this NodeInfo.
func (n *NodeInfo) AddPod(pod *v1.Pod) {
-	res, non0_cpu, non0_mem := calculateResource(pod)
+	res, non0CPU, non0Mem := calculateResource(pod)
	n.requestedResource.MilliCPU += res.MilliCPU
	n.requestedResource.Memory += res.Memory
	n.requestedResource.NvidiaGPU += res.NvidiaGPU
@@ -317,8 +327,8 @@ func (n *NodeInfo) AddPod(pod *v1.Pod) {
	for rName, rQuant := range res.ScalarResources {
		n.requestedResource.ScalarResources[rName] += rQuant
	}
-	n.nonzeroRequest.MilliCPU += non0_cpu
-	n.nonzeroRequest.Memory += non0_mem
+	n.nonzeroRequest.MilliCPU += non0CPU
+	n.nonzeroRequest.Memory += non0Mem
	n.pods = append(n.pods, pod)
	if hasPodAffinityConstraints(pod) {
		n.podsWithAffinity = append(n.podsWithAffinity, pod)
@@ -361,7 +371,7 @@ func (n *NodeInfo) RemovePod(pod *v1.Pod) error {
			n.pods[i] = n.pods[len(n.pods)-1]
			n.pods = n.pods[:len(n.pods)-1]
			// reduce the resource data
-			res, non0_cpu, non0_mem := calculateResource(pod)
+			res, non0CPU, non0Mem := calculateResource(pod)

			n.requestedResource.MilliCPU -= res.MilliCPU
			n.requestedResource.Memory -= res.Memory
@@ -372,8 +382,8 @@ func (n *NodeInfo) RemovePod(pod *v1.Pod) error {
			for rName, rQuant := range res.ScalarResources {
				n.requestedResource.ScalarResources[rName] -= rQuant
			}
-			n.nonzeroRequest.MilliCPU -= non0_cpu
-			n.nonzeroRequest.Memory -= non0_mem
+			n.nonzeroRequest.MilliCPU -= non0CPU
+			n.nonzeroRequest.Memory -= non0Mem

			// Release ports when remove Pods.
			n.updateUsedPorts(pod, false)
@@ -386,14 +396,14 @@ func (n *NodeInfo) RemovePod(pod *v1.Pod) error {
	return fmt.Errorf("no corresponding pod %s in pods of node %s", pod.Name, n.node.Name)
}

-func calculateResource(pod *v1.Pod) (res Resource, non0_cpu int64, non0_mem int64) {
+func calculateResource(pod *v1.Pod) (res Resource, non0CPU int64, non0Mem int64) {
	resPtr := &res
	for _, c := range pod.Spec.Containers {
		resPtr.Add(c.Resources.Requests)

-		non0_cpu_req, non0_mem_req := priorityutil.GetNonzeroRequests(&c.Resources.Requests)
-		non0_cpu += non0_cpu_req
-		non0_mem += non0_mem_req
+		non0CPUReq, non0MemReq := priorityutil.GetNonzeroRequests(&c.Resources.Requests)
+		non0CPU += non0CPUReq
+		non0Mem += non0MemReq
		// No non-zero resources for GPUs or opaque resources.
	}
@@ -414,7 +424,7 @@ func (n *NodeInfo) updateUsedPorts(pod *v1.Pod, add bool) {
	}
}

-// Sets the overall node information.
+// SetNode sets the overall node information.
func (n *NodeInfo) SetNode(node *v1.Node) error {
	n.node = node
@@ -436,7 +446,7 @@ func (n *NodeInfo) SetNode(node *v1.Node) error {
	return nil
}

-// Removes the overall information about the node.
+// RemoveNode removes the overall information about the node.
func (n *NodeInfo) RemoveNode(node *v1.Node) error {
	// We don't remove NodeInfo for because there can still be some pods on this node -
	// this is because notifications about pods are delivered in a different watch,
@@ -31,54 +31,72 @@ type FakeCache struct {
	GetPodFunc func(*v1.Pod) *v1.Pod
}

+// AssumePod is a fake method for testing.
func (f *FakeCache) AssumePod(pod *v1.Pod) error {
	f.AssumeFunc(pod)
	return nil
}

+// FinishBinding is a fake method for testing.
func (f *FakeCache) FinishBinding(pod *v1.Pod) error { return nil }

+// ForgetPod is a fake method for testing.
func (f *FakeCache) ForgetPod(pod *v1.Pod) error {
	f.ForgetFunc(pod)
	return nil
}

+// AddPod is a fake method for testing.
func (f *FakeCache) AddPod(pod *v1.Pod) error { return nil }

+// UpdatePod is a fake method for testing.
func (f *FakeCache) UpdatePod(oldPod, newPod *v1.Pod) error { return nil }

+// RemovePod is a fake method for testing.
func (f *FakeCache) RemovePod(pod *v1.Pod) error { return nil }

+// IsAssumedPod is a fake method for testing.
func (f *FakeCache) IsAssumedPod(pod *v1.Pod) (bool, error) {
	return f.IsAssumedPodFunc(pod), nil
}

+// GetPod is a fake method for testing.
func (f *FakeCache) GetPod(pod *v1.Pod) (*v1.Pod, error) {
	return f.GetPodFunc(pod), nil
}

+// AddNode is a fake method for testing.
func (f *FakeCache) AddNode(node *v1.Node) error { return nil }

+// UpdateNode is a fake method for testing.
func (f *FakeCache) UpdateNode(oldNode, newNode *v1.Node) error { return nil }

+// RemoveNode is a fake method for testing.
func (f *FakeCache) RemoveNode(node *v1.Node) error { return nil }

+// UpdateNodeNameToInfoMap is a fake method for testing.
func (f *FakeCache) UpdateNodeNameToInfoMap(infoMap map[string]*schedulercache.NodeInfo) error {
	return nil
}

+// AddPDB is a fake method for testing.
func (f *FakeCache) AddPDB(pdb *policy.PodDisruptionBudget) error { return nil }

+// UpdatePDB is a fake method for testing.
func (f *FakeCache) UpdatePDB(oldPDB, newPDB *policy.PodDisruptionBudget) error { return nil }

+// RemovePDB is a fake method for testing.
func (f *FakeCache) RemovePDB(pdb *policy.PodDisruptionBudget) error { return nil }

+// ListPDBs is a fake method for testing.
func (f *FakeCache) ListPDBs(selector labels.Selector) ([]*policy.PodDisruptionBudget, error) {
	return nil, nil
}

+// List is a fake method for testing.
func (f *FakeCache) List(s labels.Selector) ([]*v1.Pod, error) { return nil, nil }

+// FilteredList is a fake method for testing.
func (f *FakeCache) FilteredList(filter schedulercache.PodFilter, selector labels.Selector) ([]*v1.Pod, error) {
	return nil, nil
}
@@ -25,11 +25,11 @@ import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	corelisters "k8s.io/client-go/listers/core/v1"
-	. "k8s.io/kubernetes/pkg/scheduler/algorithm"
+	"k8s.io/kubernetes/pkg/scheduler/algorithm"
	"k8s.io/kubernetes/pkg/scheduler/schedulercache"
)

-var _ NodeLister = &FakeNodeLister{}
+var _ algorithm.NodeLister = &FakeNodeLister{}

// FakeNodeLister implements NodeLister on a []string for test purposes.
type FakeNodeLister []*v1.Node
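
Editor's note: replacing the dot import with a named import means the compile-time interface assertions must now be package-qualified (algorithm.NodeLister instead of NodeLister); golint discourages dot imports because they obscure where identifiers come from. A hedged, self-contained sketch of the same assertion pattern, built on the standard sort package rather than the scheduler types:

package main

import (
	"fmt"
	"sort"
)

// byLength sorts strings by length; with a named import we write
// sort.Interface explicitly, whereas a dot import (`. "sort"`) would
// let us write Interface bare at the cost of readability.
type byLength []string

func (b byLength) Len() int           { return len(b) }
func (b byLength) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
func (b byLength) Less(i, j int) bool { return len(b[i]) < len(b[j]) }

// Compile-time assertion that byLength satisfies sort.Interface,
// mirroring the `var _ algorithm.NodeLister = &FakeNodeLister{}` lines above.
var _ sort.Interface = byLength{}

func main() {
	words := byLength{"kubernetes", "k3s", "scheduler"}
	sort.Sort(words)
	fmt.Println(words) // [k3s scheduler kubernetes]
}
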
@@ -39,7 +39,7 @@ func (f FakeNodeLister) List() ([]*v1.Node, error) {
	return f, nil
}

-var _ PodLister = &FakePodLister{}
+var _ algorithm.PodLister = &FakePodLister{}

// FakePodLister implements PodLister on an []v1.Pods for test purposes.
type FakePodLister []*v1.Pod
@@ -54,6 +54,7 @@ func (f FakePodLister) List(s labels.Selector) (selected []*v1.Pod, err error) {
	return selected, nil
}

+// FilteredList returns pods matching a pod filter and a label selector.
func (f FakePodLister) FilteredList(podFilter schedulercache.PodFilter, s labels.Selector) (selected []*v1.Pod, err error) {
	for _, pod := range f {
		if podFilter(pod) && s.Matches(labels.Set(pod.Labels)) {
@@ -63,7 +64,7 @@ func (f FakePodLister) FilteredList(podFilter schedulercache.PodFilter, s labels
	return selected, nil
}

-var _ ServiceLister = &FakeServiceLister{}
+var _ algorithm.ServiceLister = &FakeServiceLister{}

// FakeServiceLister implements ServiceLister on []v1.Service for test purposes.
type FakeServiceLister []*v1.Service
|
@ -91,7 +92,7 @@ func (f FakeServiceLister) GetPodServices(pod *v1.Pod) (services []*v1.Service,
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
var _ ControllerLister = &FakeControllerLister{}
|
var _ algorithm.ControllerLister = &FakeControllerLister{}
|
||||||
|
|
||||||
// FakeControllerLister implements ControllerLister on []v1.ReplicationController for test purposes.
|
// FakeControllerLister implements ControllerLister on []v1.ReplicationController for test purposes.
|
||||||
type FakeControllerLister []*v1.ReplicationController
|
type FakeControllerLister []*v1.ReplicationController
|
||||||
|
@ -122,7 +123,7 @@ func (f FakeControllerLister) GetPodControllers(pod *v1.Pod) (controllers []*v1.
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
var _ ReplicaSetLister = &FakeReplicaSetLister{}
|
var _ algorithm.ReplicaSetLister = &FakeReplicaSetLister{}
|
||||||
|
|
||||||
// FakeReplicaSetLister implements ControllerLister on []extensions.ReplicaSet for test purposes.
|
// FakeReplicaSetLister implements ControllerLister on []extensions.ReplicaSet for test purposes.
|
||||||
type FakeReplicaSetLister []*extensions.ReplicaSet
|
type FakeReplicaSetLister []*extensions.ReplicaSet
|
||||||
|
@ -151,7 +152,7 @@ func (f FakeReplicaSetLister) GetPodReplicaSets(pod *v1.Pod) (rss []*extensions.
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
var _ StatefulSetLister = &FakeStatefulSetLister{}
|
var _ algorithm.StatefulSetLister = &FakeStatefulSetLister{}
|
||||||
|
|
||||||
// FakeStatefulSetLister implements ControllerLister on []apps.StatefulSet for testing purposes.
|
// FakeStatefulSetLister implements ControllerLister on []apps.StatefulSet for testing purposes.
|
||||||
type FakeStatefulSetLister []*apps.StatefulSet
|
type FakeStatefulSetLister []*apps.StatefulSet
|
||||||
|
@ -183,10 +184,12 @@ type FakePersistentVolumeClaimLister []*v1.PersistentVolumeClaim
|
||||||
|
|
||||||
var _ corelisters.PersistentVolumeClaimLister = FakePersistentVolumeClaimLister{}
|
var _ corelisters.PersistentVolumeClaimLister = FakePersistentVolumeClaimLister{}
|
||||||
|
|
||||||
|
// List returns not implemented error.
|
||||||
func (f FakePersistentVolumeClaimLister) List(selector labels.Selector) (ret []*v1.PersistentVolumeClaim, err error) {
|
func (f FakePersistentVolumeClaimLister) List(selector labels.Selector) (ret []*v1.PersistentVolumeClaim, err error) {
|
||||||
return nil, fmt.Errorf("not implemented")
|
return nil, fmt.Errorf("not implemented")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// PersistentVolumeClaims returns a FakePersistentVolumeClaimLister object.
|
||||||
func (f FakePersistentVolumeClaimLister) PersistentVolumeClaims(namespace string) corelisters.PersistentVolumeClaimNamespaceLister {
|
func (f FakePersistentVolumeClaimLister) PersistentVolumeClaims(namespace string) corelisters.PersistentVolumeClaimNamespaceLister {
|
||||||
return &fakePersistentVolumeClaimNamespaceLister{
|
return &fakePersistentVolumeClaimNamespaceLister{
|
||||||
pvcs: f,
|
pvcs: f,
|
||||||
|
|
|
@ -25,26 +25,36 @@ import (
|
||||||
// PodsToCache is used for testing
|
// PodsToCache is used for testing
|
||||||
type PodsToCache []*v1.Pod
|
type PodsToCache []*v1.Pod
|
||||||
|
|
||||||
|
// AssumePod returns nil.
|
||||||
func (p PodsToCache) AssumePod(pod *v1.Pod) error { return nil }
|
func (p PodsToCache) AssumePod(pod *v1.Pod) error { return nil }
|
||||||
|
|
||||||
|
// ForgetPod returns nil.
|
||||||
func (p PodsToCache) ForgetPod(pod *v1.Pod) error { return nil }
|
func (p PodsToCache) ForgetPod(pod *v1.Pod) error { return nil }
|
||||||
|
|
||||||
|
// AddPod returns nil.
|
||||||
func (p PodsToCache) AddPod(pod *v1.Pod) error { return nil }
|
func (p PodsToCache) AddPod(pod *v1.Pod) error { return nil }
|
||||||
|
|
||||||
|
// UpdatePod returns nil.
|
||||||
func (p PodsToCache) UpdatePod(oldPod, newPod *v1.Pod) error { return nil }
|
func (p PodsToCache) UpdatePod(oldPod, newPod *v1.Pod) error { return nil }
|
||||||
|
|
||||||
|
// RemovePod returns nil.
|
||||||
func (p PodsToCache) RemovePod(pod *v1.Pod) error { return nil }
|
func (p PodsToCache) RemovePod(pod *v1.Pod) error { return nil }
|
||||||
|
|
||||||
|
// AddNode returns nil.
|
||||||
func (p PodsToCache) AddNode(node *v1.Node) error { return nil }
|
func (p PodsToCache) AddNode(node *v1.Node) error { return nil }
|
||||||
|
|
||||||
|
// UpdateNode returns nil.
|
||||||
func (p PodsToCache) UpdateNode(oldNode, newNode *v1.Node) error { return nil }
|
func (p PodsToCache) UpdateNode(oldNode, newNode *v1.Node) error { return nil }
|
||||||
|
|
||||||
|
// RemoveNode returns nil.
|
||||||
func (p PodsToCache) RemoveNode(node *v1.Node) error { return nil }
|
func (p PodsToCache) RemoveNode(node *v1.Node) error { return nil }
|
||||||
|
|
||||||
|
// UpdateNodeNameToInfoMap returns nil.
|
||||||
func (p PodsToCache) UpdateNodeNameToInfoMap(infoMap map[string]*schedulercache.NodeInfo) error {
|
func (p PodsToCache) UpdateNodeNameToInfoMap(infoMap map[string]*schedulercache.NodeInfo) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// List returns pods matching the label selector.
|
||||||
func (p PodsToCache) List(s labels.Selector) (selected []*v1.Pod, err error) {
|
func (p PodsToCache) List(s labels.Selector) (selected []*v1.Pod, err error) {
|
||||||
for _, pod := range p {
|
for _, pod := range p {
|
||||||
if s.Matches(labels.Set(pod.Labels)) {
|
if s.Matches(labels.Set(pod.Labels)) {
|
||||||
|
|
|
@ -37,10 +37,10 @@ func (realClock) Now() time.Time {
|
||||||
return time.Now()
|
return time.Now()
|
||||||
}
|
}
|
||||||
|
|
||||||
// backoffEntry is single threaded. in particular, it only allows a single action to be waiting on backoff at a time.
|
// BackoffEntry is single threaded. in particular, it only allows a single action to be waiting on backoff at a time.
|
||||||
// It is expected that all users will only use the public TryWait(...) method
|
// It is expected that all users will only use the public TryWait(...) method
|
||||||
// It is also not safe to copy this object.
|
// It is also not safe to copy this object.
|
||||||
type backoffEntry struct {
|
type BackoffEntry struct {
|
||||||
backoff time.Duration
|
backoff time.Duration
|
||||||
lastUpdate time.Time
|
lastUpdate time.Time
|
||||||
reqInFlight int32
|
reqInFlight int32
|
||||||
|
@@ -48,19 +48,19 @@ type backoffEntry struct {

// tryLock attempts to acquire a lock via atomic compare and swap.
// returns true if the lock was acquired, false otherwise
-func (b *backoffEntry) tryLock() bool {
+func (b *BackoffEntry) tryLock() bool {
	return atomic.CompareAndSwapInt32(&b.reqInFlight, 0, 1)
}

// unlock returns the lock. panics if the lock isn't held
-func (b *backoffEntry) unlock() {
+func (b *BackoffEntry) unlock() {
	if !atomic.CompareAndSwapInt32(&b.reqInFlight, 1, 0) {
		panic(fmt.Sprintf("unexpected state on unlocking: %+v", b))
	}
}

// TryWait tries to acquire the backoff lock, maxDuration is the maximum allowed period to wait for.
-func (b *backoffEntry) TryWait(maxDuration time.Duration) bool {
+func (b *BackoffEntry) TryWait(maxDuration time.Duration) bool {
	if !b.tryLock() {
		return false
	}
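
Editor's note: a hedged usage sketch of the exponential back-off pattern that BackoffEntry/PodBackoff implement above, written against a simplified stand-in type rather than the real scheduler util package:

package main

import (
	"fmt"
	"time"
)

// entry is a simplified stand-in for BackoffEntry: each attempt returns the
// current delay and doubles the stored value, capped at a maximum duration.
type entry struct {
	backoff time.Duration
}

func (e *entry) next(max time.Duration) time.Duration {
	d := e.backoff
	if doubled := d * 2; doubled > max {
		e.backoff = max
	} else {
		e.backoff = doubled
	}
	return d
}

func main() {
	e := &entry{backoff: 1 * time.Second}
	for i := 0; i < 5; i++ {
		fmt.Println(e.next(8 * time.Second)) // 1s, 2s, 4s, 8s, 8s
	}
}
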
@ -69,62 +69,69 @@ func (b *backoffEntry) TryWait(maxDuration time.Duration) bool {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
func (entry *backoffEntry) getBackoff(maxDuration time.Duration) time.Duration {
|
func (b *BackoffEntry) getBackoff(maxDuration time.Duration) time.Duration {
|
||||||
duration := entry.backoff
|
duration := b.backoff
|
||||||
newDuration := time.Duration(duration) * 2
|
newDuration := time.Duration(duration) * 2
|
||||||
if newDuration > maxDuration {
|
if newDuration > maxDuration {
|
||||||
newDuration = maxDuration
|
newDuration = maxDuration
|
||||||
}
|
}
|
||||||
entry.backoff = newDuration
|
b.backoff = newDuration
|
||||||
glog.V(4).Infof("Backing off %s", duration.String())
|
glog.V(4).Infof("Backing off %s", duration.String())
|
||||||
return duration
|
return duration
|
||||||
}
|
}
|
||||||
|
|
||||||
func (entry *backoffEntry) wait(maxDuration time.Duration) {
|
func (b *BackoffEntry) wait(maxDuration time.Duration) {
|
||||||
time.Sleep(entry.getBackoff(maxDuration))
|
time.Sleep(b.getBackoff(maxDuration))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// PodBackoff is used to restart a pod with back-off delay.
|
||||||
type PodBackoff struct {
|
type PodBackoff struct {
|
||||||
perPodBackoff map[ktypes.NamespacedName]*backoffEntry
|
perPodBackoff map[ktypes.NamespacedName]*BackoffEntry
|
||||||
lock sync.Mutex
|
lock sync.Mutex
|
||||||
clock clock
|
clock clock
|
||||||
defaultDuration time.Duration
|
defaultDuration time.Duration
|
||||||
maxDuration time.Duration
|
maxDuration time.Duration
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// MaxDuration returns the max time duration of the back-off.
|
||||||
func (p *PodBackoff) MaxDuration() time.Duration {
|
func (p *PodBackoff) MaxDuration() time.Duration {
|
||||||
return p.maxDuration
|
return p.maxDuration
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// CreateDefaultPodBackoff creates a default pod back-off object.
|
||||||
func CreateDefaultPodBackoff() *PodBackoff {
|
func CreateDefaultPodBackoff() *PodBackoff {
|
||||||
return CreatePodBackoff(1*time.Second, 60*time.Second)
|
return CreatePodBackoff(1*time.Second, 60*time.Second)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// CreatePodBackoff creates a pod back-off object by default duration and max duration.
|
||||||
func CreatePodBackoff(defaultDuration, maxDuration time.Duration) *PodBackoff {
|
func CreatePodBackoff(defaultDuration, maxDuration time.Duration) *PodBackoff {
|
||||||
return CreatePodBackoffWithClock(defaultDuration, maxDuration, realClock{})
|
return CreatePodBackoffWithClock(defaultDuration, maxDuration, realClock{})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// CreatePodBackoffWithClock creates a pod back-off object by default duration, max duration and clock.
|
||||||
func CreatePodBackoffWithClock(defaultDuration, maxDuration time.Duration, clock clock) *PodBackoff {
|
func CreatePodBackoffWithClock(defaultDuration, maxDuration time.Duration, clock clock) *PodBackoff {
|
||||||
return &PodBackoff{
|
return &PodBackoff{
|
||||||
perPodBackoff: map[ktypes.NamespacedName]*backoffEntry{},
|
perPodBackoff: map[ktypes.NamespacedName]*BackoffEntry{},
|
||||||
clock: clock,
|
clock: clock,
|
||||||
defaultDuration: defaultDuration,
|
defaultDuration: defaultDuration,
|
||||||
maxDuration: maxDuration,
|
maxDuration: maxDuration,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *PodBackoff) GetEntry(podID ktypes.NamespacedName) *backoffEntry {
|
// GetEntry returns a back-off entry by Pod ID.
|
||||||
|
func (p *PodBackoff) GetEntry(podID ktypes.NamespacedName) *BackoffEntry {
|
||||||
p.lock.Lock()
|
p.lock.Lock()
|
||||||
defer p.lock.Unlock()
|
defer p.lock.Unlock()
|
||||||
entry, ok := p.perPodBackoff[podID]
|
entry, ok := p.perPodBackoff[podID]
|
||||||
if !ok {
|
if !ok {
|
||||||
entry = &backoffEntry{backoff: p.defaultDuration}
|
entry = &BackoffEntry{backoff: p.defaultDuration}
|
||||||
p.perPodBackoff[podID] = entry
|
p.perPodBackoff[podID] = entry
|
||||||
}
|
}
|
||||||
entry.lastUpdate = p.clock.Now()
|
entry.lastUpdate = p.clock.Now()
|
||||||
return entry
|
return entry
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Gc execute garbage collection on the pod back-off.
|
||||||
func (p *PodBackoff) Gc() {
|
func (p *PodBackoff) Gc() {
|
||||||
p.lock.Lock()
|
p.lock.Lock()
|
||||||
defer p.lock.Unlock()
|
defer p.lock.Unlock()
|
||||||
|
|
|
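For context on the renamed types, the back-off entries above double their wait duration on each use, capped at a maximum. A minimal, self-contained sketch of that doubling-with-a-cap behaviour, assuming the defaults visible in this hunk (1s initial duration, 60s cap); the helper name `nextBackoff` is illustrative and not part of the commit:

```go
package main

import (
	"fmt"
	"time"
)

// nextBackoff mirrors the getBackoff logic shown in the hunk above: the caller
// waits for the current duration, and the stored duration is doubled for the
// next attempt, never exceeding max. This is a sketch, not the scheduler's API.
func nextBackoff(current, max time.Duration) (wait, next time.Duration) {
	next = current * 2
	if next > max {
		next = max
	}
	return current, next
}

func main() {
	// Defaults used by CreateDefaultPodBackoff in this diff: 1s initial, 60s cap.
	cur, max := 1*time.Second, 60*time.Second
	for i := 0; i < 8; i++ {
		var wait time.Duration
		wait, cur = nextBackoff(cur, max)
		fmt.Println(wait) // 1s, 2s, 4s, 8s, 16s, 32s, 1m0s, 1m0s
	}
}
```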
@@ -28,9 +28,11 @@ import (
 	"k8s.io/kubernetes/pkg/api/legacyscheme"
 	api "k8s.io/kubernetes/pkg/apis/core"

+	// Init the core api installation
 	_ "k8s.io/kubernetes/pkg/apis/core/install"
 )

+// TestGroup defines a api group for testing.
 type TestGroup struct {
 	externalGroupVersion schema.GroupVersion
 	internalGroupVersion schema.GroupVersion
@@ -39,7 +41,9 @@ type TestGroup struct {
 }

 var (
+	// Groups defines a TestGroup map.
 	Groups = make(map[string]TestGroup)
+	// Test defines a TestGroup object.
 	Test TestGroup

 	serializer runtime.SerializerInfo
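The comments added in this hunk address golint's rule that every exported identifier should have a doc comment beginning with the identifier's own name ("exported X should have comment or be unexported"). A short, self-contained illustration of the convention, using made-up names rather than the commit's:

```go
package example

// MaxRetries is the number of times an operation is retried before giving up.
// Golint expects the comment to start with "MaxRetries", as here.
const MaxRetries = 3

// Registry maps a group name to its configuration.
var Registry = map[string]Config{}

// Config holds per-group settings.
type Config struct {
	// Enabled toggles the group on or off.
	Enabled bool
}
```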
@@ -25,6 +25,7 @@ import (
 	"k8s.io/kubernetes/pkg/features"
 )

+// DefaultBindAllHostIP defines the default ip address used to bind to all host.
 const DefaultBindAllHostIP = "0.0.0.0"

 // ProtocolPort represents a protocol port pair, e.g. tcp:80.
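As background for the newly documented constant: binding to "0.0.0.0" listens on every host interface. A tiny sketch of that behaviour with Go's standard net package; the port number is arbitrary and not taken from this diff:

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	// "0.0.0.0" is the conventional "all interfaces" address, which is what
	// DefaultBindAllHostIP in the hunk above documents. 10251 is a placeholder port.
	ln, err := net.Listen("tcp", net.JoinHostPort("0.0.0.0", "10251"))
	if err != nil {
		fmt.Println("listen failed:", err)
		return
	}
	defer ln.Close()
	fmt.Println("listening on", ln.Addr())
}
```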
@ -68,7 +68,7 @@ func TestSortableList(t *testing.T) {
|
||||||
podList := SortableList{CompFunc: higherPriority}
|
podList := SortableList{CompFunc: higherPriority}
|
||||||
// Add a few Pods with different priorities from lowest to highest priority.
|
// Add a few Pods with different priorities from lowest to highest priority.
|
||||||
for i := 0; i < 10; i++ {
|
for i := 0; i < 10; i++ {
|
||||||
var p int32 = int32(i)
|
var p = int32(i)
|
||||||
pod := &v1.Pod{
|
pod := &v1.Pod{
|
||||||
Spec: v1.PodSpec{
|
Spec: v1.PodSpec{
|
||||||
Containers: []v1.Container{
|
Containers: []v1.Container{
|
||||||
|
|
|
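The one-character change above silences golint's redundant-type warning (roughly: "should omit type int32 from declaration; it will be inferred from the right-hand side"). A quick standalone illustration of the pattern:

```go
package main

import "fmt"

func main() {
	for i := 0; i < 3; i++ {
		// Before: var p int32 = int32(i) — golint flags the explicit type because
		// the conversion on the right-hand side already fixes it as int32.
		var p = int32(i)
		fmt.Println(p)
	}
}
```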
@@ -334,7 +334,7 @@ func TestSchedulerExtender(t *testing.T) {
 			FilterVerb:     filter,
 			PrioritizeVerb: prioritize,
 			Weight:         3,
-			EnableHttps:    false,
+			EnableHTTPS:    false,
 		},
 		{
 			URLPrefix: es2.URL,
@@ -342,14 +342,14 @@ func TestSchedulerExtender(t *testing.T) {
 			PrioritizeVerb: prioritize,
 			BindVerb:       bind,
 			Weight:         4,
-			EnableHttps:    false,
+			EnableHTTPS:    false,
 		},
 		{
 			URLPrefix:        es3.URL,
 			FilterVerb:       filter,
 			PrioritizeVerb:   prioritize,
 			Weight:           10,
-			EnableHttps:      false,
+			EnableHTTPS:      false,
 			NodeCacheCapable: true,
 		},
 	},
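The EnableHttps → EnableHTTPS renames follow golint's initialism convention: acronyms in Go identifiers keep a consistent case (HTTPS, URL, ID) rather than being camel-cased. A small sketch of the convention; the struct and the NodeID field are hypothetical, only URLPrefix and EnableHTTPS appear in this diff:

```go
package example

// extenderConfig is an illustrative struct whose fields follow Go initialism
// naming; golint would flag the commented-out variants.
type extenderConfig struct {
	URLPrefix   string // not UrlPrefix
	EnableHTTPS bool   // not EnableHttps
	NodeID      string // not NodeId (hypothetical field, for illustration only)
}
```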