Fix golint errors in `pkg/scheduler` based on golint check

pull/6/head
tossmilestone 2018-02-08 14:42:19 +08:00
parent a4e49d19f9
commit 3fdacfead5
51 changed files with 512 additions and 304 deletions


@ -14,6 +14,6 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Package scheduler contains a generic Scheduler interface and several
// Package algorithm contains a generic Scheduler interface and several
// implementations.
package algorithm // import "k8s.io/kubernetes/pkg/scheduler/algorithm"


@ -30,28 +30,51 @@ var (
// be made to pass by removing pods, or you change an existing predicate so that
// it can never be made to pass by removing pods, you need to add the predicate
// failure error in nodesWherePreemptionMightHelp() in scheduler/core/generic_scheduler.go
ErrDiskConflict = newPredicateFailureError("NoDiskConflict", "node(s) had no available disk")
ErrVolumeZoneConflict = newPredicateFailureError("NoVolumeZoneConflict", "node(s) had no available volume zone")
ErrNodeSelectorNotMatch = newPredicateFailureError("MatchNodeSelector", "node(s) didn't match node selector")
ErrPodAffinityNotMatch = newPredicateFailureError("MatchInterPodAffinity", "node(s) didn't match pod affinity/anti-affinity")
ErrPodAffinityRulesNotMatch = newPredicateFailureError("PodAffinityRulesNotMatch", "node(s) didn't match pod affinity rules")
ErrPodAntiAffinityRulesNotMatch = newPredicateFailureError("PodAntiAffinityRulesNotMatch", "node(s) didn't match pod anti-affinity rules")
// ErrDiskConflict is used for NoDiskConflict predicate error.
ErrDiskConflict = newPredicateFailureError("NoDiskConflict", "node(s) had no available disk")
// ErrVolumeZoneConflict is used for NoVolumeZoneConflict predicate error.
ErrVolumeZoneConflict = newPredicateFailureError("NoVolumeZoneConflict", "node(s) had no available volume zone")
// ErrNodeSelectorNotMatch is used for MatchNodeSelector predicate error.
ErrNodeSelectorNotMatch = newPredicateFailureError("MatchNodeSelector", "node(s) didn't match node selector")
// ErrPodAffinityNotMatch is used for MatchInterPodAffinity predicate error.
ErrPodAffinityNotMatch = newPredicateFailureError("MatchInterPodAffinity", "node(s) didn't match pod affinity/anti-affinity")
// ErrPodAffinityRulesNotMatch is used for PodAffinityRulesNotMatch predicate error.
ErrPodAffinityRulesNotMatch = newPredicateFailureError("PodAffinityRulesNotMatch", "node(s) didn't match pod affinity rules")
// ErrPodAntiAffinityRulesNotMatch is used for PodAntiAffinityRulesNotMatch predicate error.
ErrPodAntiAffinityRulesNotMatch = newPredicateFailureError("PodAntiAffinityRulesNotMatch", "node(s) didn't match pod anti-affinity rules")
// ErrExistingPodsAntiAffinityRulesNotMatch is used for ExistingPodsAntiAffinityRulesNotMatch predicate error.
ErrExistingPodsAntiAffinityRulesNotMatch = newPredicateFailureError("ExistingPodsAntiAffinityRulesNotMatch", "node(s) didn't satisfy existing pods anti-affinity rules")
ErrTaintsTolerationsNotMatch = newPredicateFailureError("PodToleratesNodeTaints", "node(s) had taints that the pod didn't tolerate")
ErrPodNotMatchHostName = newPredicateFailureError("HostName", "node(s) didn't match the requested hostname")
ErrPodNotFitsHostPorts = newPredicateFailureError("PodFitsHostPorts", "node(s) didn't have free ports for the requested pod ports")
ErrNodeLabelPresenceViolated = newPredicateFailureError("CheckNodeLabelPresence", "node(s) didn't have the requested labels")
ErrServiceAffinityViolated = newPredicateFailureError("CheckServiceAffinity", "node(s) didn't match service affinity")
ErrMaxVolumeCountExceeded = newPredicateFailureError("MaxVolumeCount", "node(s) exceed max volume count")
ErrNodeUnderMemoryPressure = newPredicateFailureError("NodeUnderMemoryPressure", "node(s) had memory pressure")
ErrNodeUnderDiskPressure = newPredicateFailureError("NodeUnderDiskPressure", "node(s) had disk pressure")
ErrNodeOutOfDisk = newPredicateFailureError("NodeOutOfDisk", "node(s) were out of disk space")
ErrNodeNotReady = newPredicateFailureError("NodeNotReady", "node(s) were not ready")
ErrNodeNetworkUnavailable = newPredicateFailureError("NodeNetworkUnavailable", "node(s) had unavailable network")
ErrNodeUnschedulable = newPredicateFailureError("NodeUnschedulable", "node(s) were unschedulable")
ErrNodeUnknownCondition = newPredicateFailureError("NodeUnknownCondition", "node(s) had unknown conditions")
ErrVolumeNodeConflict = newPredicateFailureError("VolumeNodeAffinityConflict", "node(s) had volume node affinity conflict")
ErrVolumeBindConflict = newPredicateFailureError("VolumeBindingNoMatch", "node(s) didn't find available persistent volumes to bind")
// ErrTaintsTolerationsNotMatch is used for PodToleratesNodeTaints predicate error.
ErrTaintsTolerationsNotMatch = newPredicateFailureError("PodToleratesNodeTaints", "node(s) had taints that the pod didn't tolerate")
// ErrPodNotMatchHostName is used for HostName predicate error.
ErrPodNotMatchHostName = newPredicateFailureError("HostName", "node(s) didn't match the requested hostname")
// ErrPodNotFitsHostPorts is used for PodFitsHostPorts predicate error.
ErrPodNotFitsHostPorts = newPredicateFailureError("PodFitsHostPorts", "node(s) didn't have free ports for the requested pod ports")
// ErrNodeLabelPresenceViolated is used for CheckNodeLabelPresence predicate error.
ErrNodeLabelPresenceViolated = newPredicateFailureError("CheckNodeLabelPresence", "node(s) didn't have the requested labels")
// ErrServiceAffinityViolated is used for CheckServiceAffinity predicate error.
ErrServiceAffinityViolated = newPredicateFailureError("CheckServiceAffinity", "node(s) didn't match service affinity")
// ErrMaxVolumeCountExceeded is used for MaxVolumeCount predicate error.
ErrMaxVolumeCountExceeded = newPredicateFailureError("MaxVolumeCount", "node(s) exceed max volume count")
// ErrNodeUnderMemoryPressure is used for NodeUnderMemoryPressure predicate error.
ErrNodeUnderMemoryPressure = newPredicateFailureError("NodeUnderMemoryPressure", "node(s) had memory pressure")
// ErrNodeUnderDiskPressure is used for NodeUnderDiskPressure predicate error.
ErrNodeUnderDiskPressure = newPredicateFailureError("NodeUnderDiskPressure", "node(s) had disk pressure")
// ErrNodeOutOfDisk is used for NodeOutOfDisk predicate error.
ErrNodeOutOfDisk = newPredicateFailureError("NodeOutOfDisk", "node(s) were out of disk space")
// ErrNodeNotReady is used for NodeNotReady predicate error.
ErrNodeNotReady = newPredicateFailureError("NodeNotReady", "node(s) were not ready")
// ErrNodeNetworkUnavailable is used for NodeNetworkUnavailable predicate error.
ErrNodeNetworkUnavailable = newPredicateFailureError("NodeNetworkUnavailable", "node(s) had unavailable network")
// ErrNodeUnschedulable is used for NodeUnschedulable predicate error.
ErrNodeUnschedulable = newPredicateFailureError("NodeUnschedulable", "node(s) were unschedulable")
// ErrNodeUnknownCondition is used for NodeUnknownCondition predicate error.
ErrNodeUnknownCondition = newPredicateFailureError("NodeUnknownCondition", "node(s) had unknown conditions")
// ErrVolumeNodeConflict is used for VolumeNodeAffinityConflict predicate error.
ErrVolumeNodeConflict = newPredicateFailureError("VolumeNodeAffinityConflict", "node(s) had volume node affinity conflict")
// ErrVolumeBindConflict is used for VolumeBindingNoMatch predicate error.
ErrVolumeBindConflict = newPredicateFailureError("VolumeBindingNoMatch", "node(s) didn't find available persistent volumes to bind")
// ErrFakePredicate is used for test only. The fake predicates returning false also returns error
// as ErrFakePredicate.
ErrFakePredicate = newPredicateFailureError("FakePredicateError", "Nodes failed the fake predicate")
@ -67,6 +90,7 @@ type InsufficientResourceError struct {
capacity int64
}
// NewInsufficientResourceError returns an InsufficientResourceError.
func NewInsufficientResourceError(resourceName v1.ResourceName, requested, used, capacity int64) *InsufficientResourceError {
return &InsufficientResourceError{
ResourceName: resourceName,
@ -81,14 +105,17 @@ func (e *InsufficientResourceError) Error() string {
e.ResourceName, e.requested, e.used, e.capacity)
}
// GetReason returns the reason of the InsufficientResourceError.
func (e *InsufficientResourceError) GetReason() string {
return fmt.Sprintf("Insufficient %v", e.ResourceName)
}
// GetInsufficientAmount returns the amount of the insufficient resource of the error.
func (e *InsufficientResourceError) GetInsufficientAmount() int64 {
return e.requested - (e.capacity - e.used)
}
// PredicateFailureError describes a failure error of predicate.
type PredicateFailureError struct {
PredicateName string
PredicateDesc string
@ -102,18 +129,22 @@ func (e *PredicateFailureError) Error() string {
return fmt.Sprintf("Predicate %s failed", e.PredicateName)
}
// GetReason returns the reason of the PredicateFailureError.
func (e *PredicateFailureError) GetReason() string {
return e.PredicateDesc
}
// FailureReason describes a failure reason.
type FailureReason struct {
reason string
}
// NewFailureReason creates a FailureReason with message.
func NewFailureReason(msg string) *FailureReason {
return &FailureReason{reason: msg}
}
// GetReason returns the reason of the FailureReason.
func (e *FailureReason) GetReason() string {
return e.reason
}
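Most of the added lines in this hunk exist to satisfy golint's doc-comment rule: every exported identifier needs a comment that begins with its own name, otherwise golint reports, for example, "exported var ErrDiskConflict should have comment or be unexported". A minimal sketch of the pattern outside the scheduler code, using a hypothetical ErrExample:

package example

import "errors"

// ErrExample is used for a hypothetical Example predicate error; the comment
// starts with the identifier's own name, which is what golint checks for.
var ErrExample = errors.New("node(s) failed the example predicate")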


@ -29,6 +29,7 @@ import (
"github.com/golang/glog"
)
// PredicateMetadataFactory defines a factory of predicate metadata.
type PredicateMetadataFactory struct {
podLister algorithm.PodLister
}
@ -57,18 +58,20 @@ type predicateMetadata struct {
// Ensure that predicateMetadata implements algorithm.PredicateMetadata.
var _ algorithm.PredicateMetadata = &predicateMetadata{}
// PredicateMetadataProducer: Helper types/variables...
// PredicateMetadataProducer function produces predicate metadata.
type PredicateMetadataProducer func(pm *predicateMetadata)
var predicateMetaProducerRegisterLock sync.Mutex
var predicateMetadataProducers map[string]PredicateMetadataProducer = make(map[string]PredicateMetadataProducer)
var predicateMetadataProducers = make(map[string]PredicateMetadataProducer)
// RegisterPredicateMetadataProducer registers a PredicateMetadataProducer.
func RegisterPredicateMetadataProducer(predicateName string, precomp PredicateMetadataProducer) {
predicateMetaProducerRegisterLock.Lock()
defer predicateMetaProducerRegisterLock.Unlock()
predicateMetadataProducers[predicateName] = precomp
}
// NewPredicateMetadataFactory creates a PredicateMetadataFactory.
func NewPredicateMetadataFactory(podLister algorithm.PodLister) algorithm.PredicateMetadataProducer {
factory := &PredicateMetadataFactory{
podLister,
@ -105,7 +108,7 @@ func (pfactory *PredicateMetadataFactory) GetMetadata(pod *v1.Pod, nodeNameToInf
func (meta *predicateMetadata) RemovePod(deletedPod *v1.Pod) error {
deletedPodFullName := schedutil.GetPodFullName(deletedPod)
if deletedPodFullName == schedutil.GetPodFullName(meta.pod) {
return fmt.Errorf("deletedPod and meta.pod must not be the same.")
return fmt.Errorf("deletedPod and meta.pod must not be the same")
}
// Delete any anti-affinity rule from the deletedPod.
delete(meta.matchingAntiAffinityTerms, deletedPodFullName)
@ -132,10 +135,10 @@ func (meta *predicateMetadata) RemovePod(deletedPod *v1.Pod) error {
func (meta *predicateMetadata) AddPod(addedPod *v1.Pod, nodeInfo *schedulercache.NodeInfo) error {
addedPodFullName := schedutil.GetPodFullName(addedPod)
if addedPodFullName == schedutil.GetPodFullName(meta.pod) {
return fmt.Errorf("addedPod and meta.pod must not be the same.")
return fmt.Errorf("addedPod and meta.pod must not be the same")
}
if nodeInfo.Node() == nil {
return fmt.Errorf("Invalid node in nodeInfo.")
return fmt.Errorf("invalid node in nodeInfo")
}
// Add matching anti-affinity terms of the addedPod to the map.
podMatchingTerms, err := getMatchingAntiAffinityTermsOfExistingPod(meta.pod, addedPod, nodeInfo.Node())
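For context on how these producers are consumed: RegisterPredicateMetadataProducer stores the function under the predicate's name for later use when metadata is built. A rough sketch of a registration, assuming the snippet lives inside this predicates package (predicateMetadata is unexported) and using a made-up predicate name; the scheduler's factory wires real producers up differently:

package predicates

func init() {
    // "HypotheticalPred" is an illustrative name, not a real predicate.
    RegisterPredicateMetadataProducer("HypotheticalPred", func(pm *predicateMetadata) {
        // precompute whatever the predicate needs and cache it on pm
    })
}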


@ -92,24 +92,24 @@ var _ = sort.Interface(&sortableServices{})
// Note: this function does not compare podRequest.
func predicateMetadataEquivalent(meta1, meta2 *predicateMetadata) error {
if !reflect.DeepEqual(meta1.pod, meta2.pod) {
return fmt.Errorf("pods are not the same.")
return fmt.Errorf("pods are not the same")
}
if meta1.podBestEffort != meta2.podBestEffort {
return fmt.Errorf("podBestEfforts are not equal.")
return fmt.Errorf("podBestEfforts are not equal")
}
if meta1.serviceAffinityInUse != meta1.serviceAffinityInUse {
return fmt.Errorf("serviceAffinityInUses are not equal.")
return fmt.Errorf("serviceAffinityInUses are not equal")
}
if len(meta1.podPorts) != len(meta2.podPorts) {
return fmt.Errorf("podPorts are not equal.")
return fmt.Errorf("podPorts are not equal")
}
for !reflect.DeepEqual(meta1.podPorts, meta2.podPorts) {
return fmt.Errorf("podPorts are not equal.")
return fmt.Errorf("podPorts are not equal")
}
sortAntiAffinityTerms(meta1.matchingAntiAffinityTerms)
sortAntiAffinityTerms(meta2.matchingAntiAffinityTerms)
if !reflect.DeepEqual(meta1.matchingAntiAffinityTerms, meta2.matchingAntiAffinityTerms) {
return fmt.Errorf("matchingAntiAffinityTerms are not euqal.")
return fmt.Errorf("matchingAntiAffinityTerms are not euqal")
}
if meta1.serviceAffinityInUse {
sortablePods1 := sortablePods(meta1.serviceAffinityMatchingPodList)
@ -117,7 +117,7 @@ func predicateMetadataEquivalent(meta1, meta2 *predicateMetadata) error {
sortablePods2 := sortablePods(meta2.serviceAffinityMatchingPodList)
sort.Sort(sortablePods2)
if !reflect.DeepEqual(sortablePods1, sortablePods2) {
return fmt.Errorf("serviceAffinityMatchingPodLists are not euqal.")
return fmt.Errorf("serviceAffinityMatchingPodLists are not euqal")
}
sortableServices1 := sortableServices(meta1.serviceAffinityMatchingPodServices)
@ -125,7 +125,7 @@ func predicateMetadataEquivalent(meta1, meta2 *predicateMetadata) error {
sortableServices2 := sortableServices(meta2.serviceAffinityMatchingPodServices)
sort.Sort(sortableServices2)
if !reflect.DeepEqual(sortableServices1, sortableServices2) {
return fmt.Errorf("serviceAffinityMatchingPodServices are not euqal.")
return fmt.Errorf("serviceAffinityMatchingPodServices are not euqal")
}
}
return nil


@ -48,25 +48,44 @@ import (
)
const (
MatchInterPodAffinityPred = "MatchInterPodAffinity"
CheckVolumeBindingPred = "CheckVolumeBinding"
CheckNodeConditionPred = "CheckNodeCondition"
GeneralPred = "GeneralPredicates"
HostNamePred = "HostName"
PodFitsHostPortsPred = "PodFitsHostPorts"
MatchNodeSelectorPred = "MatchNodeSelector"
PodFitsResourcesPred = "PodFitsResources"
NoDiskConflictPred = "NoDiskConflict"
PodToleratesNodeTaintsPred = "PodToleratesNodeTaints"
// MatchInterPodAffinityPred defines the name of predicate MatchInterPodAffinity.
MatchInterPodAffinityPred = "MatchInterPodAffinity"
// CheckVolumeBindingPred defines the name of predicate CheckVolumeBinding.
CheckVolumeBindingPred = "CheckVolumeBinding"
// CheckNodeConditionPred defines the name of predicate CheckNodeCondition.
CheckNodeConditionPred = "CheckNodeCondition"
// GeneralPred defines the name of predicate GeneralPredicates.
GeneralPred = "GeneralPredicates"
// HostNamePred defines the name of predicate HostName.
HostNamePred = "HostName"
// PodFitsHostPortsPred defines the name of predicate PodFitsHostPorts.
PodFitsHostPortsPred = "PodFitsHostPorts"
// MatchNodeSelectorPred defines the name of predicate MatchNodeSelector.
MatchNodeSelectorPred = "MatchNodeSelector"
// PodFitsResourcesPred defines the name of predicate PodFitsResources.
PodFitsResourcesPred = "PodFitsResources"
// NoDiskConflictPred defines the name of predicate NoDiskConflict.
NoDiskConflictPred = "NoDiskConflict"
// PodToleratesNodeTaintsPred defines the name of predicate PodToleratesNodeTaints.
PodToleratesNodeTaintsPred = "PodToleratesNodeTaints"
// PodToleratesNodeNoExecuteTaintsPred defines the name of predicate PodToleratesNodeNoExecuteTaints.
PodToleratesNodeNoExecuteTaintsPred = "PodToleratesNodeNoExecuteTaints"
CheckNodeLabelPresencePred = "CheckNodeLabelPresence"
checkServiceAffinityPred = "checkServiceAffinity"
MaxEBSVolumeCountPred = "MaxEBSVolumeCount"
MaxGCEPDVolumeCountPred = "MaxGCEPDVolumeCount"
MaxAzureDiskVolumeCountPred = "MaxAzureDiskVolumeCount"
NoVolumeZoneConflictPred = "NoVolumeZoneConflict"
CheckNodeMemoryPressurePred = "CheckNodeMemoryPressure"
CheckNodeDiskPressurePred = "CheckNodeDiskPressure"
// CheckNodeLabelPresencePred defines the name of predicate CheckNodeLabelPresence.
CheckNodeLabelPresencePred = "CheckNodeLabelPresence"
// checkServiceAffinityPred defines the name of predicate checkServiceAffinity.
checkServiceAffinityPred = "checkServiceAffinity"
// MaxEBSVolumeCountPred defines the name of predicate MaxEBSVolumeCount.
MaxEBSVolumeCountPred = "MaxEBSVolumeCount"
// MaxGCEPDVolumeCountPred defines the name of predicate MaxGCEPDVolumeCount.
MaxGCEPDVolumeCountPred = "MaxGCEPDVolumeCount"
// MaxAzureDiskVolumeCountPred defines the name of predicate MaxAzureDiskVolumeCount.
MaxAzureDiskVolumeCountPred = "MaxAzureDiskVolumeCount"
// NoVolumeZoneConflictPred defines the name of predicate NoVolumeZoneConflict.
NoVolumeZoneConflictPred = "NoVolumeZoneConflict"
// CheckNodeMemoryPressurePred defines the name of predicate CheckNodeMemoryPressure.
CheckNodeMemoryPressurePred = "CheckNodeMemoryPressure"
// CheckNodeDiskPressurePred defines the name of predicate CheckNodeDiskPressure.
CheckNodeDiskPressurePred = "CheckNodeDiskPressure"
// DefaultMaxEBSVolumes is the limit for volumes attached to an instance.
// Amazon recommends no more than 40; the system root volume uses at least one.
@ -83,11 +102,11 @@ const (
// KubeMaxPDVols defines the maximum number of PD Volumes per kubelet
KubeMaxPDVols = "KUBE_MAX_PD_VOLS"
// for EBSVolumeFilter
// EBSVolumeFilterType defines the filter name for EBSVolumeFilter.
EBSVolumeFilterType = "EBS"
// for GCEPDVolumeFilter
// GCEPDVolumeFilterType defines the filter name for GCEPDVolumeFilter.
GCEPDVolumeFilterType = "GCE"
// for AzureDiskVolumeFilter
// AzureDiskVolumeFilterType defines the filter name for AzureDiskVolumeFilter.
AzureDiskVolumeFilterType = "AzureDisk"
)
@ -114,11 +133,12 @@ var (
CheckNodeMemoryPressurePred, CheckNodeDiskPressurePred, MatchInterPodAffinityPred}
)
// NodeInfo: Other types for predicate functions...
// NodeInfo interface represents anything that can get node object from node ID.
type NodeInfo interface {
GetNodeInfo(nodeID string) (*v1.Node, error)
}
// PersistentVolumeInfo interface represents anything that can get persistent volume object by PV ID.
type PersistentVolumeInfo interface {
GetPersistentVolumeInfo(pvID string) (*v1.PersistentVolume, error)
}
@ -128,18 +148,23 @@ type CachedPersistentVolumeInfo struct {
corelisters.PersistentVolumeLister
}
func PredicatesOrdering() []string {
// Ordering returns the ordering of predicates.
func Ordering() []string {
return predicatesOrdering
}
// SetPredicatesOrdering sets the ordering of predicates.
func SetPredicatesOrdering(names []string) {
predicatesOrdering = names
}
// GetPersistentVolumeInfo returns a persistent volume object by PV ID.
func (c *CachedPersistentVolumeInfo) GetPersistentVolumeInfo(pvID string) (*v1.PersistentVolume, error) {
return c.Get(pvID)
}
// PersistentVolumeClaimInfo interface represents anything that can get a PVC object in
// specified namespace with specified name.
type PersistentVolumeClaimInfo interface {
GetPersistentVolumeClaimInfo(namespace string, name string) (*v1.PersistentVolumeClaim, error)
}
@ -154,6 +179,7 @@ func (c *CachedPersistentVolumeClaimInfo) GetPersistentVolumeClaimInfo(namespace
return c.PersistentVolumeClaims(namespace).Get(name)
}
// CachedNodeInfo implements NodeInfo
type CachedNodeInfo struct {
corelisters.NodeLister
}
@ -173,6 +199,7 @@ func (c *CachedNodeInfo) GetNodeInfo(id string) (*v1.Node, error) {
return node, nil
}
// StorageClassInfo interface represents anything that can get a storage class object by class name.
type StorageClassInfo interface {
GetStorageClassInfo(className string) (*storagev1.StorageClass, error)
}
@ -182,6 +209,7 @@ type CachedStorageClassInfo struct {
storagelisters.StorageClassLister
}
// GetStorageClassInfo get StorageClass by class name.
func (c *CachedStorageClassInfo) GetStorageClassInfo(className string) (*storagev1.StorageClass, error) {
return c.Get(className)
}
@ -253,6 +281,7 @@ func NoDiskConflict(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *sch
return true, nil, nil
}
// MaxPDVolumeCountChecker contains information to check the max number of volumes for a predicate.
type MaxPDVolumeCountChecker struct {
filter VolumeFilter
maxVolumes int
@ -341,13 +370,13 @@ func (c *MaxPDVolumeCountChecker) filterVolumes(volumes []v1.Volume, namespace s
// Until we know real ID of the volume use namespace/pvcName as substitute
// with a random prefix (calculated and stored inside 'c' during initialization)
// to avoid conflicts with existing volume IDs.
pvId := fmt.Sprintf("%s-%s/%s", c.randomVolumeIDPrefix, namespace, pvcName)
pvID := fmt.Sprintf("%s-%s/%s", c.randomVolumeIDPrefix, namespace, pvcName)
pvc, err := c.pvcInfo.GetPersistentVolumeClaimInfo(namespace, pvcName)
if err != nil || pvc == nil {
// if the PVC is not found, log the error and count the PV towards the PV limit
glog.V(4).Infof("Unable to look up PVC info for %s/%s, assuming PVC matches predicate when counting limits: %v", namespace, pvcName, err)
filteredVolumes[pvId] = true
filteredVolumes[pvID] = true
continue
}
@ -358,7 +387,7 @@ func (c *MaxPDVolumeCountChecker) filterVolumes(volumes []v1.Volume, namespace s
// original PV where it was bound to -> log the error and count
// the PV towards the PV limit
glog.V(4).Infof("PVC %s/%s is not bound, assuming PVC matches predicate when counting limits", namespace, pvcName)
filteredVolumes[pvId] = true
filteredVolumes[pvID] = true
continue
}
@ -367,7 +396,7 @@ func (c *MaxPDVolumeCountChecker) filterVolumes(volumes []v1.Volume, namespace s
// if the PV is not found, log the error
// and count the PV towards the PV limit
glog.V(4).Infof("Unable to look up PV info for %s/%s/%s, assuming PV matches predicate when counting limits: %v", namespace, pvcName, pvName, err)
filteredVolumes[pvId] = true
filteredVolumes[pvID] = true
continue
}
@ -424,7 +453,7 @@ func (c *MaxPDVolumeCountChecker) predicate(pod *v1.Pod, meta algorithm.Predicat
}
// EBSVolumeFilter is a VolumeFilter for filtering AWS ElasticBlockStore Volumes
var EBSVolumeFilter VolumeFilter = VolumeFilter{
var EBSVolumeFilter = VolumeFilter{
FilterVolume: func(vol *v1.Volume) (string, bool) {
if vol.AWSElasticBlockStore != nil {
return vol.AWSElasticBlockStore.VolumeID, true
@ -441,7 +470,7 @@ var EBSVolumeFilter VolumeFilter = VolumeFilter{
}
// GCEPDVolumeFilter is a VolumeFilter for filtering GCE PersistentDisk Volumes
var GCEPDVolumeFilter VolumeFilter = VolumeFilter{
var GCEPDVolumeFilter = VolumeFilter{
FilterVolume: func(vol *v1.Volume) (string, bool) {
if vol.GCEPersistentDisk != nil {
return vol.GCEPersistentDisk.PDName, true
@ -458,7 +487,7 @@ var GCEPDVolumeFilter VolumeFilter = VolumeFilter{
}
// AzureDiskVolumeFilter is a VolumeFilter for filtering Azure Disk Volumes
var AzureDiskVolumeFilter VolumeFilter = VolumeFilter{
var AzureDiskVolumeFilter = VolumeFilter{
FilterVolume: func(vol *v1.Volume) (string, bool) {
if vol.AzureDisk != nil {
return vol.AzureDisk.DiskName, true
@ -474,6 +503,7 @@ var AzureDiskVolumeFilter VolumeFilter = VolumeFilter{
},
}
// VolumeZoneChecker contains information to check the volume zone for a predicate.
type VolumeZoneChecker struct {
pvInfo PersistentVolumeInfo
pvcInfo PersistentVolumeClaimInfo
@ -818,11 +848,14 @@ func PodFitsHost(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedu
return false, []algorithm.PredicateFailureReason{ErrPodNotMatchHostName}, nil
}
// NodeLabelChecker contains information to check node labels for a predicate.
type NodeLabelChecker struct {
labels []string
presence bool
}
// NewNodeLabelPredicate creates a predicate which evaluates whether a pod can fit based on the
// node labels which match a filter that it requests.
func NewNodeLabelPredicate(labels []string, presence bool) algorithm.FitPredicate {
labelChecker := &NodeLabelChecker{
labels: labels,
@ -860,6 +893,7 @@ func (n *NodeLabelChecker) CheckNodeLabelPresence(pod *v1.Pod, meta algorithm.Pr
return true, nil, nil
}
// ServiceAffinity defines a struct used for create service affinity predicates.
type ServiceAffinity struct {
podLister algorithm.PodLister
serviceLister algorithm.ServiceLister
@ -889,6 +923,7 @@ func (s *ServiceAffinity) serviceAffinityMetadataProducer(pm *predicateMetadata)
pm.serviceAffinityMatchingPodList = FilterPodsByNamespace(allMatches, pm.pod.Namespace)
}
// NewServiceAffinityPredicate creates a ServiceAffinity.
func NewServiceAffinityPredicate(podLister algorithm.PodLister, serviceLister algorithm.ServiceLister, nodeInfo NodeInfo, labels []string) (algorithm.FitPredicate, PredicateMetadataProducer) {
affinity := &ServiceAffinity{
podLister: podLister,
@ -1071,11 +1106,13 @@ func EssentialPredicates(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo
return len(predicateFails) == 0, predicateFails, nil
}
// PodAffinityChecker contains information to check pod affinity.
type PodAffinityChecker struct {
info NodeInfo
podLister algorithm.PodLister
}
// NewPodAffinityPredicate creates a PodAffinityChecker.
func NewPodAffinityPredicate(info NodeInfo, podLister algorithm.PodLister) algorithm.FitPredicate {
checker := &PodAffinityChecker{
info: info,
@ -1151,6 +1188,7 @@ func (c *PodAffinityChecker) anyPodMatchesPodAffinityTerm(pod *v1.Pod, pods []*v
return false, matchingPodExists, nil
}
// GetPodAffinityTerms gets pod affinity terms by a pod affinity object.
func GetPodAffinityTerms(podAffinity *v1.PodAffinity) (terms []v1.PodAffinityTerm) {
if podAffinity != nil {
if len(podAffinity.RequiredDuringSchedulingIgnoredDuringExecution) != 0 {
@ -1164,6 +1202,7 @@ func GetPodAffinityTerms(podAffinity *v1.PodAffinity) (terms []v1.PodAffinityTer
return terms
}
// GetPodAntiAffinityTerms gets pod affinity terms by a pod anti-affinity.
func GetPodAntiAffinityTerms(podAntiAffinity *v1.PodAntiAffinity) (terms []v1.PodAffinityTerm) {
if podAntiAffinity != nil {
if len(podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution) != 0 {
@ -1496,6 +1535,7 @@ func CheckNodeConditionPredicate(pod *v1.Pod, meta algorithm.PredicateMetadata,
return len(reasons) == 0, reasons, nil
}
// VolumeBindingChecker contains information to check a volume binding.
type VolumeBindingChecker struct {
binder *volumebinder.VolumeBinder
}
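The pvId→pvID rename above, like the zoneId→zoneID, EnableHttps→EnableHTTPS, and DefaultMilliCpuRequest→DefaultMilliCPURequest renames later in this commit, follows golint's common-initialisms rule: initialisms such as ID, CPU, and HTTPS keep a single case. A small sketch of the preferred spelling, mirroring the Sprintf above with an illustrative helper name:

package example

import "fmt"

// buildVolumeKey builds the same kind of namespaced key as the code above;
// golint wants "pvID", never "pvId".
func buildVolumeKey(prefix, namespace, pvcName string) string {
    pvID := fmt.Sprintf("%s-%s/%s", prefix, namespace, pvcName)
    return pvID
}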


@ -3145,7 +3145,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
nodeInfo.SetNode(&node)
nodeInfoMap := map[string]*schedulercache.NodeInfo{node.Name: nodeInfo}
var meta algorithm.PredicateMetadata = nil
var meta algorithm.PredicateMetadata
if !test.nometa {
meta = PredicateMetadata(test.pod, nodeInfoMap)


@ -23,8 +23,10 @@ import (
storagev1 "k8s.io/api/storage/v1"
)
// FakePersistentVolumeClaimInfo declares a []v1.PersistentVolumeClaim type for testing.
type FakePersistentVolumeClaimInfo []v1.PersistentVolumeClaim
// GetPersistentVolumeClaimInfo gets PVC matching the namespace and PVC ID.
func (pvcs FakePersistentVolumeClaimInfo) GetPersistentVolumeClaimInfo(namespace string, pvcID string) (*v1.PersistentVolumeClaim, error) {
for _, pvc := range pvcs {
if pvc.Name == pvcID && pvc.Namespace == namespace {
@ -34,15 +36,19 @@ func (pvcs FakePersistentVolumeClaimInfo) GetPersistentVolumeClaimInfo(namespace
return nil, fmt.Errorf("Unable to find persistent volume claim: %s/%s", namespace, pvcID)
}
// FakeNodeInfo declares a v1.Node type for testing.
type FakeNodeInfo v1.Node
// GetNodeInfo return a fake node info object.
func (n FakeNodeInfo) GetNodeInfo(nodeName string) (*v1.Node, error) {
node := v1.Node(n)
return &node, nil
}
// FakeNodeListInfo declares a []v1.Node type for testing.
type FakeNodeListInfo []v1.Node
// GetNodeInfo returns a fake node object in the fake nodes.
func (nodes FakeNodeListInfo) GetNodeInfo(nodeName string) (*v1.Node, error) {
for _, node := range nodes {
if node.Name == nodeName {
@ -52,8 +58,10 @@ func (nodes FakeNodeListInfo) GetNodeInfo(nodeName string) (*v1.Node, error) {
return nil, fmt.Errorf("Unable to find node: %s", nodeName)
}
// FakePersistentVolumeInfo declares a []v1.PersistentVolume type for testing.
type FakePersistentVolumeInfo []v1.PersistentVolume
// GetPersistentVolumeInfo returns a fake PV object in the fake PVs by PV ID.
func (pvs FakePersistentVolumeInfo) GetPersistentVolumeInfo(pvID string) (*v1.PersistentVolume, error) {
for _, pv := range pvs {
if pv.Name == pvID {
@ -63,8 +71,10 @@ func (pvs FakePersistentVolumeInfo) GetPersistentVolumeInfo(pvID string) (*v1.Pe
return nil, fmt.Errorf("Unable to find persistent volume: %s", pvID)
}
// FakeStorageClassInfo declares a []storagev1.StorageClass type for testing.
type FakeStorageClassInfo []storagev1.StorageClass
// GetStorageClassInfo returns a fake storage class object in the fake storage classes by name.
func (classes FakeStorageClassInfo) GetStorageClassInfo(name string) (*storagev1.StorageClass, error) {
for _, sc := range classes {
if sc.Name == name {


@ -92,17 +92,18 @@ func (e *EquivalencePodGenerator) getEquivalencePod(pod *v1.Pod) interface{} {
// to be equivalent
for _, ref := range pod.OwnerReferences {
if ref.Controller != nil && *ref.Controller {
if pvcSet, err := e.getPVCSet(pod); err == nil {
pvcSet, err := e.getPVCSet(pod)
if err == nil {
// A pod can only belong to one controller, so let's return.
return &EquivalencePod{
ControllerRef: ref,
PVCSet: pvcSet,
}
} else {
// If error encountered, log warning and return nil (i.e. no equivalent pod found)
glog.Warningf("[EquivalencePodGenerator] for pod: %v failed due to: %v", pod.GetName(), err)
return nil
}
// If error encountered, log warning and return nil (i.e. no equivalent pod found)
glog.Warningf("[EquivalencePodGenerator] for pod: %v failed due to: %v", pod.GetName(), err)
return nil
}
}
return nil


@ -28,7 +28,7 @@ import (
)
func TestImageLocalityPriority(t *testing.T) {
test_40_250 := v1.PodSpec{
test40250 := v1.PodSpec{
Containers: []v1.Container{
{
Image: "gcr.io/40",
@ -39,7 +39,7 @@ func TestImageLocalityPriority(t *testing.T) {
},
}
test_40_140 := v1.PodSpec{
test40140 := v1.PodSpec{
Containers: []v1.Container{
{
Image: "gcr.io/40",
@ -50,7 +50,7 @@ func TestImageLocalityPriority(t *testing.T) {
},
}
test_min_max := v1.PodSpec{
testMinMax := v1.PodSpec{
Containers: []v1.Container{
{
Image: "gcr.io/10",
@ -61,7 +61,7 @@ func TestImageLocalityPriority(t *testing.T) {
},
}
node_40_140_2000 := v1.NodeStatus{
node401402000 := v1.NodeStatus{
Images: []v1.ContainerImage{
{
Names: []string{
@ -87,7 +87,7 @@ func TestImageLocalityPriority(t *testing.T) {
},
}
node_250_10 := v1.NodeStatus{
node25010 := v1.NodeStatus{
Images: []v1.ContainerImage{
{
Names: []string{
@ -122,8 +122,8 @@ func TestImageLocalityPriority(t *testing.T) {
// Node2
// Image: gcr.io/250 250MB
// Score: (250M-23M)/97.7M + 1 = 3
pod: &v1.Pod{Spec: test_40_250},
nodes: []*v1.Node{makeImageNode("machine1", node_40_140_2000), makeImageNode("machine2", node_250_10)},
pod: &v1.Pod{Spec: test40250},
nodes: []*v1.Node{makeImageNode("machine1", node401402000), makeImageNode("machine2", node25010)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 1}, {Host: "machine2", Score: 3}},
test: "two images spread on two nodes, prefer the larger image one",
},
@ -137,8 +137,8 @@ func TestImageLocalityPriority(t *testing.T) {
// Node2
// Image: not present
// Score: 0
pod: &v1.Pod{Spec: test_40_140},
nodes: []*v1.Node{makeImageNode("machine1", node_40_140_2000), makeImageNode("machine2", node_250_10)},
pod: &v1.Pod{Spec: test40140},
nodes: []*v1.Node{makeImageNode("machine1", node401402000), makeImageNode("machine2", node25010)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 2}, {Host: "machine2", Score: 0}},
test: "two images on one node, prefer this node",
},
@ -152,8 +152,8 @@ func TestImageLocalityPriority(t *testing.T) {
// Node2
// Image: gcr.io/10 10MB
// Score: 10 < min score = 0
pod: &v1.Pod{Spec: test_min_max},
nodes: []*v1.Node{makeImageNode("machine1", node_40_140_2000), makeImageNode("machine2", node_250_10)},
pod: &v1.Pod{Spec: testMinMax},
nodes: []*v1.Node{makeImageNode("machine1", node401402000), makeImageNode("machine2", node25010)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}},
test: "if exceed limit, use limit",
},


@ -32,6 +32,7 @@ import (
"github.com/golang/glog"
)
// InterPodAffinity contains information to calculate inter pod affinity.
type InterPodAffinity struct {
info predicates.NodeInfo
nodeLister algorithm.NodeLister
@ -39,6 +40,7 @@ type InterPodAffinity struct {
hardPodAffinityWeight int32
}
// NewInterPodAffinityPriority creates an InterPodAffinity.
func NewInterPodAffinityPriority(
info predicates.NodeInfo,
nodeLister algorithm.NodeLister,


@ -24,7 +24,7 @@ import (
var (
leastResourcePriority = &ResourceAllocationPriority{"LeastResourceAllocation", leastResourceScorer}
// LeastRequestedPriority is a priority function that favors nodes with fewer requested resources.
// LeastRequestedPriorityMap is a priority function that favors nodes with fewer requested resources.
// It calculates the percentage of memory and CPU requested by pods scheduled on the node, and
// prioritizes based on the minimum of the average of the fraction of requested to capacity.
//


@ -25,6 +25,7 @@ import (
"k8s.io/kubernetes/pkg/scheduler/schedulercache"
)
// PriorityMetadataFactory is a factory to produce PriorityMetadata.
type PriorityMetadataFactory struct {
serviceLister algorithm.ServiceLister
controllerLister algorithm.ControllerLister
@ -32,6 +33,7 @@ type PriorityMetadataFactory struct {
statefulSetLister algorithm.StatefulSetLister
}
// NewPriorityMetadataFactory creates a PriorityMetadataFactory.
func NewPriorityMetadataFactory(serviceLister algorithm.ServiceLister, controllerLister algorithm.ControllerLister, replicaSetLister algorithm.ReplicaSetLister, statefulSetLister algorithm.StatefulSetLister) algorithm.PriorityMetadataProducer {
factory := &PriorityMetadataFactory{
serviceLister: serviceLister,


@ -32,7 +32,7 @@ import (
func TestPriorityMetadata(t *testing.T) {
nonZeroReqs := &schedulercache.Resource{}
nonZeroReqs.MilliCPU = priorityutil.DefaultMilliCpuRequest
nonZeroReqs.MilliCPU = priorityutil.DefaultMilliCPURequest
nonZeroReqs.Memory = priorityutil.DefaultMemoryRequest
specifiedReqs := &schedulercache.Resource{}


@ -24,7 +24,7 @@ import (
var (
mostResourcePriority = &ResourceAllocationPriority{"MostResourceAllocation", mostResourceScorer}
// MostRequestedPriority is a priority function that favors nodes with most requested resources.
// MostRequestedPriorityMap is a priority function that favors nodes with most requested resources.
// It calculates the percentage of memory and CPU requested by pods scheduled on the node, and prioritizes
// based on the maximum of the average of the fraction of requested to capacity.
// Details: (cpu(10 * sum(requested) / capacity) + memory(10 * sum(requested) / capacity)) / 2


@ -83,7 +83,7 @@ func TestMostRequested(t *testing.T) {
},
},
}
bigCpuAndMemory := v1.PodSpec{
bigCPUAndMemory := v1.PodSpec{
NodeName: "machine1",
Containers: []v1.Container{
{
@ -201,7 +201,7 @@ func TestMostRequested(t *testing.T) {
Memory Score: 9000 > 8000 return 0
Node2 Score: (5 + 0) / 2 = 2
*/
pod: &v1.Pod{Spec: bigCpuAndMemory},
pod: &v1.Pod{Spec: bigCPUAndMemory},
nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 10000, 8000)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 4}, {Host: "machine2", Score: 2}},
test: "resources requested with more than the node, pods scheduled with resources",


@ -26,7 +26,7 @@ import (
"k8s.io/kubernetes/pkg/scheduler/schedulercache"
)
// CalculateNodeAffinityPriority prioritizes nodes according to node affinity scheduling preferences
// CalculateNodeAffinityPriorityMap prioritizes nodes according to node affinity scheduling preferences
// indicated in PreferredDuringSchedulingIgnoredDuringExecution. Each time a node match a preferredSchedulingTerm,
// it will a get an add of preferredSchedulingTerm.Weight. Thus, the more preferredSchedulingTerms
// the node satisfies and the more the preferredSchedulingTerm that is satisfied weights, the higher
@ -74,4 +74,5 @@ func CalculateNodeAffinityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *s
}, nil
}
// CalculateNodeAffinityPriorityReduce is a reduce function for node affinity priority calculation.
var CalculateNodeAffinityPriorityReduce = NormalizeReduce(schedulerapi.MaxPriority, false)


@ -26,11 +26,13 @@ import (
"k8s.io/kubernetes/pkg/scheduler/schedulercache"
)
// NodeLabelPrioritizer contains information to calculate node label priority.
type NodeLabelPrioritizer struct {
label string
presence bool
}
// NewNodeLabelPriority creates a NodeLabelPrioritizer.
func NewNodeLabelPriority(label string, presence bool) (algorithm.PriorityMapFunction, algorithm.PriorityReduceFunction) {
labelPrioritizer := &NodeLabelPrioritizer{
label: label,
@ -39,7 +41,7 @@ func NewNodeLabelPriority(label string, presence bool) (algorithm.PriorityMapFun
return labelPrioritizer.CalculateNodeLabelPriorityMap, nil
}
// CalculateNodeLabelPriority checks whether a particular label exists on a node or not, regardless of its value.
// CalculateNodeLabelPriorityMap checks whether a particular label exists on a node or not, regardless of its value.
// If presence is true, prioritizes nodes that have the specified label, regardless of value.
// If presence is false, prioritizes nodes that do not have the specified label.
func (n *NodeLabelPrioritizer) CalculateNodeLabelPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {


@ -27,6 +27,8 @@ import (
"k8s.io/kubernetes/pkg/scheduler/schedulercache"
)
// CalculateNodePreferAvoidPodsPriorityMap priorities nodes according to the node annotation
// "scheduler.alpha.kubernetes.io/preferAvoidPods".
func CalculateNodePreferAvoidPodsPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
node := nodeInfo.Node()
if node == nil {


@ -26,11 +26,14 @@ import (
"k8s.io/kubernetes/pkg/scheduler/schedulercache"
)
// ResourceAllocationPriority contains information to calculate resource allocation priority.
type ResourceAllocationPriority struct {
Name string
scorer func(requested, allocable *schedulercache.Resource) int64
}
// PriorityMap priorities nodes according to the resource allocations on the node.
// It will use `scorer` function to calculate the score.
func (r *ResourceAllocationPriority) PriorityMap(
pod *v1.Pod,
meta interface{},


@ -33,6 +33,7 @@ import (
// TODO: Any way to justify this weighting?
const zoneWeighting float64 = 2.0 / 3.0
// SelectorSpread contains information to calculate selector spread priority.
type SelectorSpread struct {
serviceLister algorithm.ServiceLister
controllerLister algorithm.ControllerLister
@ -40,6 +41,7 @@ type SelectorSpread struct {
statefulSetLister algorithm.StatefulSetLister
}
// NewSelectorSpreadPriority creates a SelectorSpread.
func NewSelectorSpreadPriority(
serviceLister algorithm.ServiceLister,
controllerLister algorithm.ControllerLister,
@ -125,16 +127,16 @@ func (s *SelectorSpread) CalculateSpreadPriorityReduce(pod *v1.Pod, meta interfa
if result[i].Score > maxCountByNodeName {
maxCountByNodeName = result[i].Score
}
zoneId := utilnode.GetZoneKey(nodeNameToInfo[result[i].Host].Node())
if zoneId == "" {
zoneID := utilnode.GetZoneKey(nodeNameToInfo[result[i].Host].Node())
if zoneID == "" {
continue
}
countsByZone[zoneId] += result[i].Score
countsByZone[zoneID] += result[i].Score
}
for zoneId := range countsByZone {
if countsByZone[zoneId] > maxCountByZone {
maxCountByZone = countsByZone[zoneId]
for zoneID := range countsByZone {
if countsByZone[zoneID] > maxCountByZone {
maxCountByZone = countsByZone[zoneID]
}
}
@ -152,11 +154,11 @@ func (s *SelectorSpread) CalculateSpreadPriorityReduce(pod *v1.Pod, meta interfa
}
// If there is zone information present, incorporate it
if haveZones {
zoneId := utilnode.GetZoneKey(nodeNameToInfo[result[i].Host].Node())
if zoneId != "" {
zoneID := utilnode.GetZoneKey(nodeNameToInfo[result[i].Host].Node())
if zoneID != "" {
zoneScore := MaxPriorityFloat64
if maxCountByZone > 0 {
zoneScore = MaxPriorityFloat64 * (float64(maxCountByZone-countsByZone[zoneId]) / maxCountByZoneFloat64)
zoneScore = MaxPriorityFloat64 * (float64(maxCountByZone-countsByZone[zoneID]) / maxCountByZoneFloat64)
}
fScore = (fScore * (1.0 - zoneWeighting)) + (zoneWeighting * zoneScore)
}
@ -171,12 +173,14 @@ func (s *SelectorSpread) CalculateSpreadPriorityReduce(pod *v1.Pod, meta interfa
return nil
}
// ServiceAntiAffinity contains information to calculate service anti-affinity priority.
type ServiceAntiAffinity struct {
podLister algorithm.PodLister
serviceLister algorithm.ServiceLister
label string
}
// NewServiceAntiAffinityPriority creates a ServiceAntiAffinity.
func NewServiceAntiAffinityPriority(podLister algorithm.PodLister, serviceLister algorithm.ServiceLister, label string) (algorithm.PriorityMapFunction, algorithm.PriorityReduceFunction) {
antiAffinity := &ServiceAntiAffinity{
podLister: podLister,


@ -26,7 +26,10 @@ import "k8s.io/api/core/v1"
// consuming no resources whatsoever. We chose these values to be similar to the
// resources that we give to cluster addon pods (#10653). But they are pretty arbitrary.
// As described in #11713, we use request instead of limit to deal with resource requirements.
const DefaultMilliCpuRequest int64 = 100 // 0.1 core
// DefaultMilliCPURequest defines default milli cpu request number.
const DefaultMilliCPURequest int64 = 100 // 0.1 core
// DefaultMemoryRequest defines default memory request size.
const DefaultMemoryRequest int64 = 200 * 1024 * 1024 // 200 MB
// GetNonzeroRequests returns the default resource request if none is found or what is provided on the request
@ -36,7 +39,7 @@ func GetNonzeroRequests(requests *v1.ResourceList) (int64, int64) {
var outMilliCPU, outMemory int64
// Override if un-set, but not if explicitly set to zero
if _, found := (*requests)[v1.ResourceCPU]; !found {
outMilliCPU = DefaultMilliCpuRequest
outMilliCPU = DefaultMilliCPURequest
} else {
outMilliCPU = requests.Cpu().MilliValue()
}
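A usage sketch for GetNonzeroRequests and the renamed DefaultMilliCPURequest, assuming the in-tree import path for this util package; the program itself is illustrative:

package main

import (
    "fmt"

    "k8s.io/api/core/v1"
    priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
)

func main() {
    // No CPU or memory request set, so both defaults apply.
    requests := v1.ResourceList{}
    milliCPU, memory := priorityutil.GetNonzeroRequests(&requests)
    // Prints 100 (DefaultMilliCPURequest) and 209715200 (DefaultMemoryRequest).
    fmt.Println(milliCPU, memory)
}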


@ -35,7 +35,7 @@ func TestGetNonzeroRequests(t *testing.T) {
{
"cpu_and_memory_not_found",
v1.ResourceList{},
DefaultMilliCpuRequest,
DefaultMilliCPURequest,
DefaultMemoryRequest,
},
{
@ -51,7 +51,7 @@ func TestGetNonzeroRequests(t *testing.T) {
v1.ResourceList{
v1.ResourceMemory: resource.MustParse("400Mi"),
},
DefaultMilliCpuRequest,
DefaultMilliCPURequest,
400 * 1024 * 1024,
},
{


@ -70,6 +70,7 @@ func NodesHaveSameTopologyKey(nodeA, nodeB *v1.Node, topologyKey string) bool {
return false
}
// Topologies contains topologies information of nodes.
type Topologies struct {
DefaultKeys []string
}


@ -21,6 +21,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// GetControllerRef gets pod's owner controller reference from a pod object.
func GetControllerRef(pod *v1.Pod) *metav1.OwnerReference {
if len(pod.OwnerReferences) == 0 {
return nil


@ -47,10 +47,12 @@ type PredicateMetadataProducer func(pod *v1.Pod, nodeNameToInfo map[string]*sche
// is now used for only for priority functions. For predicates please use PredicateMetadataProducer.
type PriorityMetadataProducer func(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo) interface{}
// PriorityFunction is a function that computes scores for all nodes.
// DEPRECATED
// Use Map-Reduce pattern for priority functions.
type PriorityFunction func(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error)
// PriorityConfig is a config used for a priority function.
type PriorityConfig struct {
Name string
Map PriorityMapFunction
@ -71,10 +73,12 @@ func EmptyPriorityMetadataProducer(pod *v1.Pod, nodeNameToInfo map[string]*sched
return nil
}
// PredicateFailureReason interface represents the failure reason of a predicate.
type PredicateFailureReason interface {
GetReason() string
}
// GetEquivalencePodFunc is a function that gets a EquivalencePod from a pod.
type GetEquivalencePodFunc func(pod *v1.Pod) interface{}
// NodeLister interface represents anything that can list nodes for a scheduler.
@ -157,6 +161,7 @@ func (f EmptyStatefulSetLister) GetPodStatefulSets(pod *v1.Pod) (sss []*apps.Sta
return nil, nil
}
// PredicateMetadata interface represents anything that can access a predicate metadata.
type PredicateMetadata interface {
ShallowCopy() PredicateMetadata
AddPod(addedPod *v1.Pod, nodeInfo *schedulercache.NodeInfo) error
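The deprecation note on PriorityFunction above points at the Map-Reduce style used elsewhere in this commit (CalculateNodeAffinityPriorityMap, EqualPriorityMap, NormalizeReduce). A minimal sketch of the Map half, modeled on EqualPriorityMap; the function name and constant score are illustrative:

package example

import (
    "fmt"

    "k8s.io/api/core/v1"
    schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
    "k8s.io/kubernetes/pkg/scheduler/schedulercache"
)

// flatPriorityMap scores every node the same; a reduce function such as
// NormalizeReduce can rescale the per-node scores afterwards.
func flatPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
    node := nodeInfo.Node()
    if node == nil {
        return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
    }
    return schedulerapi.HostPriority{Host: node.Name, Score: 1}, nil
}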


@ -17,18 +17,18 @@ limitations under the License.
package algorithm
const (
// When feature-gate for TaintBasedEvictions=true flag is enabled,
// TaintNodeNotReady would be automatically added by node controller
// when node is not ready, and removed when node becomes ready.
// TaintNodeNotReady will be added when node is not ready
// and feature-gate for TaintBasedEvictions flag is enabled,
// and removed when node becomes ready.
TaintNodeNotReady = "node.kubernetes.io/not-ready"
// DeprecatedTaintNodeNotReady is the deprecated version of TaintNodeNotReady.
// It is deprecated since 1.9
DeprecatedTaintNodeNotReady = "node.alpha.kubernetes.io/notReady"
// When feature-gate for TaintBasedEvictions=true flag is enabled,
// TaintNodeUnreachable would be automatically added by node controller
// when node becomes unreachable (corresponding to NodeReady status ConditionUnknown)
// TaintNodeUnreachable will be added when node becomes unreachable
// (corresponding to NodeReady status ConditionUnknown)
// and feature-gate for TaintBasedEvictions flag is enabled,
// and removed when node becomes reachable (NodeReady status ConditionTrue).
TaintNodeUnreachable = "node.kubernetes.io/unreachable"
@ -36,28 +36,28 @@ const (
// It is deprecated since 1.9
DeprecatedTaintNodeUnreachable = "node.alpha.kubernetes.io/unreachable"
// When feature-gate for TaintBasedEvictions=true flag is enabled,
// TaintNodeOutOfDisk would be automatically added by node controller
// when node becomes out of disk, and removed when node has enough disk.
// TaintNodeOutOfDisk will be added when node becomes out of disk
// and feature-gate for TaintBasedEvictions flag is enabled,
// and removed when node has enough disk.
TaintNodeOutOfDisk = "node.kubernetes.io/out-of-disk"
// When feature-gate for TaintBasedEvictions=true flag is enabled,
// TaintNodeMemoryPressure would be automatically added by node controller
// when node has memory pressure, and removed when node has enough memory.
// TaintNodeMemoryPressure will be added when node has memory pressure
// and feature-gate for TaintBasedEvictions flag is enabled,
// and removed when node has enough memory.
TaintNodeMemoryPressure = "node.kubernetes.io/memory-pressure"
// When feature-gate for TaintBasedEvictions=true flag is enabled,
// TaintNodeDiskPressure would be automatically added by node controller
// when node has disk pressure, and removed when node has enough disk.
// TaintNodeDiskPressure will be added when node has disk pressure
// and feature-gate for TaintBasedEvictions flag is enabled,
// and removed when node has enough disk.
TaintNodeDiskPressure = "node.kubernetes.io/disk-pressure"
// When feature-gate for TaintBasedEvictions=true flag is enabled,
// TaintNodeNetworkUnavailable would be automatically added by node controller
// when node's network is unavailable, and removed when network becomes ready.
// TaintNodeNetworkUnavailable will be added when node's network is unavailable
// and feature-gate for TaintBasedEvictions flag is enabled,
// and removed when network becomes ready.
TaintNodeNetworkUnavailable = "node.kubernetes.io/network-unavailable"
// When kubelet is started with the "external" cloud provider, then
// it sets this taint on a node to mark it as unusable, until a controller
// TaintExternalCloudProvider sets this taint on a node to mark it as unusable,
// when kubelet is started with the "external" cloud provider, until a controller
// from the cloud-controller-manager initializes this node, and then removes
// the taint
TaintExternalCloudProvider = "node.cloudprovider.kubernetes.io/uninitialized"


@ -22,6 +22,7 @@ import (
"k8s.io/apimachinery/pkg/runtime/serializer/json"
"k8s.io/apimachinery/pkg/runtime/serializer/versioning"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
// Init the api v1 package
_ "k8s.io/kubernetes/pkg/scheduler/api/v1"
)


@ -31,8 +31,10 @@ var Scheme = runtime.NewScheme()
var SchemeGroupVersion = schema.GroupVersion{Group: "", Version: runtime.APIVersionInternal}
var (
// SchemeBuilder defines a SchemeBuilder object.
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
AddToScheme = SchemeBuilder.AddToScheme
// AddToScheme is used to add stored functions to scheme.
AddToScheme = SchemeBuilder.AddToScheme
)
func init() {


@ -26,15 +26,21 @@ import (
)
const (
MaxUint = ^uint(0)
MaxInt = int(MaxUint >> 1)
// MaxUint defines the max unsigned int value.
MaxUint = ^uint(0)
// MaxInt defines the max signed int value.
MaxInt = int(MaxUint >> 1)
// MaxTotalPriority defines the max total priority value.
MaxTotalPriority = MaxInt
MaxPriority = 10
MaxWeight = MaxInt / MaxPriority
// MaxPriority defines the max priority value.
MaxPriority = 10
// MaxWeight defines the max weight value.
MaxWeight = MaxInt / MaxPriority
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Policy describes a struct of a policy resource in api.
type Policy struct {
metav1.TypeMeta
// Holds the information to configure the fit predicate functions.
@ -60,6 +66,7 @@ type Policy struct {
AlwaysCheckAllPredicates bool
}
// PredicatePolicy describes a struct of a predicate policy.
type PredicatePolicy struct {
// Identifier of the predicate policy
// For a custom predicate, the name can be user-defined
@ -69,6 +76,7 @@ type PredicatePolicy struct {
Argument *PredicateArgument
}
// PriorityPolicy describes a struct of a priority policy.
type PriorityPolicy struct {
// Identifier of the priority policy
// For a custom priority, the name can be user-defined
@ -151,8 +159,8 @@ type ExtenderConfig struct {
// If this method is implemented by the extender, it is the extender's responsibility to bind the pod to apiserver. Only one extender
// can implement this function.
BindVerb string
// EnableHttps specifies whether https should be used to communicate with the extender
EnableHttps bool
// EnableHTTPS specifies whether https should be used to communicate with the extender
EnableHTTPS bool
// TLSConfig specifies the transport layer security config
TLSConfig *restclient.TLSClientConfig
// HTTPTimeout specifies the timeout duration for a call to the extender. Filter timeout fails the scheduling of the pod. Prioritize
@ -220,6 +228,7 @@ type HostPriority struct {
Score int
}
// HostPriorityList declares a []HostPriority type.
type HostPriorityList []HostPriority
func (h HostPriorityList) Len() int {


@ -36,9 +36,12 @@ func init() {
var (
// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
// SchemeBuilder is a v1 api scheme builder.
SchemeBuilder runtime.SchemeBuilder
localSchemeBuilder = &SchemeBuilder
AddToScheme = localSchemeBuilder.AddToScheme
// AddToScheme is used to add stored functions to scheme.
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {


@ -27,6 +27,7 @@ import (
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Policy describes a struct for a policy resource used in api.
type Policy struct {
metav1.TypeMeta `json:",inline"`
// Holds the information to configure the fit predicate functions
@ -47,6 +48,7 @@ type Policy struct {
AlwaysCheckAllPredicates bool `json:"alwaysCheckAllPredicates"`
}
// PredicatePolicy describes a struct of a predicate policy.
type PredicatePolicy struct {
// Identifier of the predicate policy
// For a custom predicate, the name can be user-defined
@ -56,6 +58,7 @@ type PredicatePolicy struct {
Argument *PredicateArgument `json:"argument"`
}
// PriorityPolicy describes a struct of a priority policy.
type PriorityPolicy struct {
// Identifier of the priority policy
// For a custom priority, the name can be user-defined
@ -138,8 +141,8 @@ type ExtenderConfig struct {
// If this method is implemented by the extender, it is the extender's responsibility to bind the pod to apiserver. Only one extender
// can implement this function.
BindVerb string
// EnableHttps specifies whether https should be used to communicate with the extender
EnableHttps bool `json:"enableHttps,omitempty"`
// EnableHTTPS specifies whether https should be used to communicate with the extender
EnableHTTPS bool `json:"enableHttps,omitempty"`
// TLSConfig specifies the transport layer security config
TLSConfig *restclient.TLSClientConfig `json:"tlsConfig,omitempty"`
// HTTPTimeout specifies the timeout duration for a call to the extender. Filter timeout fails the scheduling of the pod. Prioritize
@ -207,6 +210,7 @@ type HostPriority struct {
Score int `json:"score"`
}
// HostPriorityList declares a []HostPriority type.
type HostPriorityList []HostPriority
func (h HostPriorityList) Len() int {


@ -62,6 +62,7 @@ func newAlgorithmCache() AlgorithmCache {
}
}
// NewEquivalenceCache creates a EquivalenceCache object.
func NewEquivalenceCache(getEquivalencePodFunc algorithm.GetEquivalencePodFunc) *EquivalenceCache {
return &EquivalenceCache{
getEquivalencePod: getEquivalencePodFunc,
@ -119,13 +120,11 @@ func (ec *EquivalenceCache) PredicateWithECache(
if hostPredicate, ok := predicateMap[equivalenceHash]; ok {
if hostPredicate.Fit {
return true, []algorithm.PredicateFailureReason{}, false
} else {
return false, hostPredicate.FailReasons, false
}
} else {
// is invalid
return false, []algorithm.PredicateFailureReason{}, true
return false, hostPredicate.FailReasons, false
}
// is invalid
return false, []algorithm.PredicateFailureReason{}, true
}
}
return false, []algorithm.PredicateFailureReason{}, true
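This restructuring (and the similar one in getEquivalencePod earlier in the commit) answers golint's "if block ends with a return statement, so drop this else and outdent its block". A generic before/after sketch with illustrative names:

package example

// Before: golint flags the else branch.
func lookupBefore(cache map[uint64]bool, key uint64) (bool, bool) {
    if fit, ok := cache[key]; ok {
        return fit, false
    } else { // golint: drop this else and outdent its block
        return false, true
    }
}

// After: early return, no else.
func lookupAfter(cache map[uint64]bool, key uint64) (bool, bool) {
    if fit, ok := cache[key]; ok {
        return fit, false
    }
    return false, true
}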


@ -33,6 +33,7 @@ import (
)
const (
// DefaultExtenderTimeout defines the default extender timeout in second.
DefaultExtenderTimeout = 5 * time.Second
)
@ -52,7 +53,7 @@ func makeTransport(config *schedulerapi.ExtenderConfig) (http.RoundTripper, erro
if config.TLSConfig != nil {
cfg.TLSClientConfig = *config.TLSConfig
}
if config.EnableHttps {
if config.EnableHTTPS {
hasCA := len(cfg.CAFile) > 0 || len(cfg.CAData) > 0
if !hasCA {
cfg.Insecure = true
@ -70,6 +71,7 @@ func makeTransport(config *schedulerapi.ExtenderConfig) (http.RoundTripper, erro
return utilnet.SetTransportDefaults(&http.Transport{}), nil
}
// NewHTTPExtender creates an HTTPExtender object.
func NewHTTPExtender(config *schedulerapi.ExtenderConfig) (algorithm.SchedulerExtender, error) {
if config.HTTPTimeout.Nanoseconds() == 0 {
config.HTTPTimeout = time.Duration(DefaultExtenderTimeout)


@ -44,22 +44,27 @@ import (
"k8s.io/kubernetes/pkg/scheduler/volumebinder"
)
// FailedPredicateMap declares a map[string][]algorithm.PredicateFailureReason type.
type FailedPredicateMap map[string][]algorithm.PredicateFailureReason
// FitError describes a fit error of a pod.
type FitError struct {
Pod *v1.Pod
NumAllNodes int
FailedPredicates FailedPredicateMap
}
// Victims describes pod victims.
type Victims struct {
pods []*v1.Pod
numPDBViolations int
}
// ErrNoNodesAvailable defines an error of no nodes available.
var ErrNoNodesAvailable = fmt.Errorf("no nodes available to schedule pods")
const (
// NoNodeAvailableMsg is used to format message when no nodes available.
NoNodeAvailableMsg = "0/%v nodes are available"
)
@ -68,7 +73,7 @@ func (f *FitError) Error() string {
reasons := make(map[string]int)
for _, predicates := range f.FailedPredicates {
for _, pred := range predicates {
reasons[pred.GetReason()] += 1
reasons[pred.GetReason()]++
}
}
@ -383,7 +388,7 @@ func addNominatedPods(podPriority int32, meta algorithm.PredicateMetadata,
if nominatedPods == nil || len(nominatedPods) == 0 {
return false, meta, nodeInfo
}
var metaOut algorithm.PredicateMetadata = nil
var metaOut algorithm.PredicateMetadata
if meta != nil {
metaOut = meta.ShallowCopy()
}
@ -460,7 +465,7 @@ func podFitsOnNode(
// TODO(bsalamat): consider using eCache and adding proper eCache invalidations
// when pods are nominated or their nominations change.
eCacheAvailable = equivCacheInfo != nil && !podsAdded
for _, predicateKey := range predicates.PredicatesOrdering() {
for _, predicateKey := range predicates.Ordering() {
//TODO (yastij) : compute average predicate restrictiveness to export it as Prometheus metric
if predicate, exist := predicateFuncs[predicateKey]; exist {
if eCacheAvailable {
@ -513,7 +518,7 @@ func podFitsOnNode(
return len(failedPredicates) == 0, failedPredicates, nil
}
// Prioritizes the nodes by running the individual priority functions in parallel.
// PrioritizeNodes prioritizes the nodes by running the individual priority functions in parallel.
// Each priority function is expected to set a score of 0-10
// 0 is the lowest priority score (least preferred node) and 10 is the highest
// Each priority function can also have its own weight
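
To make the 0-10 scoring contract concrete, here is a hedged sketch of a custom map-style priority function with the same signature as EqualPriorityMap further down in this diff. The scoring rule (emptier nodes score higher) and the package layout are invented for illustration and are not part of this commit.

```go
package priorities

import (
	"fmt"

	"k8s.io/api/core/v1"
	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
	"k8s.io/kubernetes/pkg/scheduler/schedulercache"
)

// fewestPodsPriorityMap is a hypothetical map-style priority function. It scores
// each node from 0 to 10, giving emptier nodes a higher score; the scheduler
// later multiplies the score by the priority's configured weight.
func fewestPodsPriorityMap(_ *v1.Pod, _ interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
	node := nodeInfo.Node()
	if node == nil {
		return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
	}
	score := 10 - len(nodeInfo.Pods())
	if score < 0 {
		score = 0
	}
	return schedulerapi.HostPriority{Host: node.Name, Score: score}, nil
}
```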
@ -652,7 +657,7 @@ func PrioritizeNodes(
return result, nil
}
// EqualPriority is a prioritizer function that gives an equal weight of one to all nodes
// EqualPriorityMap is a prioritizer function that gives an equal weight of one to all nodes
func EqualPriorityMap(_ *v1.Pod, _ interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
node := nodeInfo.Node()
if node == nil {
@ -1050,6 +1055,7 @@ func podPassesBasicChecks(pod *v1.Pod, pvcLister corelisters.PersistentVolumeCla
return nil
}
// NewGenericScheduler creates a genericScheduler object.
func NewGenericScheduler(
cache schedulercache.Cache,
eCache *EquivalenceCache,

View File

@ -546,7 +546,7 @@ func TestZeroRequest(t *testing.T) {
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse(
strconv.FormatInt(priorityutil.DefaultMilliCpuRequest, 10) + "m"),
strconv.FormatInt(priorityutil.DefaultMilliCPURequest, 10) + "m"),
v1.ResourceMemory: resource.MustParse(
strconv.FormatInt(priorityutil.DefaultMemoryRequest, 10)),
},
@ -563,7 +563,7 @@ func TestZeroRequest(t *testing.T) {
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse(
strconv.FormatInt(priorityutil.DefaultMilliCpuRequest*3, 10) + "m"),
strconv.FormatInt(priorityutil.DefaultMilliCPURequest*3, 10) + "m"),
v1.ResourceMemory: resource.MustParse(
strconv.FormatInt(priorityutil.DefaultMemoryRequest*3, 10)),
},
@ -716,7 +716,7 @@ var smallContainers = []v1.Container{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"cpu": resource.MustParse(
strconv.FormatInt(priorityutil.DefaultMilliCpuRequest, 10) + "m"),
strconv.FormatInt(priorityutil.DefaultMilliCPURequest, 10) + "m"),
"memory": resource.MustParse(
strconv.FormatInt(priorityutil.DefaultMemoryRequest, 10)),
},
@ -728,7 +728,7 @@ var mediumContainers = []v1.Container{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"cpu": resource.MustParse(
strconv.FormatInt(priorityutil.DefaultMilliCpuRequest*2, 10) + "m"),
strconv.FormatInt(priorityutil.DefaultMilliCPURequest*2, 10) + "m"),
"memory": resource.MustParse(
strconv.FormatInt(priorityutil.DefaultMemoryRequest*2, 10)),
},
@ -740,7 +740,7 @@ var largeContainers = []v1.Container{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"cpu": resource.MustParse(
strconv.FormatInt(priorityutil.DefaultMilliCpuRequest*3, 10) + "m"),
strconv.FormatInt(priorityutil.DefaultMilliCPURequest*3, 10) + "m"),
"memory": resource.MustParse(
strconv.FormatInt(priorityutil.DefaultMemoryRequest*3, 10)),
},
@ -752,7 +752,7 @@ var veryLargeContainers = []v1.Container{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"cpu": resource.MustParse(
strconv.FormatInt(priorityutil.DefaultMilliCpuRequest*5, 10) + "m"),
strconv.FormatInt(priorityutil.DefaultMilliCPURequest*5, 10) + "m"),
"memory": resource.MustParse(
strconv.FormatInt(priorityutil.DefaultMemoryRequest*5, 10)),
},
@ -883,7 +883,7 @@ func TestSelectNodesForPreemption(t *testing.T) {
for _, test := range tests {
nodes := []*v1.Node{}
for _, n := range test.nodes {
node := makeNode(n, priorityutil.DefaultMilliCpuRequest*5, priorityutil.DefaultMemoryRequest*5)
node := makeNode(n, priorityutil.DefaultMilliCPURequest*5, priorityutil.DefaultMemoryRequest*5)
node.ObjectMeta.Labels = map[string]string{"hostname": node.Name}
nodes = append(nodes, node)
}
@ -1046,7 +1046,7 @@ func TestPickOneNodeForPreemption(t *testing.T) {
for _, test := range tests {
nodes := []*v1.Node{}
for _, n := range test.nodes {
nodes = append(nodes, makeNode(n, priorityutil.DefaultMilliCpuRequest*5, priorityutil.DefaultMemoryRequest*5))
nodes = append(nodes, makeNode(n, priorityutil.DefaultMilliCPURequest*5, priorityutil.DefaultMemoryRequest*5))
}
nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, nodes)
candidateNodes, _ := selectNodesForPreemption(test.pod, nodeNameToInfo, nodes, test.predicates, PredicateMetadata, nil, nil)
@ -1285,7 +1285,7 @@ func TestPreempt(t *testing.T) {
cache.AddPod(pod)
}
for _, name := range nodeNames {
cache.AddNode(makeNode(name, priorityutil.DefaultMilliCpuRequest*5, priorityutil.DefaultMemoryRequest*5))
cache.AddNode(makeNode(name, priorityutil.DefaultMilliCPURequest*5, priorityutil.DefaultMemoryRequest*5))
}
extenders := []algorithm.SchedulerExtender{}
for _, extender := range test.extenders {

View File

@ -76,10 +76,12 @@ type FIFO struct {
var _ = SchedulingQueue(&FIFO{}) // Making sure that FIFO implements SchedulingQueue.
// Add adds a pod to the FIFO.
func (f *FIFO) Add(pod *v1.Pod) error {
return f.FIFO.Add(pod)
}
// AddIfNotPresent adds a pod to the FIFO if it is not already present.
func (f *FIFO) AddIfNotPresent(pod *v1.Pod) error {
return f.FIFO.AddIfNotPresent(pod)
}
@ -90,10 +92,12 @@ func (f *FIFO) AddUnschedulableIfNotPresent(pod *v1.Pod) error {
return f.FIFO.AddIfNotPresent(pod)
}
// Update updates a pod in the FIFO.
func (f *FIFO) Update(pod *v1.Pod) error {
return f.FIFO.Update(pod)
}
// Delete deletes a pod in the FIFO.
func (f *FIFO) Delete(pod *v1.Pod) error {
return f.FIFO.Delete(pod)
}
@ -114,7 +118,11 @@ func (f *FIFO) Pop() (*v1.Pod, error) {
// FIFO does not need to react to events, as all pods are always in the active
// scheduling queue anyway.
func (f *FIFO) AssignedPodAdded(pod *v1.Pod) {}
// AssignedPodAdded does nothing here.
func (f *FIFO) AssignedPodAdded(pod *v1.Pod) {}
// AssignedPodUpdated does nothing here.
func (f *FIFO) AssignedPodUpdated(pod *v1.Pod) {}
// MoveAllToActiveQueue does nothing in FIFO as all pods are always in the active queue.
@ -126,6 +134,7 @@ func (f *FIFO) WaitingPodsForNode(nodeName string) []*v1.Pod {
return nil
}
// NewFIFO creates a FIFO object.
func NewFIFO() *FIFO {
return &FIFO{FIFO: cache.NewFIFO(cache.MetaNamespaceKeyFunc)}
}
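
A brief usage sketch of the FIFO scheduling queue shown above; the import paths are assumed from this commit's package layout, and Pop blocks while the queue is empty.

```go
package main

import (
	"fmt"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/scheduler/core"
)

func main() {
	q := core.NewFIFO()

	// Queue a pending pod; the key is derived from namespace/name.
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "nginx"}}
	if err := q.Add(pod); err != nil {
		fmt.Println("add failed:", err)
		return
	}

	// Pop returns the next pod to schedule (it blocks while the queue is empty).
	next, err := q.Pop()
	if err != nil {
		fmt.Println("pop failed:", err)
		return
	}
	fmt.Println("next pod to schedule:", next.Namespace+"/"+next.Name)
}
```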
@ -169,6 +178,7 @@ type PriorityQueue struct {
// Making sure that PriorityQueue implements SchedulingQueue.
var _ = SchedulingQueue(&PriorityQueue{})
// NewPriorityQueue creates a PriorityQueue object.
func NewPriorityQueue() *PriorityQueue {
pq := &PriorityQueue{
activeQ: newHeap(cache.MetaNamespaceKeyFunc, util.HigherPriorityPod),
@ -294,10 +304,9 @@ func (p *PriorityQueue) Update(pod *v1.Pod) error {
p.cond.Broadcast()
}
return err
} else {
p.unschedulableQ.Update(pod)
return nil
}
p.unschedulableQ.Update(pod)
return nil
}
// If the pod is not in either queue, we put it in the active queue.
err := p.activeQ.Add(pod)
@ -417,6 +426,7 @@ type UnschedulablePodsMap struct {
var _ = UnschedulablePods(&UnschedulablePodsMap{})
// NominatedNodeName returns the nominated node name of a pod.
func NominatedNodeName(pod *v1.Pod) string {
return pod.Status.NominatedNodeName
}
@ -517,7 +527,10 @@ func newUnschedulablePodsMap() *UnschedulablePodsMap {
// as cache.heap, however, this heap does not perform synchronization. It leaves
// synchronization to the SchedulingQueue.
// LessFunc is a function type to compare two objects.
type LessFunc func(interface{}, interface{}) bool
// KeyFunc is a function type to get the key from an object.
type KeyFunc func(obj interface{}) (string, error)
type heapItem struct {
@ -681,9 +694,8 @@ func (h *Heap) Pop() (interface{}, error) {
obj := heap.Pop(h.data)
if obj != nil {
return obj, nil
} else {
return nil, fmt.Errorf("object was removed from heap data")
}
return nil, fmt.Errorf("object was removed from heap data")
}
// Get returns the requested item, or sets exists=false.

View File

@ -190,10 +190,9 @@ func NewConfigFactory(
case cache.DeletedFinalStateUnknown:
if pod, ok := t.Obj.(*v1.Pod); ok {
return assignedNonTerminatedPod(pod)
} else {
runtime.HandleError(fmt.Errorf("unable to convert object %T to *v1.Pod in %T", obj, c))
return false
}
runtime.HandleError(fmt.Errorf("unable to convert object %T to *v1.Pod in %T", obj, c))
return false
default:
runtime.HandleError(fmt.Errorf("unable to handle object in %T: %T", c, obj))
return false
@ -216,10 +215,9 @@ func NewConfigFactory(
case cache.DeletedFinalStateUnknown:
if pod, ok := t.Obj.(*v1.Pod); ok {
return unassignedNonTerminatedPod(pod)
} else {
runtime.HandleError(fmt.Errorf("unable to convert object %T to *v1.Pod in %T", obj, c))
return false
}
runtime.HandleError(fmt.Errorf("unable to convert object %T to *v1.Pod in %T", obj, c))
return false
default:
runtime.HandleError(fmt.Errorf("unable to handle object in %T: %T", c, obj))
return false
@ -531,13 +529,13 @@ func (c *configFactory) GetHardPodAffinitySymmetricWeight() int32 {
return c.hardPodAffinitySymmetricWeight
}
func (f *configFactory) GetSchedulerName() string {
return f.schedulerName
func (c *configFactory) GetSchedulerName() string {
return c.schedulerName
}
// GetClient provides a kubernetes client, mostly internal use, but may also be called by mock-tests.
func (f *configFactory) GetClient() clientset.Interface {
return f.client
func (c *configFactory) GetClient() clientset.Interface {
return c.client
}
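
Most of this file's changes are a mechanical receiver rename from f to c, which satisfies golint's "receiver name should be consistent" check. A toy illustration of the rule (names invented):

```go
package main

import "fmt"

type counter struct{ n int }

// Every method on counter uses the same receiver name c; mixing c with, say,
// cnt across methods is exactly what the consistency check flags.
func (c *counter) Inc() { c.n++ }

func (c *counter) Value() int { return c.n }

func main() {
	c := &counter{}
	c.Inc()
	fmt.Println(c.Value()) // 1
}
```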
// GetScheduledPodListerIndexer provides a pod lister, mostly internal use, but may also be called by mock-tests.
@ -865,23 +863,23 @@ func (c *configFactory) deletePDBFromCache(obj interface{}) {
}
// Create creates a scheduler with the default algorithm provider.
func (f *configFactory) Create() (*scheduler.Config, error) {
return f.CreateFromProvider(DefaultProvider)
func (c *configFactory) Create() (*scheduler.Config, error) {
return c.CreateFromProvider(DefaultProvider)
}
// Creates a scheduler from the name of a registered algorithm provider.
func (f *configFactory) CreateFromProvider(providerName string) (*scheduler.Config, error) {
func (c *configFactory) CreateFromProvider(providerName string) (*scheduler.Config, error) {
glog.V(2).Infof("Creating scheduler from algorithm provider '%v'", providerName)
provider, err := GetAlgorithmProvider(providerName)
if err != nil {
return nil, err
}
return f.CreateFromKeys(provider.FitPredicateKeys, provider.PriorityFunctionKeys, []algorithm.SchedulerExtender{})
return c.CreateFromKeys(provider.FitPredicateKeys, provider.PriorityFunctionKeys, []algorithm.SchedulerExtender{})
}
// Creates a scheduler from the configuration file
func (f *configFactory) CreateFromConfig(policy schedulerapi.Policy) (*scheduler.Config, error) {
func (c *configFactory) CreateFromConfig(policy schedulerapi.Policy) (*scheduler.Config, error) {
glog.V(2).Infof("Creating scheduler from configuration: %v", policy)
// validate the policy configuration
@ -923,98 +921,98 @@ func (f *configFactory) CreateFromConfig(policy schedulerapi.Policy) (*scheduler
if len(policy.ExtenderConfigs) != 0 {
for ii := range policy.ExtenderConfigs {
glog.V(2).Infof("Creating extender with config %+v", policy.ExtenderConfigs[ii])
if extender, err := core.NewHTTPExtender(&policy.ExtenderConfigs[ii]); err != nil {
extender, err := core.NewHTTPExtender(&policy.ExtenderConfigs[ii])
if err != nil {
return nil, err
} else {
extenders = append(extenders, extender)
}
extenders = append(extenders, extender)
}
}
// Providing HardPodAffinitySymmetricWeight in the policy config is the new and preferred way of providing the value.
// Give it higher precedence than scheduler CLI configuration when it is provided.
if policy.HardPodAffinitySymmetricWeight != 0 {
f.hardPodAffinitySymmetricWeight = policy.HardPodAffinitySymmetricWeight
c.hardPodAffinitySymmetricWeight = policy.HardPodAffinitySymmetricWeight
}
// When AlwaysCheckAllPredicates is set to true, scheduler checks all the configured
// predicates even after one or more of them fails.
if policy.AlwaysCheckAllPredicates {
f.alwaysCheckAllPredicates = policy.AlwaysCheckAllPredicates
c.alwaysCheckAllPredicates = policy.AlwaysCheckAllPredicates
}
return f.CreateFromKeys(predicateKeys, priorityKeys, extenders)
return c.CreateFromKeys(predicateKeys, priorityKeys, extenders)
}
// getBinder returns an extender that supports bind or a default binder.
func (f *configFactory) getBinder(extenders []algorithm.SchedulerExtender) scheduler.Binder {
func (c *configFactory) getBinder(extenders []algorithm.SchedulerExtender) scheduler.Binder {
for i := range extenders {
if extenders[i].IsBinder() {
return extenders[i]
}
}
return &binder{f.client}
return &binder{c.client}
}
// Creates a scheduler from a set of registered fit predicate keys and priority keys.
func (f *configFactory) CreateFromKeys(predicateKeys, priorityKeys sets.String, extenders []algorithm.SchedulerExtender) (*scheduler.Config, error) {
func (c *configFactory) CreateFromKeys(predicateKeys, priorityKeys sets.String, extenders []algorithm.SchedulerExtender) (*scheduler.Config, error) {
glog.V(2).Infof("Creating scheduler with fit predicates '%v' and priority functions '%v'", predicateKeys, priorityKeys)
if f.GetHardPodAffinitySymmetricWeight() < 1 || f.GetHardPodAffinitySymmetricWeight() > 100 {
return nil, fmt.Errorf("invalid hardPodAffinitySymmetricWeight: %d, must be in the range 1-100", f.GetHardPodAffinitySymmetricWeight())
if c.GetHardPodAffinitySymmetricWeight() < 1 || c.GetHardPodAffinitySymmetricWeight() > 100 {
return nil, fmt.Errorf("invalid hardPodAffinitySymmetricWeight: %d, must be in the range 1-100", c.GetHardPodAffinitySymmetricWeight())
}
predicateFuncs, err := f.GetPredicates(predicateKeys)
predicateFuncs, err := c.GetPredicates(predicateKeys)
if err != nil {
return nil, err
}
priorityConfigs, err := f.GetPriorityFunctionConfigs(priorityKeys)
priorityConfigs, err := c.GetPriorityFunctionConfigs(priorityKeys)
if err != nil {
return nil, err
}
priorityMetaProducer, err := f.GetPriorityMetadataProducer()
priorityMetaProducer, err := c.GetPriorityMetadataProducer()
if err != nil {
return nil, err
}
predicateMetaProducer, err := f.GetPredicateMetadataProducer()
predicateMetaProducer, err := c.GetPredicateMetadataProducer()
if err != nil {
return nil, err
}
// Init equivalence class cache
if f.enableEquivalenceClassCache && getEquivalencePodFuncFactory != nil {
pluginArgs, err := f.getPluginArgs()
if c.enableEquivalenceClassCache && getEquivalencePodFuncFactory != nil {
pluginArgs, err := c.getPluginArgs()
if err != nil {
return nil, err
}
f.equivalencePodCache = core.NewEquivalenceCache(
c.equivalencePodCache = core.NewEquivalenceCache(
getEquivalencePodFuncFactory(*pluginArgs),
)
glog.Info("Created equivalence class cache")
}
algo := core.NewGenericScheduler(f.schedulerCache, f.equivalencePodCache, f.podQueue, predicateFuncs, predicateMetaProducer, priorityConfigs, priorityMetaProducer, extenders, f.volumeBinder, f.pVCLister, f.alwaysCheckAllPredicates)
algo := core.NewGenericScheduler(c.schedulerCache, c.equivalencePodCache, c.podQueue, predicateFuncs, predicateMetaProducer, priorityConfigs, priorityMetaProducer, extenders, c.volumeBinder, c.pVCLister, c.alwaysCheckAllPredicates)
podBackoff := util.CreateDefaultPodBackoff()
return &scheduler.Config{
SchedulerCache: f.schedulerCache,
Ecache: f.equivalencePodCache,
SchedulerCache: c.schedulerCache,
Ecache: c.equivalencePodCache,
// The scheduler only needs to consider schedulable nodes.
NodeLister: &nodeLister{f.nodeLister},
NodeLister: &nodeLister{c.nodeLister},
Algorithm: algo,
Binder: f.getBinder(extenders),
PodConditionUpdater: &podConditionUpdater{f.client},
PodPreemptor: &podPreemptor{f.client},
Binder: c.getBinder(extenders),
PodConditionUpdater: &podConditionUpdater{c.client},
PodPreemptor: &podPreemptor{c.client},
WaitForCacheSync: func() bool {
return cache.WaitForCacheSync(f.StopEverything, f.scheduledPodsHasSynced)
return cache.WaitForCacheSync(c.StopEverything, c.scheduledPodsHasSynced)
},
NextPod: func() *v1.Pod {
return f.getNextPod()
return c.getNextPod()
},
Error: f.MakeDefaultErrorFunc(podBackoff, f.podQueue),
StopEverything: f.StopEverything,
VolumeBinder: f.volumeBinder,
Error: c.MakeDefaultErrorFunc(podBackoff, c.podQueue),
StopEverything: c.StopEverything,
VolumeBinder: c.volumeBinder,
}, nil
}
@ -1026,8 +1024,8 @@ func (n *nodeLister) List() ([]*v1.Node, error) {
return n.NodeLister.List(labels.Everything())
}
func (f *configFactory) GetPriorityFunctionConfigs(priorityKeys sets.String) ([]algorithm.PriorityConfig, error) {
pluginArgs, err := f.getPluginArgs()
func (c *configFactory) GetPriorityFunctionConfigs(priorityKeys sets.String) ([]algorithm.PriorityConfig, error) {
pluginArgs, err := c.getPluginArgs()
if err != nil {
return nil, err
}
@ -1035,8 +1033,8 @@ func (f *configFactory) GetPriorityFunctionConfigs(priorityKeys sets.String) ([]
return getPriorityFunctionConfigs(priorityKeys, *pluginArgs)
}
func (f *configFactory) GetPriorityMetadataProducer() (algorithm.PriorityMetadataProducer, error) {
pluginArgs, err := f.getPluginArgs()
func (c *configFactory) GetPriorityMetadataProducer() (algorithm.PriorityMetadataProducer, error) {
pluginArgs, err := c.getPluginArgs()
if err != nil {
return nil, err
}
@ -1044,16 +1042,16 @@ func (f *configFactory) GetPriorityMetadataProducer() (algorithm.PriorityMetadat
return getPriorityMetadataProducer(*pluginArgs)
}
func (f *configFactory) GetPredicateMetadataProducer() (algorithm.PredicateMetadataProducer, error) {
pluginArgs, err := f.getPluginArgs()
func (c *configFactory) GetPredicateMetadataProducer() (algorithm.PredicateMetadataProducer, error) {
pluginArgs, err := c.getPluginArgs()
if err != nil {
return nil, err
}
return getPredicateMetadataProducer(*pluginArgs)
}
func (f *configFactory) GetPredicates(predicateKeys sets.String) (map[string]algorithm.FitPredicate, error) {
pluginArgs, err := f.getPluginArgs()
func (c *configFactory) GetPredicates(predicateKeys sets.String) (map[string]algorithm.FitPredicate, error) {
pluginArgs, err := c.getPluginArgs()
if err != nil {
return nil, err
}
@ -1061,31 +1059,31 @@ func (f *configFactory) GetPredicates(predicateKeys sets.String) (map[string]alg
return getFitPredicateFunctions(predicateKeys, *pluginArgs)
}
func (f *configFactory) getPluginArgs() (*PluginFactoryArgs, error) {
func (c *configFactory) getPluginArgs() (*PluginFactoryArgs, error) {
return &PluginFactoryArgs{
PodLister: f.podLister,
ServiceLister: f.serviceLister,
ControllerLister: f.controllerLister,
ReplicaSetLister: f.replicaSetLister,
StatefulSetLister: f.statefulSetLister,
NodeLister: &nodeLister{f.nodeLister},
NodeInfo: &predicates.CachedNodeInfo{NodeLister: f.nodeLister},
PVInfo: &predicates.CachedPersistentVolumeInfo{PersistentVolumeLister: f.pVLister},
PVCInfo: &predicates.CachedPersistentVolumeClaimInfo{PersistentVolumeClaimLister: f.pVCLister},
StorageClassInfo: &predicates.CachedStorageClassInfo{StorageClassLister: f.storageClassLister},
VolumeBinder: f.volumeBinder,
HardPodAffinitySymmetricWeight: f.hardPodAffinitySymmetricWeight,
PodLister: c.podLister,
ServiceLister: c.serviceLister,
ControllerLister: c.controllerLister,
ReplicaSetLister: c.replicaSetLister,
StatefulSetLister: c.statefulSetLister,
NodeLister: &nodeLister{c.nodeLister},
NodeInfo: &predicates.CachedNodeInfo{NodeLister: c.nodeLister},
PVInfo: &predicates.CachedPersistentVolumeInfo{PersistentVolumeLister: c.pVLister},
PVCInfo: &predicates.CachedPersistentVolumeClaimInfo{PersistentVolumeClaimLister: c.pVCLister},
StorageClassInfo: &predicates.CachedStorageClassInfo{StorageClassLister: c.storageClassLister},
VolumeBinder: c.volumeBinder,
HardPodAffinitySymmetricWeight: c.hardPodAffinitySymmetricWeight,
}, nil
}
func (f *configFactory) getNextPod() *v1.Pod {
if pod, err := f.podQueue.Pop(); err == nil {
func (c *configFactory) getNextPod() *v1.Pod {
pod, err := c.podQueue.Pop()
if err == nil {
glog.V(4).Infof("About to try and schedule pod %v", pod.Name)
return pod
} else {
glog.Errorf("Error while retrieving next pod from scheduling queue: %v", err)
return nil
}
glog.Errorf("Error while retrieving next pod from scheduling queue: %v", err)
return nil
}
// unassignedNonTerminatedPod selects pods that are unassigned and non-terminal.
@ -1193,7 +1191,7 @@ func NewPodInformer(client clientset.Interface, resyncPeriod time.Duration, sche
}
}
func (factory *configFactory) MakeDefaultErrorFunc(backoff *util.PodBackoff, podQueue core.SchedulingQueue) func(pod *v1.Pod, err error) {
func (c *configFactory) MakeDefaultErrorFunc(backoff *util.PodBackoff, podQueue core.SchedulingQueue) func(pod *v1.Pod, err error) {
return func(pod *v1.Pod, err error) {
if err == core.ErrNoNodesAvailable {
glog.V(4).Infof("Unable to schedule %v %v: no nodes are registered to the cluster; waiting", pod.Namespace, pod.Name)
@ -1205,13 +1203,13 @@ func (factory *configFactory) MakeDefaultErrorFunc(backoff *util.PodBackoff, pod
nodeName := errStatus.Status().Details.Name
// when node is not found, We do not remove the node right away. Trying again to get
// the node and if the node is still not found, then remove it from the scheduler cache.
_, err := factory.client.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
_, err := c.client.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
if err != nil && errors.IsNotFound(err) {
node := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: nodeName}}
factory.schedulerCache.RemoveNode(&node)
c.schedulerCache.RemoveNode(&node)
// invalidate cached predicate for the node
if factory.enableEquivalenceClassCache {
factory.equivalencePodCache.InvalidateAllCachedPredicateItemOfNode(nodeName)
if c.enableEquivalenceClassCache {
c.equivalencePodCache.InvalidateAllCachedPredicateItemOfNode(nodeName)
}
}
}
@ -1245,14 +1243,14 @@ func (factory *configFactory) MakeDefaultErrorFunc(backoff *util.PodBackoff, pod
// Get the pod again; it may have changed/been scheduled already.
getBackoff := initialGetBackoff
for {
pod, err := factory.client.CoreV1().Pods(podID.Namespace).Get(podID.Name, metav1.GetOptions{})
pod, err := c.client.CoreV1().Pods(podID.Namespace).Get(podID.Name, metav1.GetOptions{})
if err == nil {
if len(pod.Spec.NodeName) == 0 {
podQueue.AddUnschedulableIfNotPresent(pod)
} else {
if factory.volumeBinder != nil {
if c.volumeBinder != nil {
// Volume binder only wants to keep unassigned pods
factory.volumeBinder.DeletePodBindings(pod)
c.volumeBinder.DeletePodBindings(pod)
}
}
break
@ -1260,9 +1258,9 @@ func (factory *configFactory) MakeDefaultErrorFunc(backoff *util.PodBackoff, pod
if errors.IsNotFound(err) {
glog.Warningf("A pod %v no longer exists", podID)
if factory.volumeBinder != nil {
if c.volumeBinder != nil {
// Volume binder only wants to keep unassigned pods
factory.volumeBinder.DeletePodBindings(origPod)
c.volumeBinder.DeletePodBindings(origPod)
}
return
}

View File

@ -55,20 +55,20 @@ type PriorityMetadataProducerFactory func(PluginFactoryArgs) algorithm.PriorityM
// PredicateMetadataProducerFactory produces PredicateMetadataProducer from the given args.
type PredicateMetadataProducerFactory func(PluginFactoryArgs) algorithm.PredicateMetadataProducer
// A FitPredicateFactory produces a FitPredicate from the given args.
// FitPredicateFactory produces a FitPredicate from the given args.
type FitPredicateFactory func(PluginFactoryArgs) algorithm.FitPredicate
// PriorityFunctionFactory produces a PriorityConfig from the given args.
// DEPRECATED
// Use Map-Reduce pattern for priority functions.
// A PriorityFunctionFactory produces a PriorityConfig from the given args.
type PriorityFunctionFactory func(PluginFactoryArgs) algorithm.PriorityFunction
// A PriorityFunctionFactory produces map & reduce priority functions
// PriorityFunctionFactory2 produces map & reduce priority functions
// from a given args.
// FIXME: Rename to PriorityFunctionFactory.
type PriorityFunctionFactory2 func(PluginFactoryArgs) (algorithm.PriorityMapFunction, algorithm.PriorityReduceFunction)
// A PriorityConfigFactory produces a PriorityConfig from the given function and weight
// PriorityConfigFactory produces a PriorityConfig from the given function and weight
type PriorityConfigFactory struct {
Function PriorityFunctionFactory
MapReduceFunction PriorityFunctionFactory2
@ -96,9 +96,11 @@ var (
)
const (
// DefaultProvider defines the default algorithm provider name.
DefaultProvider = "DefaultProvider"
)
// AlgorithmProviderConfig is used to store the configuration of algorithm providers.
type AlgorithmProviderConfig struct {
FitPredicateKeys sets.String
PriorityFunctionKeys sets.String
@ -244,22 +246,24 @@ func IsFitPredicateRegistered(name string) bool {
return ok
}
// RegisterPriorityMetadataProducerFactory registers a PriorityMetadataProducerFactory.
func RegisterPriorityMetadataProducerFactory(factory PriorityMetadataProducerFactory) {
schedulerFactoryMutex.Lock()
defer schedulerFactoryMutex.Unlock()
priorityMetadataProducer = factory
}
// RegisterPredicateMetadataProducerFactory registers a PredicateMetadataProducerFactory.
func RegisterPredicateMetadataProducerFactory(factory PredicateMetadataProducerFactory) {
schedulerFactoryMutex.Lock()
defer schedulerFactoryMutex.Unlock()
predicateMetadataProducer = factory
}
// RegisterPriorityFunction registers a priority function with the algorithm registry. Returns the name,
// with which the function was registered.
// DEPRECATED
// Use Map-Reduce pattern for priority functions.
// Registers a priority function with the algorithm registry. Returns the name,
// with which the function was registered.
func RegisterPriorityFunction(name string, function algorithm.PriorityFunction, weight int) string {
return RegisterPriorityConfigFactory(name, PriorityConfigFactory{
Function: func(PluginFactoryArgs) algorithm.PriorityFunction {
@ -285,6 +289,7 @@ func RegisterPriorityFunction2(
})
}
// RegisterPriorityConfigFactory registers a priority config factory with its name.
func RegisterPriorityConfigFactory(name string, pcf PriorityConfigFactory) string {
schedulerFactoryMutex.Lock()
defer schedulerFactoryMutex.Unlock()
@ -506,6 +511,7 @@ func validatePriorityOrDie(priority schedulerapi.PriorityPolicy) {
}
}
// ListRegisteredFitPredicates returns the registered fit predicates.
func ListRegisteredFitPredicates() []string {
schedulerFactoryMutex.Lock()
defer schedulerFactoryMutex.Unlock()
@ -517,6 +523,7 @@ func ListRegisteredFitPredicates() []string {
return names
}
// ListRegisteredPriorityFunctions returns the registered priority functions.
func ListRegisteredPriorityFunctions() []string {
schedulerFactoryMutex.Lock()
defer schedulerFactoryMutex.Unlock()

View File

@ -107,7 +107,7 @@ func Register() {
})
}
// Gets the time since the specified start in microseconds.
// SinceInMicroseconds gets the time since the specified start in microseconds.
func SinceInMicroseconds(start time.Time) float64 {
return float64(time.Since(start).Nanoseconds() / time.Microsecond.Nanoseconds())
}

View File

@ -257,7 +257,7 @@ func (cache *schedulerCache) AddPod(pod *v1.Pod) error {
}
cache.podStates[key] = ps
default:
return fmt.Errorf("pod %v was already in added state.", key)
return fmt.Errorf("pod %v was already in added state", key)
}
return nil
}
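
Dropping the trailing period follows the Go convention that error strings are not capitalized and carry no trailing punctuation, since they are usually wrapped into longer messages. A tiny standalone illustration:

```go
package main

import (
	"errors"
	"fmt"
)

func addPod(key string) error {
	// Lower-case, no trailing punctuation: the message reads naturally when wrapped.
	return fmt.Errorf("pod %v was already in added state", key)
}

func main() {
	if err := addPod("default/nginx"); err != nil {
		// Wrapping shows why punctuation in the inner message would look odd.
		fmt.Println(errors.New("scheduler cache: " + err.Error()))
	}
}
```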

View File

@ -125,7 +125,7 @@ func TestAssumePodScheduled(t *testing.T) {
Memory: 0,
},
nonzeroRequest: &Resource{
MilliCPU: priorityutil.DefaultMilliCpuRequest,
MilliCPU: priorityutil.DefaultMilliCPURequest,
Memory: priorityutil.DefaultMemoryRequest,
},
allocatableResource: &Resource{},
@ -710,10 +710,10 @@ func buildNodeInfo(node *v1.Node, pods []*v1.Pod) *NodeInfo {
func TestNodeOperators(t *testing.T) {
// Test data
nodeName := "test-node"
cpu_1 := resource.MustParse("1000m")
mem_100m := resource.MustParse("100m")
cpu_half := resource.MustParse("500m")
mem_50m := resource.MustParse("50m")
cpu1 := resource.MustParse("1000m")
mem100m := resource.MustParse("100m")
cpuHalf := resource.MustParse("500m")
mem50m := resource.MustParse("50m")
resourceFooName := "example.com/foo"
resourceFoo := resource.MustParse("1")
@ -728,8 +728,8 @@ func TestNodeOperators(t *testing.T) {
},
Status: v1.NodeStatus{
Allocatable: v1.ResourceList{
v1.ResourceCPU: cpu_1,
v1.ResourceMemory: mem_100m,
v1.ResourceCPU: cpu1,
v1.ResourceMemory: mem100m,
v1.ResourceName(resourceFooName): resourceFoo,
},
},
@ -754,8 +754,8 @@ func TestNodeOperators(t *testing.T) {
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: cpu_half,
v1.ResourceMemory: mem_50m,
v1.ResourceCPU: cpuHalf,
v1.ResourceMemory: mem50m,
},
},
Ports: []v1.ContainerPort{
@ -778,8 +778,8 @@ func TestNodeOperators(t *testing.T) {
},
Status: v1.NodeStatus{
Allocatable: v1.ResourceList{
v1.ResourceCPU: cpu_1,
v1.ResourceMemory: mem_100m,
v1.ResourceCPU: cpu1,
v1.ResourceMemory: mem100m,
v1.ResourceName(resourceFooName): resourceFoo,
},
},
@ -804,8 +804,8 @@ func TestNodeOperators(t *testing.T) {
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: cpu_half,
v1.ResourceMemory: mem_50m,
v1.ResourceCPU: cpuHalf,
v1.ResourceMemory: mem50m,
},
},
},
@ -822,8 +822,8 @@ func TestNodeOperators(t *testing.T) {
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: cpu_half,
v1.ResourceMemory: mem_50m,
v1.ResourceCPU: cpuHalf,
v1.ResourceMemory: mem50m,
},
},
},
@ -866,8 +866,8 @@ func TestNodeOperators(t *testing.T) {
}
// Case 3: update node attribute successfully.
node.Status.Allocatable[v1.ResourceMemory] = mem_50m
expected.allocatableResource.Memory = mem_50m.Value()
node.Status.Allocatable[v1.ResourceMemory] = mem50m
expected.allocatableResource.Memory = mem50m.Value()
expected.generation++
cache.UpdateNode(nil, node)
got, found = cache.nodes[node.Name]

View File

@ -22,6 +22,7 @@ import (
"k8s.io/apimachinery/pkg/labels"
)
// PodFilter is a function to filter a pod. If the pod passes the filter, it returns true; otherwise it returns false.
type PodFilter func(*v1.Pod) bool
// Cache collects pods' information and provides node-level aggregated information.

View File

@ -75,7 +75,7 @@ type Resource struct {
ScalarResources map[v1.ResourceName]int64
}
// New creates a Resource from ResourceList
// NewResource creates a Resource from ResourceList
func NewResource(rl v1.ResourceList) *Resource {
r := &Resource{}
r.Add(rl)
@ -108,6 +108,7 @@ func (r *Resource) Add(rl v1.ResourceList) {
}
}
// ResourceList returns a resource list of this resource.
func (r *Resource) ResourceList() v1.ResourceList {
result := v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(r.MilliCPU, resource.DecimalSI),
@ -126,6 +127,7 @@ func (r *Resource) ResourceList() v1.ResourceList {
return result
}
// Clone returns a copy of this resource.
func (r *Resource) Clone() *Resource {
res := &Resource{
MilliCPU: r.MilliCPU,
@ -143,10 +145,12 @@ func (r *Resource) Clone() *Resource {
return res
}
// AddScalar adds the given scalar quantity to the named resource.
func (r *Resource) AddScalar(name v1.ResourceName, quantity int64) {
r.SetScalar(name, r.ScalarResources[name]+quantity)
}
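
A hedged sketch of how the Resource helpers in this file fit together; the import paths and the extended resource name are assumptions for illustration.

```go
package main

import (
	"fmt"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/kubernetes/pkg/scheduler/schedulercache"
)

func main() {
	// Build a Resource from a ResourceList, then attach an extended resource
	// through AddScalar.
	r := schedulercache.NewResource(v1.ResourceList{
		v1.ResourceCPU:    resource.MustParse("500m"),
		v1.ResourceMemory: resource.MustParse("128Mi"),
	})
	r.AddScalar("example.com/foo", 2)

	fmt.Println(r.MilliCPU)                           // 500
	fmt.Println(r.Memory)                             // 134217728 (bytes)
	fmt.Println(r.ScalarResources["example.com/foo"]) // 2

	cpu := r.ResourceList()[v1.ResourceCPU]
	fmt.Println(cpu.String()) // 500m, round-tripped through ResourceList()
}
```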
// SetScalar sets the named resource to the given scalar quantity.
func (r *Resource) SetScalar(name v1.ResourceName, quantity int64) {
// Lazily allocate scalar resource map.
if r.ScalarResources == nil {
@ -172,7 +176,7 @@ func NewNodeInfo(pods ...*v1.Pod) *NodeInfo {
return ni
}
// Returns overall information about this node.
// Node returns overall information about this node.
func (n *NodeInfo) Node() *v1.Node {
if n == nil {
return nil
@ -188,6 +192,7 @@ func (n *NodeInfo) Pods() []*v1.Pod {
return n.pods
}
// UsedPorts returns used ports on this node.
func (n *NodeInfo) UsedPorts() util.HostPortInfo {
if n == nil {
return nil
@ -203,6 +208,7 @@ func (n *NodeInfo) PodsWithAffinity() []*v1.Pod {
return n.podsWithAffinity
}
// AllowedPodNumber returns the number of the allowed pods on this node.
func (n *NodeInfo) AllowedPodNumber() int {
if n == nil || n.allocatableResource == nil {
return 0
@ -210,6 +216,7 @@ func (n *NodeInfo) AllowedPodNumber() int {
return n.allocatableResource.AllowedPodNumber
}
// Taints returns the taints list on this node.
func (n *NodeInfo) Taints() ([]v1.Taint, error) {
if n == nil {
return nil, nil
@ -217,6 +224,7 @@ func (n *NodeInfo) Taints() ([]v1.Taint, error) {
return n.taints, n.taintsErr
}
// MemoryPressureCondition returns the memory pressure condition status on this node.
func (n *NodeInfo) MemoryPressureCondition() v1.ConditionStatus {
if n == nil {
return v1.ConditionUnknown
@ -224,6 +232,7 @@ func (n *NodeInfo) MemoryPressureCondition() v1.ConditionStatus {
return n.memoryPressureCondition
}
// DiskPressureCondition returns the disk pressure condition status on this node.
func (n *NodeInfo) DiskPressureCondition() v1.ConditionStatus {
if n == nil {
return v1.ConditionUnknown
@ -260,6 +269,7 @@ func (n *NodeInfo) SetAllocatableResource(allocatableResource *Resource) {
n.allocatableResource = allocatableResource
}
// Clone returns a copy of this node.
func (n *NodeInfo) Clone() *NodeInfo {
clone := &NodeInfo{
node: n.node,
@ -306,7 +316,7 @@ func hasPodAffinityConstraints(pod *v1.Pod) bool {
// AddPod adds pod information to this NodeInfo.
func (n *NodeInfo) AddPod(pod *v1.Pod) {
res, non0_cpu, non0_mem := calculateResource(pod)
res, non0CPU, non0Mem := calculateResource(pod)
n.requestedResource.MilliCPU += res.MilliCPU
n.requestedResource.Memory += res.Memory
n.requestedResource.NvidiaGPU += res.NvidiaGPU
@ -317,8 +327,8 @@ func (n *NodeInfo) AddPod(pod *v1.Pod) {
for rName, rQuant := range res.ScalarResources {
n.requestedResource.ScalarResources[rName] += rQuant
}
n.nonzeroRequest.MilliCPU += non0_cpu
n.nonzeroRequest.Memory += non0_mem
n.nonzeroRequest.MilliCPU += non0CPU
n.nonzeroRequest.Memory += non0Mem
n.pods = append(n.pods, pod)
if hasPodAffinityConstraints(pod) {
n.podsWithAffinity = append(n.podsWithAffinity, pod)
@ -361,7 +371,7 @@ func (n *NodeInfo) RemovePod(pod *v1.Pod) error {
n.pods[i] = n.pods[len(n.pods)-1]
n.pods = n.pods[:len(n.pods)-1]
// reduce the resource data
res, non0_cpu, non0_mem := calculateResource(pod)
res, non0CPU, non0Mem := calculateResource(pod)
n.requestedResource.MilliCPU -= res.MilliCPU
n.requestedResource.Memory -= res.Memory
@ -372,8 +382,8 @@ func (n *NodeInfo) RemovePod(pod *v1.Pod) error {
for rName, rQuant := range res.ScalarResources {
n.requestedResource.ScalarResources[rName] -= rQuant
}
n.nonzeroRequest.MilliCPU -= non0_cpu
n.nonzeroRequest.Memory -= non0_mem
n.nonzeroRequest.MilliCPU -= non0CPU
n.nonzeroRequest.Memory -= non0Mem
// Release ports when remove Pods.
n.updateUsedPorts(pod, false)
@ -386,14 +396,14 @@ func (n *NodeInfo) RemovePod(pod *v1.Pod) error {
return fmt.Errorf("no corresponding pod %s in pods of node %s", pod.Name, n.node.Name)
}
func calculateResource(pod *v1.Pod) (res Resource, non0_cpu int64, non0_mem int64) {
func calculateResource(pod *v1.Pod) (res Resource, non0CPU int64, non0Mem int64) {
resPtr := &res
for _, c := range pod.Spec.Containers {
resPtr.Add(c.Resources.Requests)
non0_cpu_req, non0_mem_req := priorityutil.GetNonzeroRequests(&c.Resources.Requests)
non0_cpu += non0_cpu_req
non0_mem += non0_mem_req
non0CPUReq, non0MemReq := priorityutil.GetNonzeroRequests(&c.Resources.Requests)
non0CPU += non0CPUReq
non0Mem += non0MemReq
// No non-zero resources for GPUs or opaque resources.
}
@ -414,7 +424,7 @@ func (n *NodeInfo) updateUsedPorts(pod *v1.Pod, add bool) {
}
}
// Sets the overall node information.
// SetNode sets the overall node information.
func (n *NodeInfo) SetNode(node *v1.Node) error {
n.node = node
@ -436,7 +446,7 @@ func (n *NodeInfo) SetNode(node *v1.Node) error {
return nil
}
// Removes the overall information about the node.
// RemoveNode removes the overall information about the node.
func (n *NodeInfo) RemoveNode(node *v1.Node) error {
// We don't remove NodeInfo because there can still be some pods on this node -
// this is because notifications about pods are delivered in a different watch,

View File

@ -31,54 +31,72 @@ type FakeCache struct {
GetPodFunc func(*v1.Pod) *v1.Pod
}
// AssumePod is a fake method for testing.
func (f *FakeCache) AssumePod(pod *v1.Pod) error {
f.AssumeFunc(pod)
return nil
}
// FinishBinding is a fake method for testing.
func (f *FakeCache) FinishBinding(pod *v1.Pod) error { return nil }
// ForgetPod is a fake method for testing.
func (f *FakeCache) ForgetPod(pod *v1.Pod) error {
f.ForgetFunc(pod)
return nil
}
// AddPod is a fake method for testing.
func (f *FakeCache) AddPod(pod *v1.Pod) error { return nil }
// UpdatePod is a fake method for testing.
func (f *FakeCache) UpdatePod(oldPod, newPod *v1.Pod) error { return nil }
// RemovePod is a fake method for testing.
func (f *FakeCache) RemovePod(pod *v1.Pod) error { return nil }
// IsAssumedPod is a fake method for testing.
func (f *FakeCache) IsAssumedPod(pod *v1.Pod) (bool, error) {
return f.IsAssumedPodFunc(pod), nil
}
// GetPod is a fake method for testing.
func (f *FakeCache) GetPod(pod *v1.Pod) (*v1.Pod, error) {
return f.GetPodFunc(pod), nil
}
// AddNode is a fake method for testing.
func (f *FakeCache) AddNode(node *v1.Node) error { return nil }
// UpdateNode is a fake method for testing.
func (f *FakeCache) UpdateNode(oldNode, newNode *v1.Node) error { return nil }
// RemoveNode is a fake method for testing.
func (f *FakeCache) RemoveNode(node *v1.Node) error { return nil }
// UpdateNodeNameToInfoMap is a fake method for testing.
func (f *FakeCache) UpdateNodeNameToInfoMap(infoMap map[string]*schedulercache.NodeInfo) error {
return nil
}
// AddPDB is a fake method for testing.
func (f *FakeCache) AddPDB(pdb *policy.PodDisruptionBudget) error { return nil }
// UpdatePDB is a fake method for testing.
func (f *FakeCache) UpdatePDB(oldPDB, newPDB *policy.PodDisruptionBudget) error { return nil }
// RemovePDB is a fake method for testing.
func (f *FakeCache) RemovePDB(pdb *policy.PodDisruptionBudget) error { return nil }
// ListPDBs is a fake method for testing.
func (f *FakeCache) ListPDBs(selector labels.Selector) ([]*policy.PodDisruptionBudget, error) {
return nil, nil
}
// List is a fake method for testing.
func (f *FakeCache) List(s labels.Selector) ([]*v1.Pod, error) { return nil, nil }
// FilteredList is a fake method for testing.
func (f *FakeCache) FilteredList(filter schedulercache.PodFilter, selector labels.Selector) ([]*v1.Pod, error) {
return nil, nil
}
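
These fakes let tests inject scheduler-cache behavior through function fields. Below is a hedged sketch of one such test; the package alias and the AssumeFunc field name are inferred from the methods above.

```go
package scheduler_test

import (
	"testing"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	schedulertesting "k8s.io/kubernetes/pkg/scheduler/testing"
)

func TestAssumeIsRecorded(t *testing.T) {
	assumed := ""
	cache := &schedulertesting.FakeCache{
		// Only the hook we care about is set; the other methods are no-ops.
		AssumeFunc: func(pod *v1.Pod) { assumed = pod.Name },
	}

	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "nginx"}}
	if err := cache.AssumePod(pod); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if assumed != "nginx" {
		t.Fatalf("expected AssumeFunc to record nginx, got %q", assumed)
	}
}
```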

View File

@ -25,11 +25,11 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
corelisters "k8s.io/client-go/listers/core/v1"
. "k8s.io/kubernetes/pkg/scheduler/algorithm"
"k8s.io/kubernetes/pkg/scheduler/algorithm"
"k8s.io/kubernetes/pkg/scheduler/schedulercache"
)
var _ NodeLister = &FakeNodeLister{}
var _ algorithm.NodeLister = &FakeNodeLister{}
// FakeNodeLister implements NodeLister on a []*v1.Node for test purposes.
type FakeNodeLister []*v1.Node
@ -39,7 +39,7 @@ func (f FakeNodeLister) List() ([]*v1.Node, error) {
return f, nil
}
var _ PodLister = &FakePodLister{}
var _ algorithm.PodLister = &FakePodLister{}
// FakePodLister implements PodLister on a []*v1.Pod for test purposes.
type FakePodLister []*v1.Pod
@ -54,6 +54,7 @@ func (f FakePodLister) List(s labels.Selector) (selected []*v1.Pod, err error) {
return selected, nil
}
// FilteredList returns pods matching a pod filter and a label selector.
func (f FakePodLister) FilteredList(podFilter schedulercache.PodFilter, s labels.Selector) (selected []*v1.Pod, err error) {
for _, pod := range f {
if podFilter(pod) && s.Matches(labels.Set(pod.Labels)) {
@ -63,7 +64,7 @@ func (f FakePodLister) FilteredList(podFilter schedulercache.PodFilter, s labels
return selected, nil
}
var _ ServiceLister = &FakeServiceLister{}
var _ algorithm.ServiceLister = &FakeServiceLister{}
// FakeServiceLister implements ServiceLister on []v1.Service for test purposes.
type FakeServiceLister []*v1.Service
@ -91,7 +92,7 @@ func (f FakeServiceLister) GetPodServices(pod *v1.Pod) (services []*v1.Service,
return
}
var _ ControllerLister = &FakeControllerLister{}
var _ algorithm.ControllerLister = &FakeControllerLister{}
// FakeControllerLister implements ControllerLister on []v1.ReplicationController for test purposes.
type FakeControllerLister []*v1.ReplicationController
@ -122,7 +123,7 @@ func (f FakeControllerLister) GetPodControllers(pod *v1.Pod) (controllers []*v1.
return
}
var _ ReplicaSetLister = &FakeReplicaSetLister{}
var _ algorithm.ReplicaSetLister = &FakeReplicaSetLister{}
// FakeReplicaSetLister implements ControllerLister on []extensions.ReplicaSet for test purposes.
type FakeReplicaSetLister []*extensions.ReplicaSet
@ -151,7 +152,7 @@ func (f FakeReplicaSetLister) GetPodReplicaSets(pod *v1.Pod) (rss []*extensions.
return
}
var _ StatefulSetLister = &FakeStatefulSetLister{}
var _ algorithm.StatefulSetLister = &FakeStatefulSetLister{}
// FakeStatefulSetLister implements ControllerLister on []apps.StatefulSet for testing purposes.
type FakeStatefulSetLister []*apps.StatefulSet
@ -183,10 +184,12 @@ type FakePersistentVolumeClaimLister []*v1.PersistentVolumeClaim
var _ corelisters.PersistentVolumeClaimLister = FakePersistentVolumeClaimLister{}
// List always returns a "not implemented" error.
func (f FakePersistentVolumeClaimLister) List(selector labels.Selector) (ret []*v1.PersistentVolumeClaim, err error) {
return nil, fmt.Errorf("not implemented")
}
// PersistentVolumeClaims returns a fake PersistentVolumeClaimNamespaceLister scoped to the given namespace.
func (f FakePersistentVolumeClaimLister) PersistentVolumeClaims(namespace string) corelisters.PersistentVolumeClaimNamespaceLister {
return &fakePersistentVolumeClaimNamespaceLister{
pvcs: f,

View File

@ -25,26 +25,36 @@ import (
// PodsToCache is used for testing
type PodsToCache []*v1.Pod
// AssumePod returns nil.
func (p PodsToCache) AssumePod(pod *v1.Pod) error { return nil }
// ForgetPod returns nil.
func (p PodsToCache) ForgetPod(pod *v1.Pod) error { return nil }
// AddPod returns nil.
func (p PodsToCache) AddPod(pod *v1.Pod) error { return nil }
// UpdatePod returns nil.
func (p PodsToCache) UpdatePod(oldPod, newPod *v1.Pod) error { return nil }
// RemovePod returns nil.
func (p PodsToCache) RemovePod(pod *v1.Pod) error { return nil }
// AddNode returns nil.
func (p PodsToCache) AddNode(node *v1.Node) error { return nil }
// UpdateNode returns nil.
func (p PodsToCache) UpdateNode(oldNode, newNode *v1.Node) error { return nil }
// RemoveNode returns nil.
func (p PodsToCache) RemoveNode(node *v1.Node) error { return nil }
// UpdateNodeNameToInfoMap returns nil.
func (p PodsToCache) UpdateNodeNameToInfoMap(infoMap map[string]*schedulercache.NodeInfo) error {
return nil
}
// List returns pods matching the label selector.
func (p PodsToCache) List(s labels.Selector) (selected []*v1.Pod, err error) {
for _, pod := range p {
if s.Matches(labels.Set(pod.Labels)) {

View File

@ -37,10 +37,10 @@ func (realClock) Now() time.Time {
return time.Now()
}
// backoffEntry is single threaded. in particular, it only allows a single action to be waiting on backoff at a time.
// BackoffEntry is single threaded. In particular, it only allows a single action to be waiting on backoff at a time.
// It is expected that all users will only use the public TryWait(...) method
// It is also not safe to copy this object.
type backoffEntry struct {
type BackoffEntry struct {
backoff time.Duration
lastUpdate time.Time
reqInFlight int32
@ -48,19 +48,19 @@ type backoffEntry struct {
// tryLock attempts to acquire a lock via atomic compare and swap.
// returns true if the lock was acquired, false otherwise
func (b *backoffEntry) tryLock() bool {
func (b *BackoffEntry) tryLock() bool {
return atomic.CompareAndSwapInt32(&b.reqInFlight, 0, 1)
}
// unlock returns the lock. panics if the lock isn't held
func (b *backoffEntry) unlock() {
func (b *BackoffEntry) unlock() {
if !atomic.CompareAndSwapInt32(&b.reqInFlight, 1, 0) {
panic(fmt.Sprintf("unexpected state on unlocking: %+v", b))
}
}
// TryWait tries to acquire the backoff lock, maxDuration is the maximum allowed period to wait for.
func (b *backoffEntry) TryWait(maxDuration time.Duration) bool {
func (b *BackoffEntry) TryWait(maxDuration time.Duration) bool {
if !b.tryLock() {
return false
}
@ -69,62 +69,69 @@ func (b *backoffEntry) TryWait(maxDuration time.Duration) bool {
return true
}
func (entry *backoffEntry) getBackoff(maxDuration time.Duration) time.Duration {
duration := entry.backoff
func (b *BackoffEntry) getBackoff(maxDuration time.Duration) time.Duration {
duration := b.backoff
newDuration := time.Duration(duration) * 2
if newDuration > maxDuration {
newDuration = maxDuration
}
entry.backoff = newDuration
b.backoff = newDuration
glog.V(4).Infof("Backing off %s", duration.String())
return duration
}
func (entry *backoffEntry) wait(maxDuration time.Duration) {
time.Sleep(entry.getBackoff(maxDuration))
func (b *BackoffEntry) wait(maxDuration time.Duration) {
time.Sleep(b.getBackoff(maxDuration))
}
// PodBackoff is used to restart a pod with back-off delay.
type PodBackoff struct {
perPodBackoff map[ktypes.NamespacedName]*backoffEntry
perPodBackoff map[ktypes.NamespacedName]*BackoffEntry
lock sync.Mutex
clock clock
defaultDuration time.Duration
maxDuration time.Duration
}
// MaxDuration returns the max time duration of the back-off.
func (p *PodBackoff) MaxDuration() time.Duration {
return p.maxDuration
}
// CreateDefaultPodBackoff creates a default pod back-off object.
func CreateDefaultPodBackoff() *PodBackoff {
return CreatePodBackoff(1*time.Second, 60*time.Second)
}
// CreatePodBackoff creates a pod back-off object with the given default duration and max duration.
func CreatePodBackoff(defaultDuration, maxDuration time.Duration) *PodBackoff {
return CreatePodBackoffWithClock(defaultDuration, maxDuration, realClock{})
}
// CreatePodBackoffWithClock creates a pod back-off object with the given default duration, max duration, and clock.
func CreatePodBackoffWithClock(defaultDuration, maxDuration time.Duration, clock clock) *PodBackoff {
return &PodBackoff{
perPodBackoff: map[ktypes.NamespacedName]*backoffEntry{},
perPodBackoff: map[ktypes.NamespacedName]*BackoffEntry{},
clock: clock,
defaultDuration: defaultDuration,
maxDuration: maxDuration,
}
}
func (p *PodBackoff) GetEntry(podID ktypes.NamespacedName) *backoffEntry {
// GetEntry returns a back-off entry by Pod ID.
func (p *PodBackoff) GetEntry(podID ktypes.NamespacedName) *BackoffEntry {
p.lock.Lock()
defer p.lock.Unlock()
entry, ok := p.perPodBackoff[podID]
if !ok {
entry = &backoffEntry{backoff: p.defaultDuration}
entry = &BackoffEntry{backoff: p.defaultDuration}
p.perPodBackoff[podID] = entry
}
entry.lastUpdate = p.clock.Now()
return entry
}
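
A short usage sketch of the back-off helpers above; the import paths are assumed from this commit's layout.

```go
package main

import (
	"fmt"

	ktypes "k8s.io/apimachinery/pkg/types"
	"k8s.io/kubernetes/pkg/scheduler/util"
)

func main() {
	backoff := util.CreateDefaultPodBackoff() // 1s initial back-off, 60s cap

	podID := ktypes.NamespacedName{Namespace: "default", Name: "nginx"}
	entry := backoff.GetEntry(podID)

	// TryWait sleeps for the entry's current back-off (doubling it up to the cap)
	// and returns false if another goroutine is already waiting on this entry.
	if entry.TryWait(backoff.MaxDuration()) {
		fmt.Println("retrying pod", podID)
	}
}
```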
// Gc executes garbage collection on the pod back-off entries.
func (p *PodBackoff) Gc() {
p.lock.Lock()
defer p.lock.Unlock()

View File

@ -28,9 +28,11 @@ import (
"k8s.io/kubernetes/pkg/api/legacyscheme"
api "k8s.io/kubernetes/pkg/apis/core"
// Init the core API installation
_ "k8s.io/kubernetes/pkg/apis/core/install"
)
// TestGroup defines an API group for testing.
type TestGroup struct {
externalGroupVersion schema.GroupVersion
internalGroupVersion schema.GroupVersion
@ -39,8 +41,10 @@ type TestGroup struct {
}
var (
// Groups defines a TestGroup map.
Groups = make(map[string]TestGroup)
Test TestGroup
// Test defines a TestGroup object.
Test TestGroup
serializer runtime.SerializerInfo
)

View File

@ -25,6 +25,7 @@ import (
"k8s.io/kubernetes/pkg/features"
)
// DefaultBindAllHostIP defines the default IP address used to bind to all hosts.
const DefaultBindAllHostIP = "0.0.0.0"
// ProtocolPort represents a protocol port pair, e.g. tcp:80.

View File

@ -68,7 +68,7 @@ func TestSortableList(t *testing.T) {
podList := SortableList{CompFunc: higherPriority}
// Add a few Pods with different priorities from lowest to highest priority.
for i := 0; i < 10; i++ {
var p int32 = int32(i)
var p = int32(i)
pod := &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{

View File

@ -334,7 +334,7 @@ func TestSchedulerExtender(t *testing.T) {
FilterVerb: filter,
PrioritizeVerb: prioritize,
Weight: 3,
EnableHttps: false,
EnableHTTPS: false,
},
{
URLPrefix: es2.URL,
@ -342,14 +342,14 @@ func TestSchedulerExtender(t *testing.T) {
PrioritizeVerb: prioritize,
BindVerb: bind,
Weight: 4,
EnableHttps: false,
EnableHTTPS: false,
},
{
URLPrefix: es3.URL,
FilterVerb: filter,
PrioritizeVerb: prioritize,
Weight: 10,
EnableHttps: false,
EnableHTTPS: false,
NodeCacheCapable: true,
},
},