mirror of https://github.com/k3s-io/k3s
Remove BalanceAttachedNodeVolumes
parent 4d1ac6b2e5
commit d1e0b5efa6
@@ -249,14 +249,6 @@ const (
 	// volume limits
 	AttachVolumeLimit utilfeature.Feature = "AttachVolumeLimit"
 
-	// owner: @ravig
-	// alpha: v1.11
-	//
-	// Include volume count on node to be considered for balanced resource allocation while scheduling.
-	// A node which has closer cpu,memory utilization and volume count is favoured by scheduler
-	// while making decisions.
-	BalanceAttachedNodeVolumes utilfeature.Feature = "BalanceAttachedNodeVolumes"
-
 	// owner @freehan
 	// GA: v1.14
 	//
@@ -426,7 +418,6 @@ var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureS
 	RunAsGroup: {Default: true, PreRelease: utilfeature.Beta},
 	CSIMigrationOpenStack: {Default: false, PreRelease: utilfeature.Alpha},
 	VolumeSubpath: {Default: true, PreRelease: utilfeature.GA},
-	BalanceAttachedNodeVolumes: {Default: false, PreRelease: utilfeature.Alpha},
 	PodReadinessGates: {Default: true, PreRelease: utilfeature.GA, LockToDefault: true}, // remove in 1.16
 	VolumeSubpathEnvExpansion: {Default: false, PreRelease: utilfeature.Alpha},
 	KubeletPluginsWatcher: {Default: true, PreRelease: utilfeature.GA, LockToDefault: true}, // remove in 1.16
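Note on the two hunks above: a feature gate in this codebase is a named constant plus an entry in the defaults map, and every behavioral change hangs off a runtime Enabled check, so removing BalanceAttachedNodeVolumes means deleting the constant, its default spec, and each call site in the hunks below. A minimal, self-contained sketch of that pattern (the Feature/FeatureSpec types here are simplified stand-ins for the utilfeature API, not the real types):

package main

import "fmt"

// Feature and FeatureSpec mirror, in miniature, the shape of the
// utilfeature types used in the hunks above (a sketch, not the real API).
type Feature string

type FeatureSpec struct {
	Default    bool
	PreRelease string
}

const BalanceAttachedNodeVolumes Feature = "BalanceAttachedNodeVolumes"

var defaultFeatureGates = map[Feature]FeatureSpec{
	BalanceAttachedNodeVolumes: {Default: false, PreRelease: "Alpha"},
}

// Enabled looks a feature up in the defaults table, the way call sites
// such as the predicate and priority code below consult the gate.
func Enabled(f Feature) bool {
	spec, ok := defaultFeatureGates[f]
	return ok && spec.Default
}

func main() {
	fmt.Println(Enabled(BalanceAttachedNodeVolumes)) // false: alpha, off by default
}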
@@ -519,12 +519,6 @@ func (c *MaxPDVolumeCountChecker) predicate(pod *v1.Pod, meta PredicateMetadata,
 		// violates MaxEBSVolumeCount or MaxGCEPDVolumeCount
 		return false, []PredicateFailureReason{ErrMaxVolumeCountExceeded}, nil
 	}
-	if nodeInfo != nil && nodeInfo.TransientInfo != nil && utilfeature.DefaultFeatureGate.Enabled(features.BalanceAttachedNodeVolumes) {
-		nodeInfo.TransientInfo.TransientLock.Lock()
-		defer nodeInfo.TransientInfo.TransientLock.Unlock()
-		nodeInfo.TransientInfo.TransNodeInfo.AllocatableVolumesCount = maxAttachLimit - numExistingVolumes
-		nodeInfo.TransientInfo.TransNodeInfo.RequestedVolumes = numNewVolumes
-	}
 	return true, nil, nil
 }
 
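The block removed from MaxPDVolumeCountChecker.predicate was the producer side of the feature: once a pod passed the volume-count check, it cached the node's remaining attachable-volume slots (maxAttachLimit - numExistingVolumes) and the pod's requested count in TransientInfo, under TransientLock, for the priority stage to read. A standalone sketch of that bookkeeping, with the scheduler's types reduced to a toy struct (field names follow the removed lines; the numbers in main are illustrative):

package main

import (
	"fmt"
	"sync"
)

// transientNodeInfo is a simplified stand-in for the scheduler's
// TransNodeInfo fields that the removed predicate code populated.
type transientNodeInfo struct {
	mu                      sync.Mutex
	AllocatableVolumesCount int
	RequestedVolumes        int
}

// recordVolumeCounts mirrors the deleted block: the remaining slots are
// the node's attach limit minus the volumes already attached.
func (t *transientNodeInfo) recordVolumeCounts(maxAttachLimit, numExistingVolumes, numNewVolumes int) {
	t.mu.Lock()
	defer t.mu.Unlock()
	t.AllocatableVolumesCount = maxAttachLimit - numExistingVolumes
	t.RequestedVolumes = numNewVolumes
}

func main() {
	var t transientNodeInfo
	// e.g. a node limited to 39 attachments with 30 already in use,
	// and a pod requesting 2 new volumes (illustrative numbers).
	t.recordVolumeCounts(39, 30, 2)
	fmt.Println(t.AllocatableVolumesCount, t.RequestedVolumes) // 9 2
}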
@@ -19,8 +19,6 @@ package priorities
 import (
 	"math"
 
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
-	"k8s.io/kubernetes/pkg/features"
 	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )
@@ -42,20 +40,6 @@ func balancedResourceScorer(requested, allocable *schedulernodeinfo.Resource, in
 	cpuFraction := fractionOfCapacity(requested.MilliCPU, allocable.MilliCPU)
 	memoryFraction := fractionOfCapacity(requested.Memory, allocable.Memory)
-	// This to find a node which has most balanced CPU, memory and volume usage.
-	if includeVolumes && utilfeature.DefaultFeatureGate.Enabled(features.BalanceAttachedNodeVolumes) && allocatableVolumes > 0 {
-		volumeFraction := float64(requestedVolumes) / float64(allocatableVolumes)
-		if cpuFraction >= 1 || memoryFraction >= 1 || volumeFraction >= 1 {
-			// if requested >= capacity, the corresponding host should never be preferred.
-			return 0
-		}
-		// Compute variance for all the three fractions.
-		mean := (cpuFraction + memoryFraction + volumeFraction) / float64(3)
-		variance := float64((((cpuFraction - mean) * (cpuFraction - mean)) + ((memoryFraction - mean) * (memoryFraction - mean)) + ((volumeFraction - mean) * (volumeFraction - mean))) / float64(3))
-		// Since the variance is between positive fractions, it will be positive fraction. 1-variance lets the
-		// score to be higher for node which has least variance and multiplying it with 10 provides the scaling
-		// factor needed.
-		return int64((1 - variance) * float64(schedulerapi.MaxPriority))
-	}
-
 	if cpuFraction >= 1 || memoryFraction >= 1 {
 		// if requested >= capacity, the corresponding host should never be preferred.
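The branch deleted from balancedResourceScorer is a self-contained formula: take the CPU, memory, and volume utilization fractions, return 0 if any axis is at or over capacity, otherwise compute their mean and population variance and score the node as (1 - variance) * MaxPriority, so the most evenly utilized node wins. A runnable sketch of just that computation (maxPriority hard-coded to 10, the schedulerapi.MaxPriority value of this era; the fractions in main are made up):

package main

import "fmt"

const maxPriority = 10 // schedulerapi.MaxPriority in this release

// balancedScore reproduces the deleted three-fraction branch of
// balancedResourceScorer: nodes where requested/allocatable is most
// similar across CPU, memory, and volumes get the highest score.
func balancedScore(cpuFraction, memoryFraction, volumeFraction float64) int64 {
	if cpuFraction >= 1 || memoryFraction >= 1 || volumeFraction >= 1 {
		// If requested >= capacity on any axis, never prefer this host.
		return 0
	}
	mean := (cpuFraction + memoryFraction + volumeFraction) / 3
	variance := ((cpuFraction-mean)*(cpuFraction-mean) +
		(memoryFraction-mean)*(memoryFraction-mean) +
		(volumeFraction-mean)*(volumeFraction-mean)) / 3
	// The variance of fractions in [0,1) is itself in [0,1), so 1-variance
	// rewards balance; multiplying by maxPriority scales to the 0-10 range.
	return int64((1 - variance) * float64(maxPriority))
}

func main() {
	fmt.Println(balancedScore(0.5, 0.5, 0.5)) // perfectly balanced: 10
	fmt.Println(balancedScore(0.9, 0.1, 0.5)) // skewed: variance ~0.107 -> 8
	fmt.Println(balancedScore(0.5, 1.2, 0.5)) // memory over capacity: 0
}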
@@ -20,9 +20,7 @@ import (
 	"fmt"
 
 	"k8s.io/api/core/v1"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/klog"
-	"k8s.io/kubernetes/pkg/features"
 	priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
 	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
@@ -58,31 +56,16 @@ func (r *ResourceAllocationPriority) PriorityMap(
 	requested.Memory += nodeInfo.NonZeroRequest().Memory
 	var score int64
-	// Check if the pod has volumes and this could be added to scorer function for balanced resource allocation.
-	if len(pod.Spec.Volumes) >= 0 && utilfeature.DefaultFeatureGate.Enabled(features.BalanceAttachedNodeVolumes) && nodeInfo.TransientInfo != nil {
-		score = r.scorer(&requested, &allocatable, true, nodeInfo.TransientInfo.TransNodeInfo.RequestedVolumes, nodeInfo.TransientInfo.TransNodeInfo.AllocatableVolumesCount)
-	} else {
-		score = r.scorer(&requested, &allocatable, false, 0, 0)
-	}
+	score = r.scorer(&requested, &allocatable, false, 0, 0)
 
 	if klog.V(10) {
-		if len(pod.Spec.Volumes) >= 0 && utilfeature.DefaultFeatureGate.Enabled(features.BalanceAttachedNodeVolumes) && nodeInfo.TransientInfo != nil {
-			klog.Infof(
-				"%v -> %v: %v, capacity %d millicores %d memory bytes, %d volumes, total request %d millicores %d memory bytes %d volumes, score %d",
-				pod.Name, node.Name, r.Name,
-				allocatable.MilliCPU, allocatable.Memory, nodeInfo.TransientInfo.TransNodeInfo.AllocatableVolumesCount,
-				requested.MilliCPU, requested.Memory,
-				nodeInfo.TransientInfo.TransNodeInfo.RequestedVolumes,
-				score,
-			)
-		} else {
-			klog.Infof(
-				"%v -> %v: %v, capacity %d millicores %d memory bytes, total request %d millicores %d memory bytes, score %d",
-				pod.Name, node.Name, r.Name,
-				allocatable.MilliCPU, allocatable.Memory,
-				requested.MilliCPU, requested.Memory,
-				score,
-			)
-		}
+		klog.Infof(
+			"%v -> %v: %v, capacity %d millicores %d memory bytes, total request %d millicores %d memory bytes, score %d",
+			pod.Name, node.Name, r.Name,
+			allocatable.MilliCPU, allocatable.Memory,
+			requested.MilliCPU, requested.Memory,
+			score,
+		)
 	}
 
 	return schedulerapi.HostPriority{
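Two details of this hunk are worth flagging. First, the deleted condition len(pod.Spec.Volumes) >= 0 was vacuously true (a length is never negative), so the else branch only ever ran when the gate was off or TransientInfo was nil; the replacement keeps exactly that fallback path, r.scorer(&requested, &allocatable, false, 0, 0). Second, the surviving log statement stays behind klog.V(10) so the formatting work only happens at high verbosity. A minimal sketch of that guard pattern (the flag setup and message are illustrative; `if klog.V(10)` compiles directly in the klog version used here, where Verbose is a bool):

package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	klog.InitFlags(nil) // registers -v and friends on the default flag set
	flag.Set("v", "10") // turn verbose logging on, as a -v=10 scheduler run would

	score := int64(7) // illustrative value
	if klog.V(10) {
		// The guard means these arguments are only evaluated when
		// verbosity is at least 10, mirroring the hunk above.
		klog.Infof("example-pod -> example-node: ExamplePriority, score %d", score)
	}
	klog.Flush()
}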
@@ -21,12 +21,10 @@ import (
 	"sync"
 	"time"
 
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/wait"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
-	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/scheduler/algorithm"
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
@@ -210,7 +208,6 @@ func (cache *schedulerCache) Snapshot() *Snapshot {
 func (cache *schedulerCache) UpdateNodeInfoSnapshot(nodeSnapshot *NodeInfoSnapshot) error {
 	cache.mu.Lock()
 	defer cache.mu.Unlock()
-	balancedVolumesEnabled := utilfeature.DefaultFeatureGate.Enabled(features.BalanceAttachedNodeVolumes)
 
 	// Get the last generation of the the snapshot.
 	snapshotGeneration := nodeSnapshot.Generation
@@ -222,10 +219,6 @@ func (cache *schedulerCache) UpdateNodeInfoSnapshot(nodeSnapshot *NodeInfoSnapsh
 			// all the nodes are updated before the existing snapshot. We are done.
 			break
 		}
-		if balancedVolumesEnabled && node.info.TransientInfo != nil {
-			// Transient scheduler info is reset here.
-			node.info.TransientInfo.ResetTransientSchedulerInfo()
-		}
 		if np := node.info.Node(); np != nil {
 			nodeSnapshot.NodeInfoMap[np.Name] = node.info.Clone()
 		}
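Context for the two schedulerCache hunks above: UpdateNodeInfoSnapshot walks the node list from most to least recently updated and stops as soon as it reaches an entry no newer than the snapshot's generation, copying only what changed; before this commit it also reset each copied node's transient volume counters so stale predicate results could not leak into the next scheduling cycle. A toy model of that generation-based incremental update (the types and names here are invented for illustration, not the scheduler's):

package main

import "fmt"

// nodeEntry is a toy stand-in for the cache's node list entries: each
// carries a monotonically increasing generation stamped on update.
type nodeEntry struct {
	name       string
	generation int64
	transient  int // stand-in for the TransientSchedulerInfo counters
}

// updateSnapshot copies only entries newer than the snapshot's
// generation, resetting transient data as the removed code did.
func updateSnapshot(nodes []*nodeEntry, snapshot map[string]nodeEntry, snapshotGeneration int64) int64 {
	latest := snapshotGeneration
	for _, n := range nodes { // assumed ordered most -> least recently updated
		if n.generation <= snapshotGeneration {
			// Everything older is already in the snapshot; we are done.
			break
		}
		n.transient = 0 // reset transient scheduler info before publishing
		snapshot[n.name] = *n
		if n.generation > latest {
			latest = n.generation
		}
	}
	return latest
}

func main() {
	nodes := []*nodeEntry{
		{name: "node-b", generation: 5, transient: 3},
		{name: "node-a", generation: 2, transient: 1},
	}
	snapshot := map[string]nodeEntry{"node-a": {name: "node-a", generation: 2}}
	gen := updateSnapshot(nodes, snapshot, 2)
	fmt.Println(gen, snapshot["node-b"].transient) // 5 0
}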
@@ -126,15 +126,6 @@ func NewTransientSchedulerInfo() *TransientSchedulerInfo {
 	return tsi
 }
 
-// ResetTransientSchedulerInfo resets the TransientSchedulerInfo.
-func (transientSchedInfo *TransientSchedulerInfo) ResetTransientSchedulerInfo() {
-	transientSchedInfo.TransientLock.Lock()
-	defer transientSchedInfo.TransientLock.Unlock()
-	// Reset TransientNodeInfo.
-	transientSchedInfo.TransNodeInfo.AllocatableVolumesCount = 0
-	transientSchedInfo.TransNodeInfo.RequestedVolumes = 0
-}
-
 // Resource is a collection of compute resource.
 type Resource struct {
 	MilliCPU int64