mirror of https://github.com/k3s-io/k3s
shorten scheduler package alias for better readability
- schedulerinternalcache -> internalcache
parent 0c93929298
commit 49346c1e04
tag    k3s-v1.15.3
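The change is purely cosmetic: a Go import can declare a local alias, and every reference in that file goes through the alias, so renaming it cannot change behavior. A minimal, self-contained sketch of the mechanism (stdlib package and hypothetical names, not code from this commit):

package main

import (
	"fmt"
	internalrand "math/rand" // aliased import; callers write the alias, not the package's own name
)

func main() {
	// Renaming the alias (e.g. internalrand -> rnd) would only change the
	// import line and reference sites like this one, exactly as
	// schedulerinternalcache -> internalcache does below.
	fmt.Println(internalrand.Intn(10))
}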
@@ -29,7 +29,7 @@ import (
 	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
 	"k8s.io/kubernetes/pkg/scheduler/algorithm/priorities"
 	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
-	schedulerinternalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
+	internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
 	internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 	schedulertesting "k8s.io/kubernetes/pkg/scheduler/testing"
@@ -530,7 +530,7 @@ func TestGenericSchedulerWithExtenders(t *testing.T) {
 			for ii := range test.extenders {
 				extenders = append(extenders, &test.extenders[ii])
 			}
-			cache := schedulerinternalcache.New(time.Duration(0), wait.NeverStop)
+			cache := internalcache.New(time.Duration(0), wait.NeverStop)
 			for _, name := range test.nodes {
 				cache.AddNode(createNode(name))
 			}
@@ -39,7 +39,7 @@ import (
 	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
 	"k8s.io/kubernetes/pkg/scheduler/algorithm/priorities"
 	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
-	schedulerinternalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
+	internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
 	internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
 	"k8s.io/kubernetes/pkg/scheduler/metrics"
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
@@ -131,7 +131,7 @@ type ScheduleResult struct {
 }
 
 type genericScheduler struct {
-	cache schedulerinternalcache.Cache
+	cache internalcache.Cache
 	schedulingQueue internalqueue.SchedulingQueue
 	predicates map[string]predicates.FitPredicate
 	priorityMetaProducer priorities.PriorityMetadataProducer
@@ -141,7 +141,7 @@ type genericScheduler struct {
 	extenders []algorithm.SchedulerExtender
 	lastNodeIndex uint64
 	alwaysCheckAllPredicates bool
-	nodeInfoSnapshot schedulerinternalcache.NodeInfoSnapshot
+	nodeInfoSnapshot internalcache.NodeInfoSnapshot
 	volumeBinder *volumebinder.VolumeBinder
 	pvcLister corelisters.PersistentVolumeClaimLister
 	pdbLister algorithm.PDBLister
@@ -1205,7 +1205,7 @@ func podPassesBasicChecks(pod *v1.Pod, pvcLister corelisters.PersistentVolumeCla
 
 // NewGenericScheduler creates a genericScheduler object.
 func NewGenericScheduler(
-	cache schedulerinternalcache.Cache,
+	cache internalcache.Cache,
 	podQueue internalqueue.SchedulingQueue,
 	predicates map[string]predicates.FitPredicate,
 	predicateMetaProducer predicates.PredicateMetadataProducer,
@@ -1229,7 +1229,7 @@ func NewGenericScheduler(
 		priorityMetaProducer: priorityMetaProducer,
 		pluginSet: pluginSet,
 		extenders: extenders,
-		nodeInfoSnapshot: schedulerinternalcache.NewNodeInfoSnapshot(),
+		nodeInfoSnapshot: internalcache.NewNodeInfoSnapshot(),
 		volumeBinder: volumeBinder,
 		pvcLister: pvcLister,
 		pdbLister: pdbLister,
@@ -38,7 +38,7 @@ import (
 	"k8s.io/kubernetes/pkg/scheduler/algorithm/priorities"
 	priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
 	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
-	schedulerinternalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
+	internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
 	internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 	plugins "k8s.io/kubernetes/pkg/scheduler/plugins/v1alpha1"
@@ -455,7 +455,7 @@ func TestGenericScheduler(t *testing.T) {
 	}
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
-			cache := schedulerinternalcache.New(time.Duration(0), wait.NeverStop)
+			cache := internalcache.New(time.Duration(0), wait.NeverStop)
 			for _, pod := range test.pods {
 				cache.AddPod(pod)
 			}
@@ -497,7 +497,7 @@ func TestGenericScheduler(t *testing.T) {
 // makeScheduler makes a simple genericScheduler for testing.
 func makeScheduler(predicates map[string]algorithmpredicates.FitPredicate, nodes []*v1.Node) *genericScheduler {
 	algorithmpredicates.SetPredicatesOrdering(order)
-	cache := schedulerinternalcache.New(time.Duration(0), wait.NeverStop)
+	cache := internalcache.New(time.Duration(0), wait.NeverStop)
 	for _, n := range nodes {
 		cache.AddNode(n)
 	}
@@ -1469,7 +1469,7 @@ func TestPreempt(t *testing.T) {
 		t.Run(test.name, func(t *testing.T) {
 			t.Logf("===== Running test %v", t.Name())
 			stop := make(chan struct{})
-			cache := schedulerinternalcache.New(time.Duration(0), stop)
+			cache := internalcache.New(time.Duration(0), stop)
 			for _, pod := range test.pods {
 				cache.AddPod(pod)
 			}
@@ -51,7 +51,7 @@ import (
 	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 	"k8s.io/kubernetes/pkg/scheduler/api/validation"
 	"k8s.io/kubernetes/pkg/scheduler/core"
-	schedulerinternalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
+	internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
 	cachedebugger "k8s.io/kubernetes/pkg/scheduler/internal/cache/debugger"
 	internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
 	"k8s.io/kubernetes/pkg/scheduler/plugins"
@@ -81,7 +81,7 @@ type PodConditionUpdater interface {
 type Config struct {
 	// It is expected that changes made via SchedulerCache will be observed
 	// by NodeLister and Algorithm.
-	SchedulerCache schedulerinternalcache.Cache
+	SchedulerCache internalcache.Cache
 
 	NodeLister algorithm.NodeLister
 	Algorithm core.ScheduleAlgorithm
@@ -191,7 +191,7 @@ type configFactory struct {
 
 	scheduledPodsHasSynced cache.InformerSynced
 
-	schedulerCache schedulerinternalcache.Cache
+	schedulerCache internalcache.Cache
 
 	// SchedulerName of a scheduler is used to select which pods will be
 	// processed by this scheduler, based on pods's "spec.schedulerName".
@@ -247,7 +247,7 @@ func NewConfigFactory(args *ConfigFactoryArgs) Configurator {
 	if stopEverything == nil {
 		stopEverything = wait.NeverStop
 	}
-	schedulerCache := schedulerinternalcache.New(30*time.Second, stopEverything)
+	schedulerCache := internalcache.New(30*time.Second, stopEverything)
 
 	// storageClassInformer is only enabled through VolumeScheduling feature gate
 	var storageClassLister storagelisters.StorageClassLister
@@ -638,7 +638,7 @@ func NewPodInformer(client clientset.Interface, resyncPeriod time.Duration) core
 }
 
 // MakeDefaultErrorFunc construct a function to handle pod scheduler error
-func MakeDefaultErrorFunc(client clientset.Interface, backoff *util.PodBackoff, podQueue internalqueue.SchedulingQueue, schedulerCache schedulerinternalcache.Cache, stopEverything <-chan struct{}) func(pod *v1.Pod, err error) {
+func MakeDefaultErrorFunc(client clientset.Interface, backoff *util.PodBackoff, podQueue internalqueue.SchedulingQueue, schedulerCache internalcache.Cache, stopEverything <-chan struct{}) func(pod *v1.Pod, err error) {
 	return func(pod *v1.Pod, err error) {
 		if err == core.ErrNoNodesAvailable {
 			klog.V(4).Infof("Unable to schedule %v/%v: no nodes are registered to the cluster; waiting", pod.Namespace, pod.Name)
@@ -38,7 +38,7 @@ import (
 	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
 	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 	latestschedulerapi "k8s.io/kubernetes/pkg/scheduler/api/latest"
-	schedulerinternalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
+	internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
 	internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 	"k8s.io/kubernetes/pkg/scheduler/util"
@@ -254,7 +254,7 @@ func TestDefaultErrorFunc(t *testing.T) {
 	stopCh := make(chan struct{})
 	defer close(stopCh)
 	queue := &internalqueue.FIFO{FIFO: cache.NewFIFO(cache.MetaNamespaceKeyFunc)}
-	schedulerCache := schedulerinternalcache.New(30*time.Second, stopCh)
+	schedulerCache := internalcache.New(30*time.Second, stopCh)
 	podBackoff := util.CreatePodBackoff(1*time.Millisecond, 1*time.Second)
 	errFunc := MakeDefaultErrorFunc(client, podBackoff, queue, schedulerCache, stopCh)
 
@@ -1064,7 +1064,7 @@ func TestNodeOperators(t *testing.T) {
 	// Case 1: the node was added into cache successfully.
 	got, found := cache.nodes[node.Name]
 	if !found {
-		t.Errorf("Failed to find node %v in schedulerinternalcache.", node.Name)
+		t.Errorf("Failed to find node %v in internalcache.", node.Name)
 	}
 	if cache.nodeTree.NumNodes() != 1 || cache.nodeTree.Next() != node.Name {
 		t.Errorf("cache.nodeTree is not updated correctly after adding node: %v", node.Name)
@@ -24,7 +24,7 @@ import (
 	"k8s.io/apimachinery/pkg/labels"
 	corelisters "k8s.io/client-go/listers/core/v1"
 	"k8s.io/klog"
-	schedulerinternalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
+	internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
 	internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )
@@ -33,7 +33,7 @@ import (
 type CacheComparer struct {
 	NodeLister corelisters.NodeLister
 	PodLister corelisters.PodLister
-	Cache schedulerinternalcache.Cache
+	Cache internalcache.Cache
 	PodQueue internalqueue.SchedulingQueue
 }
 
@@ -20,7 +20,7 @@ import (
 	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/kubernetes/pkg/scheduler/algorithm"
-	schedulerinternalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
+	internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
 )
 
 // Cache is used for testing
@@ -75,7 +75,7 @@ func (c *Cache) UpdateNode(oldNode, newNode *v1.Node) error { return nil }
 func (c *Cache) RemoveNode(node *v1.Node) error { return nil }
 
 // UpdateNodeInfoSnapshot is a fake method for testing.
-func (c *Cache) UpdateNodeInfoSnapshot(nodeSnapshot *schedulerinternalcache.NodeInfoSnapshot) error {
+func (c *Cache) UpdateNodeInfoSnapshot(nodeSnapshot *internalcache.NodeInfoSnapshot) error {
 	return nil
 }
 
@@ -88,9 +88,9 @@ func (c *Cache) FilteredList(filter algorithm.PodFilter, selector labels.Selecto
 }
 
 // Snapshot is a fake method for testing
-func (c *Cache) Snapshot() *schedulerinternalcache.Snapshot {
-	return &schedulerinternalcache.Snapshot{}
+func (c *Cache) Snapshot() *internalcache.Snapshot {
+	return &internalcache.Snapshot{}
 }
 
 // NodeTree is a fake method for testing.
-func (c *Cache) NodeTree() *schedulerinternalcache.NodeTree { return nil }
+func (c *Cache) NodeTree() *internalcache.NodeTree { return nil }
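The fake package touched above is a test double: a type whose no-op methods satisfy the scheduler's Cache interface, which is why the rename only changes type names in signatures. A minimal sketch of the pattern, with a hypothetical one-method interface rather than the real scheduler API:

package main

import "fmt"

// Cache is a stand-in for the scheduler cache interface, trimmed to one
// method for illustration; the real interface is much larger.
type Cache interface {
	AddNode(name string) error
}

// fakeCache satisfies Cache with no-op methods, as the fake package above
// does for the real interface.
type fakeCache struct{}

func (fakeCache) AddNode(name string) error { return nil }

func main() {
	var c Cache = fakeCache{} // tests inject the fake where a real cache would go
	fmt.Println(c.AddNode("machine1")) // prints <nil>
}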
@@ -40,7 +40,7 @@ import (
 	kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
 	"k8s.io/kubernetes/pkg/scheduler/core"
 	"k8s.io/kubernetes/pkg/scheduler/factory"
-	schedulerinternalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
+	internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
 	"k8s.io/kubernetes/pkg/scheduler/metrics"
 	"k8s.io/kubernetes/pkg/scheduler/util"
 )
@@ -59,7 +59,7 @@ type Scheduler struct {
 }
 
 // Cache returns the cache in scheduler for test to check the data in scheduler.
-func (sched *Scheduler) Cache() schedulerinternalcache.Cache {
+func (sched *Scheduler) Cache() internalcache.Cache {
 	return sched.config.SchedulerCache
 }
 
@@ -46,7 +46,7 @@ import (
 	kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
 	"k8s.io/kubernetes/pkg/scheduler/core"
 	"k8s.io/kubernetes/pkg/scheduler/factory"
-	schedulerinternalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
+	internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
 	fakecache "k8s.io/kubernetes/pkg/scheduler/internal/cache/fake"
 	internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
@@ -331,7 +331,7 @@ func TestSchedulerNoPhantomPodAfterExpire(t *testing.T) {
 	stop := make(chan struct{})
 	defer close(stop)
 	queuedPodStore := clientcache.NewFIFO(clientcache.MetaNamespaceKeyFunc)
-	scache := schedulerinternalcache.New(100*time.Millisecond, stop)
+	scache := internalcache.New(100*time.Millisecond, stop)
 	pod := podWithPort("pod.Name", "", 8080)
 	node := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}}
 	scache.AddNode(&node)
@@ -390,7 +390,7 @@ func TestSchedulerNoPhantomPodAfterDelete(t *testing.T) {
 	stop := make(chan struct{})
 	defer close(stop)
 	queuedPodStore := clientcache.NewFIFO(clientcache.MetaNamespaceKeyFunc)
-	scache := schedulerinternalcache.New(10*time.Minute, stop)
+	scache := internalcache.New(10*time.Minute, stop)
 	firstPod := podWithPort("pod.Name", "", 8080)
 	node := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}}
 	scache.AddNode(&node)
@@ -478,7 +478,7 @@ func TestSchedulerErrorWithLongBinding(t *testing.T) {
 	} {
 		t.Run(test.name, func(t *testing.T) {
 			queuedPodStore := clientcache.NewFIFO(clientcache.MetaNamespaceKeyFunc)
-			scache := schedulerinternalcache.New(test.CacheTTL, stop)
+			scache := internalcache.New(test.CacheTTL, stop)
 
 			node := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}}
 			scache.AddNode(&node)
@@ -519,7 +519,7 @@ func TestSchedulerErrorWithLongBinding(t *testing.T) {
 
 // queuedPodStore: pods queued before processing.
 // cache: scheduler cache that might contain assumed pods.
-func setupTestSchedulerWithOnePodOnNode(t *testing.T, queuedPodStore *clientcache.FIFO, scache schedulerinternalcache.Cache,
+func setupTestSchedulerWithOnePodOnNode(t *testing.T, queuedPodStore *clientcache.FIFO, scache internalcache.Cache,
 	informerFactory informers.SharedInformerFactory, stop chan struct{}, predicateMap map[string]predicates.FitPredicate, pod *v1.Pod, node *v1.Node) (*Scheduler, chan *v1.Binding, chan error) {
 
 	scheduler, bindingChan, errChan := setupTestScheduler(queuedPodStore, scache, informerFactory, predicateMap, nil)
@@ -554,7 +554,7 @@ func TestSchedulerFailedSchedulingReasons(t *testing.T) {
 	stop := make(chan struct{})
 	defer close(stop)
 	queuedPodStore := clientcache.NewFIFO(clientcache.MetaNamespaceKeyFunc)
-	scache := schedulerinternalcache.New(10*time.Minute, stop)
+	scache := internalcache.New(10*time.Minute, stop)
 
 	// Design the baseline for the pods, and we will make nodes that dont fit it later.
 	var cpu = int64(4)
@@ -631,7 +631,7 @@ func TestSchedulerFailedSchedulingReasons(t *testing.T) {
 
 // queuedPodStore: pods queued before processing.
 // scache: scheduler cache that might contain assumed pods.
-func setupTestScheduler(queuedPodStore *clientcache.FIFO, scache schedulerinternalcache.Cache, informerFactory informers.SharedInformerFactory, predicateMap map[string]predicates.FitPredicate, recorder record.EventRecorder) (*Scheduler, chan *v1.Binding, chan error) {
+func setupTestScheduler(queuedPodStore *clientcache.FIFO, scache internalcache.Cache, informerFactory informers.SharedInformerFactory, predicateMap map[string]predicates.FitPredicate, recorder record.EventRecorder) (*Scheduler, chan *v1.Binding, chan error) {
 	algo := core.NewGenericScheduler(
 		scache,
 		internalqueue.NewSchedulingQueue(nil),
@@ -683,7 +683,7 @@ func setupTestScheduler(queuedPodStore *clientcache.FIFO, scache schedulerintern
 	return sched, bindingChan, errChan
 }
 
-func setupTestSchedulerLongBindingWithRetry(queuedPodStore *clientcache.FIFO, scache schedulerinternalcache.Cache, informerFactory informers.SharedInformerFactory, predicateMap map[string]predicates.FitPredicate, stop chan struct{}, bindingTime time.Duration) (*Scheduler, chan *v1.Binding) {
+func setupTestSchedulerLongBindingWithRetry(queuedPodStore *clientcache.FIFO, scache internalcache.Cache, informerFactory informers.SharedInformerFactory, predicateMap map[string]predicates.FitPredicate, stop chan struct{}, bindingTime time.Duration) (*Scheduler, chan *v1.Binding) {
 	algo := core.NewGenericScheduler(
 		scache,
 		internalqueue.NewSchedulingQueue(nil),
@@ -741,7 +741,7 @@ func setupTestSchedulerWithVolumeBinding(fakeVolumeBinder *volumebinder.VolumeBi
 	pod.Spec.Volumes = append(pod.Spec.Volumes, v1.Volume{Name: "testVol",
 		VolumeSource: v1.VolumeSource{PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: "testPVC"}}})
 	queuedPodStore.Add(pod)
-	scache := schedulerinternalcache.New(10*time.Minute, stop)
+	scache := internalcache.New(10*time.Minute, stop)
 	scache.AddNode(&testNode)
 	testPVC := v1.PersistentVolumeClaim{ObjectMeta: metav1.ObjectMeta{Name: "testPVC", Namespace: pod.Namespace, UID: types.UID("testPVC")}}
 	client := clientsetfake.NewSimpleClientset(&testNode, &testPVC)