mirror of https://github.com/k3s-io/k3s
Implement multi-scheduler:

1. Give the default scheduler the name `default-scheduler`.
2. The default scheduler only schedules pods that meet one of the following conditions:
   - the pod has no "scheduler.alpha.kubernetes.io/name: <scheduler-name>" annotation
   - the pod has the annotation "scheduler.alpha.kubernetes.io/name: default-scheduler"

Also: update gofmt; update according to @david's review; run hack/test-integration.sh, hack/test-go.sh and local e2e.test.
parent 4b18fa553f
commit d9f3607292
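The selection rule above reduces to a single predicate: the default scheduler claims pods whose annotation is absent or names it explicitly, while any other scheduler claims only pods that name it. A minimal self-contained sketch of that rule, before reading the diff (plain Go; the Pod type here is a hypothetical stand-in for the real api.Pod, and the annotation key is the constant introduced below):

package main

import "fmt"

// SchedulerAnnotationKey matches the constant added in the diff below.
const SchedulerAnnotationKey = "scheduler.alpha.kubernetes.io/name"

// Pod is a hypothetical stand-in for api.Pod, carrying only what the rule needs.
type Pod struct {
	Name        string
	Annotations map[string]string
}

// responsibleForPod mirrors the rule added to the scheduler's ConfigFactory:
// the default scheduler also picks up pods with no annotation at all.
func responsibleForPod(schedulerName, defaultName string, pod Pod) bool {
	want := pod.Annotations[SchedulerAnnotationKey] // "" if the annotation is missing
	if schedulerName == defaultName {
		return want == "" || want == schedulerName
	}
	return want == schedulerName
}

func main() {
	pods := []Pod{
		{Name: "plain", Annotations: nil},
		{Name: "explicit-default", Annotations: map[string]string{SchedulerAnnotationKey: "default-scheduler"}},
		{Name: "foo", Annotations: map[string]string{SchedulerAnnotationKey: "foo-scheduler"}},
	}
	for _, p := range pods {
		fmt.Printf("%s: default=%v foo=%v\n",
			p.Name,
			responsibleForPod("default-scheduler", "default-scheduler", p),
			responsibleForPod("foo-scheduler", "default-scheduler", p))
	}
}

Running it shows the unannotated pod and the explicitly-default pod claimed by the default scheduler only, and the foo-annotated pod by "foo-scheduler" only.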
@@ -171,13 +171,13 @@ func startComponents(firstManifestURL, secondManifestURL string) (string, string
 	handler.delegate = m.Handler

 	// Scheduler
-	schedulerConfigFactory := factory.NewConfigFactory(cl, nil)
+	schedulerConfigFactory := factory.NewConfigFactory(cl, nil, api.DefaultSchedulerName)
 	schedulerConfig, err := schedulerConfigFactory.Create()
 	if err != nil {
 		glog.Fatalf("Couldn't create scheduler config: %v", err)
 	}
 	eventBroadcaster := record.NewBroadcaster()
-	schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"})
+	schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: api.DefaultSchedulerName})
 	eventBroadcaster.StartLogging(glog.Infof)
 	eventBroadcaster.StartRecordingToSink(cl.Events(""))
 	scheduler.New(schedulerConfig).Run()

@@ -66,6 +66,7 @@ kube-scheduler
      --policy-config-file="": File with scheduler policy configuration
      --port=10251: The port that the scheduler's http service runs on
      --profiling[=true]: Enable profiling via web interface host:port/debug/pprof/
+     --scheduler-name="default-scheduler": Name of the scheduler, used to select which pods will be processed by this scheduler, based on pod's annotation with key 'scheduler.alpha.kubernetes.io/name'
 ```

 ###### Auto generated by spf13/cobra on 14-Dec-2015

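On the pod side, opting into a named scheduler is just a matter of setting that annotation. A sketch, mirroring the createPod helper from the integration test further down (the pod name here is illustrative; the container name and pause image are the values the test uses):

// A pod that should be ignored by the default scheduler and picked up
// only by a scheduler started with --scheduler-name=foo-scheduler.
pod := &api.Pod{
	ObjectMeta: api.ObjectMeta{
		Name: "pick-me-foo",
		Annotations: map[string]string{
			"scheduler.alpha.kubernetes.io/name": "foo-scheduler",
		},
	},
	Spec: api.PodSpec{
		Containers: []api.Container{{Name: "container", Image: "kubernetes/pause:go"}},
	},
}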
@@ -288,6 +288,7 @@ run-proxy
 runtime-config
 save-config
 scheduler-config
+scheduler-name
 schema-cache-dir
 secure-port
 serialize-image-pulls

@@ -2218,3 +2218,8 @@ type RangeAllocation struct {
 	// a single allocated address (the fifth bit on CIDR 10.0.0.0/8 is 10.0.0.4).
 	Data []byte `json:"data"`
 }
+
+const (
+	// "default-scheduler" is the name of the default scheduler.
+	DefaultSchedulerName = "default-scheduler"
+)

@@ -2624,3 +2624,8 @@ type RangeAllocation struct {
 	// Data is a bit array containing all allocated addresses in the previous segment.
 	Data []byte `json:"data"`
 }
+
+const (
+	// "default-scheduler" is the name of the default scheduler.
+	DefaultSchedulerName = "default-scheduler"
+)

@@ -58,6 +58,7 @@ type SchedulerServer struct {
 	BindPodsBurst int
 	KubeAPIQPS    float32
 	KubeAPIBurst  int
+	SchedulerName string
 }

 // NewSchedulerServer creates a new SchedulerServer with default parameters

@@ -70,6 +71,7 @@ func NewSchedulerServer() *SchedulerServer {
 		BindPodsBurst: 100,
 		KubeAPIQPS:    50.0,
 		KubeAPIBurst:  100,
+		SchedulerName: api.DefaultSchedulerName,
 	}
 	return &s
 }

@@ -107,6 +109,7 @@ func (s *SchedulerServer) AddFlags(fs *pflag.FlagSet) {
 	fs.IntVar(&s.BindPodsBurst, "bind-pods-burst", s.BindPodsBurst, "Number of bindings per second scheduler is allowed to make during bursts")
 	fs.Float32Var(&s.KubeAPIQPS, "kube-api-qps", s.KubeAPIQPS, "QPS to use while talking with kubernetes apiserver")
 	fs.IntVar(&s.KubeAPIBurst, "kube-api-burst", s.KubeAPIBurst, "Burst to use while talking with kubernetes apiserver")
+	fs.StringVar(&s.SchedulerName, "scheduler-name", s.SchedulerName, "Name of the scheduler, used to select which pods will be processed by this scheduler, based on pod's annotation with key 'scheduler.alpha.kubernetes.io/name'")
 }

 // Run runs the specified SchedulerServer. This should never exit.

@@ -142,14 +145,14 @@ func (s *SchedulerServer) Run(_ []string) error {
 		glog.Fatal(server.ListenAndServe())
 	}()

-	configFactory := factory.NewConfigFactory(kubeClient, util.NewTokenBucketRateLimiter(s.BindPodsQPS, s.BindPodsBurst))
+	configFactory := factory.NewConfigFactory(kubeClient, util.NewTokenBucketRateLimiter(s.BindPodsQPS, s.BindPodsBurst), s.SchedulerName)
 	config, err := s.createConfig(configFactory)
 	if err != nil {
 		glog.Fatalf("Failed to create scheduler configuration: %v", err)
 	}

 	eventBroadcaster := record.NewBroadcaster()
-	config.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"})
+	config.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: s.SchedulerName})
 	eventBroadcaster.StartLogging(glog.Infof)
 	eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))

@@ -100,7 +100,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 		if !reflect.DeepEqual(policy, tc.ExpectedPolicy) {
 			t.Errorf("%s: Expected:\n\t%#v\nGot:\n\t%#v", v, tc.ExpectedPolicy, policy)
 		}
-		_, err = factory.NewConfigFactory(nil, nil).CreateFromConfig(policy)
+		_, err = factory.NewConfigFactory(nil, nil, "some-scheduler-name").CreateFromConfig(policy)
 		if err != nil {
 			t.Errorf("%s: Error constructing: %v", v, err)
 			continue

@@ -43,6 +43,10 @@ import (
 	"github.com/golang/glog"
 )

+const (
+	SchedulerAnnotationKey = "scheduler.alpha.kubernetes.io/name"
+)
+
 // ConfigFactory knows how to fill out a scheduler config with its support functions.
 type ConfigFactory struct {
 	Client *client.Client

@@ -66,10 +70,15 @@ type ConfigFactory struct {

 	scheduledPodPopulator *framework.Controller
 	modeler               scheduler.SystemModeler
+
+	// SchedulerName of a scheduler is used to select which pods will be
+	// processed by this scheduler, based on the pod's annotation with key
+	// 'scheduler.alpha.kubernetes.io/name'
+	SchedulerName string
 }

 // Initializes the factory.
-func NewConfigFactory(client *client.Client, rateLimiter util.RateLimiter) *ConfigFactory {
+func NewConfigFactory(client *client.Client, rateLimiter util.RateLimiter, schedulerName string) *ConfigFactory {
 	c := &ConfigFactory{
 		Client:   client,
 		PodQueue: cache.NewFIFO(cache.MetaNamespaceKeyFunc),

@@ -79,6 +88,7 @@ func NewConfigFactory(client *client.Client, rateLimiter util.RateLimiter) *Conf
 		ServiceLister:    &cache.StoreToServiceLister{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)},
 		ControllerLister: &cache.StoreToReplicationControllerLister{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)},
 		StopEverything:   make(chan struct{}),
+		SchedulerName:    schedulerName,
 	}
 	modeler := scheduler.NewSimpleModeler(&cache.StoreToPodLister{Store: c.PodQueue}, c.ScheduledPodLister)
 	c.modeler = modeler

@@ -228,9 +238,7 @@ func (f *ConfigFactory) CreateFromKeys(predicateKeys, priorityKeys sets.String,
 		Algorithm: algo,
 		Binder:    &binder{f.Client},
 		NextPod: func() *api.Pod {
-			pod := f.PodQueue.Pop().(*api.Pod)
-			glog.V(2).Infof("About to try and schedule pod %v", pod.Name)
-			return pod
+			return f.getNextPod()
 		},
 		Error:               f.makeDefaultErrorFunc(&podBackoff, f.PodQueue),
 		BindPodsRateLimiter: f.BindPodsRateLimiter,

@@ -238,6 +246,24 @@ func (f *ConfigFactory) CreateFromKeys(predicateKeys, priorityKeys sets.String,
 	}, nil
 }

+func (f *ConfigFactory) getNextPod() *api.Pod {
+	for {
+		pod := f.PodQueue.Pop().(*api.Pod)
+		if f.responsibleForPod(pod) {
+			glog.V(4).Infof("About to try and schedule pod %v", pod.Name)
+			return pod
+		}
+	}
+}
+
+func (f *ConfigFactory) responsibleForPod(pod *api.Pod) bool {
+	if f.SchedulerName == api.DefaultSchedulerName {
+		return pod.Annotations[SchedulerAnnotationKey] == f.SchedulerName || pod.Annotations[SchedulerAnnotationKey] == ""
+	} else {
+		return pod.Annotations[SchedulerAnnotationKey] == f.SchedulerName
+	}
+}
+
 func getNodeConditionPredicate() cache.NodeConditionPredicate {
 	return func(node api.Node) bool {
 		for _, cond := range node.Status.Conditions {

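A design note on the two functions above: getNextPod pops pods from this scheduler's queue and silently drops the ones whose annotation names a different scheduler, rather than re-queuing them; and because only the default scheduler claims unannotated pods, a pod annotated with a name that no running scheduler carries is never scheduled at all. That second consequence is exercised directly as the "bar-scheduler" case in TestResponsibleForPod below, and indirectly by check point-3 of TestMultiScheduler, where the default scheduler has been stopped.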
@@ -45,7 +45,7 @@ func TestCreate(t *testing.T) {
 	server := httptest.NewServer(&handler)
 	defer server.Close()
 	client := client.NewOrDie(&client.Config{Host: server.URL, GroupVersion: testapi.Default.GroupVersion()})
-	factory := NewConfigFactory(client, nil)
+	factory := NewConfigFactory(client, nil, api.DefaultSchedulerName)
 	factory.Create()
 }

@@ -63,7 +63,7 @@ func TestCreateFromConfig(t *testing.T) {
 	server := httptest.NewServer(&handler)
 	defer server.Close()
 	client := client.NewOrDie(&client.Config{Host: server.URL, GroupVersion: testapi.Default.GroupVersion()})
-	factory := NewConfigFactory(client, nil)
+	factory := NewConfigFactory(client, nil, api.DefaultSchedulerName)

 	// Pre-register some predicate and priority functions
 	RegisterFitPredicate("PredicateOne", PredicateOne)

@@ -105,7 +105,7 @@ func TestCreateFromEmptyConfig(t *testing.T) {
 	server := httptest.NewServer(&handler)
 	defer server.Close()
 	client := client.NewOrDie(&client.Config{Host: server.URL, GroupVersion: testapi.Default.GroupVersion()})
-	factory := NewConfigFactory(client, nil)
+	factory := NewConfigFactory(client, nil, api.DefaultSchedulerName)

 	configData = []byte(`{}`)
 	err := latestschedulerapi.Codec.DecodeInto(configData, &policy)

@@ -148,7 +148,7 @@ func TestDefaultErrorFunc(t *testing.T) {
 	mux.Handle(testapi.Default.ResourcePath("pods", "bar", "foo"), &handler)
 	server := httptest.NewServer(mux)
 	defer server.Close()
-	factory := NewConfigFactory(client.NewOrDie(&client.Config{Host: server.URL, GroupVersion: testapi.Default.GroupVersion()}), nil)
+	factory := NewConfigFactory(client.NewOrDie(&client.Config{Host: server.URL, GroupVersion: testapi.Default.GroupVersion()}), nil, api.DefaultSchedulerName)
 	queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
 	podBackoff := podBackoff{
 		perPodBackoff: map[types.NamespacedName]*backoffEntry{},

@@ -302,3 +302,71 @@ func TestBackoff(t *testing.T) {
 			t.Errorf("expected: 1, got %s", duration.String())
 		}
 	}
 }
+
+// TestResponsibleForPod verifies that a pod is picked up by the scheduler its
+// annotation selects, and by that scheduler only. Two schedulers are created
+// in the test: the default scheduler and another scheduler named
+// "foo-scheduler". A pod must be picked up by at most one of the two
+// schedulers.
+func TestResponsibleForPod(t *testing.T) {
+	handler := util.FakeHandler{
+		StatusCode:   500,
+		ResponseBody: "",
+		T:            t,
+	}
+	server := httptest.NewServer(&handler)
+	defer server.Close()
+	client := client.NewOrDie(&client.Config{Host: server.URL, GroupVersion: testapi.Default.GroupVersion()})
+	// factory of "default-scheduler"
+	factoryDefaultScheduler := NewConfigFactory(client, nil, api.DefaultSchedulerName)
+	// factory of "foo-scheduler"
+	factoryFooScheduler := NewConfigFactory(client, nil, "foo-scheduler")
+	// scheduler annotations to be tested
+	schedulerAnnotationFitsDefault := map[string]string{"scheduler.alpha.kubernetes.io/name": "default-scheduler"}
+	schedulerAnnotationFitsFoo := map[string]string{"scheduler.alpha.kubernetes.io/name": "foo-scheduler"}
+	schedulerAnnotationFitsNone := map[string]string{"scheduler.alpha.kubernetes.io/name": "bar-scheduler"}
+	tests := []struct {
+		pod             *api.Pod
+		pickedByDefault bool
+		pickedByFoo     bool
+	}{
+		{
+			// A pod with no "scheduler.alpha.kubernetes.io/name" annotation should be
+			// picked by the default scheduler, NOT by the one named "foo-scheduler".
+			pod:             &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "bar"}},
+			pickedByDefault: true,
+			pickedByFoo:     false,
+		},
+		{
+			// A pod with annotation "scheduler.alpha.kubernetes.io/name=default-scheduler" should be
+			// picked by the scheduler named "default-scheduler", NOT by the one named "foo-scheduler".
+			pod:             &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "bar", Annotations: schedulerAnnotationFitsDefault}},
+			pickedByDefault: true,
+			pickedByFoo:     false,
+		},
+		{
+			// A pod with annotation "scheduler.alpha.kubernetes.io/name=foo-scheduler" should NOT be
+			// picked by the scheduler named "default-scheduler", but by the one named "foo-scheduler".
+			pod:             &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "bar", Annotations: schedulerAnnotationFitsFoo}},
+			pickedByDefault: false,
+			pickedByFoo:     true,
+		},
+		{
+			// A pod with annotation "scheduler.alpha.kubernetes.io/name=bar-scheduler" should be
+			// picked by neither the scheduler named "default-scheduler" nor the one named "foo-scheduler".
+			pod:             &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "bar", Annotations: schedulerAnnotationFitsNone}},
+			pickedByDefault: false,
+			pickedByFoo:     false,
+		},
+	}
+
+	for _, test := range tests {
+		podOfDefault := factoryDefaultScheduler.responsibleForPod(test.pod)
+		podOfFoo := factoryFooScheduler.responsibleForPod(test.pod)
+		results := []bool{podOfDefault, podOfFoo}
+		expected := []bool{test.pickedByDefault, test.pickedByFoo}
+		if !reflect.DeepEqual(results, expected) {
+			t.Errorf("expected: {%v, %v}, got {%v, %v}", test.pickedByDefault, test.pickedByFoo, podOfDefault, podOfFoo)
+		}
+	}
+}

@ -58,7 +58,7 @@ func mustSetupScheduler() (schedulerConfigFactory *factory.ConfigFactory, destro
|
|||
Burst: 5000,
|
||||
})
|
||||
|
||||
schedulerConfigFactory = factory.NewConfigFactory(c, nil)
|
||||
schedulerConfigFactory = factory.NewConfigFactory(c, nil, api.DefaultSchedulerName)
|
||||
schedulerConfig, err := schedulerConfigFactory.Create()
|
||||
if err != nil {
|
||||
panic("Couldn't create scheduler config")
|
||||
|
|
|
@@ -91,7 +91,7 @@ var _ = Describe("Events", func() {
 				"involvedObject.kind":      "Pod",
 				"involvedObject.uid":       string(podWithUid.UID),
 				"involvedObject.namespace": framework.Namespace.Name,
-				"source":                   "scheduler",
+				"source":                   api.DefaultSchedulerName,
 			}.AsSelector()
 			options := api.ListOptions{FieldSelector: selector}
 			events, err := framework.Client.Events(framework.Namespace.Name).List(options)

@@ -238,13 +238,13 @@ func TestSchedulerExtender(t *testing.T) {
 	}
 	policy.APIVersion = testapi.Default.GroupVersion().String()

-	schedulerConfigFactory := factory.NewConfigFactory(restClient, nil)
+	schedulerConfigFactory := factory.NewConfigFactory(restClient, nil, api.DefaultSchedulerName)
 	schedulerConfig, err := schedulerConfigFactory.CreateFromConfig(policy)
 	if err != nil {
 		t.Fatalf("Couldn't create scheduler config: %v", err)
 	}
 	eventBroadcaster := record.NewBroadcaster()
-	schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"})
+	schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: api.DefaultSchedulerName})
 	eventBroadcaster.StartRecordingToSink(restClient.Events(""))
 	scheduler.New(schedulerConfig).Run()

@@ -65,13 +65,13 @@ func TestUnschedulableNodes(t *testing.T) {

 	restClient := client.NewOrDie(&client.Config{Host: s.URL, GroupVersion: testapi.Default.GroupVersion()})

-	schedulerConfigFactory := factory.NewConfigFactory(restClient, nil)
+	schedulerConfigFactory := factory.NewConfigFactory(restClient, nil, api.DefaultSchedulerName)
 	schedulerConfig, err := schedulerConfigFactory.Create()
 	if err != nil {
 		t.Fatalf("Couldn't create scheduler config: %v", err)
 	}
 	eventBroadcaster := record.NewBroadcaster()
-	schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"})
+	schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: api.DefaultSchedulerName})
 	eventBroadcaster.StartRecordingToSink(restClient.Events(""))
 	scheduler.New(schedulerConfig).Run()

@@ -273,3 +273,177 @@ func DoTestUnschedulableNodes(t *testing.T, restClient *client.Client, nodeStore
 		}
 	}
 }
+
+func TestMultiScheduler(t *testing.T) {
+	framework.DeleteAllEtcdKeys()
+
+	var m *master.Master
+	s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+		m.Handler.ServeHTTP(w, req)
+	}))
+	defer s.Close()
+
+	masterConfig := framework.NewIntegrationTestMasterConfig()
+	m = master.New(masterConfig)
+	/*
+		This integration test exercises the multi-scheduler feature in the following way:
+		1. create a default scheduler
+		2. create a node
+		3. create 3 pods: testPodNoAnnotation, testPodWithAnnotationFitsDefault and testPodWithAnnotationFitsFoo
+			- note: the first two should be picked and scheduled by the default scheduler, while the last one
+			  should be picked by the scheduler named "foo-scheduler", which does not exist yet
+		4. **check point-1**:
+			- testPodNoAnnotation and testPodWithAnnotationFitsDefault should be scheduled
+			- testPodWithAnnotationFitsFoo should NOT be scheduled
+		5. create a scheduler named "foo-scheduler"
+		6. **check point-2**:
+			- testPodWithAnnotationFitsFoo should be scheduled
+		7. stop the default scheduler
+		8. create 2 pods: testPodNoAnnotation2 and testPodWithAnnotationFitsDefault2
+			- note: these two pods belong to the default scheduler, which no longer exists
+		9. **check point-3**:
+			- testPodNoAnnotation2 and testPodWithAnnotationFitsDefault2 should NOT be scheduled
+	*/
+	// 1. create and start default-scheduler
+	restClient := client.NewOrDie(&client.Config{Host: s.URL, GroupVersion: testapi.Default.GroupVersion()})
+
+	schedulerConfigFactory := factory.NewConfigFactory(restClient, nil, api.DefaultSchedulerName)
+	schedulerConfig, err := schedulerConfigFactory.Create()
+	if err != nil {
+		t.Fatalf("Couldn't create scheduler config: %v", err)
+	}
+	eventBroadcaster := record.NewBroadcaster()
+	schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: api.DefaultSchedulerName})
+	eventBroadcaster.StartRecordingToSink(restClient.Events(""))
+	scheduler.New(schedulerConfig).Run()
+	// default-scheduler will be stopped later
+
+	// 2. create a node
+	node := &api.Node{
+		ObjectMeta: api.ObjectMeta{Name: "node-multi-scheduler-test-node"},
+		Spec:       api.NodeSpec{Unschedulable: false},
+		Status: api.NodeStatus{
+			Capacity: api.ResourceList{
+				api.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
+			},
+		},
+	}
+	restClient.Nodes().Create(node)
+
+	// 3. create 3 pods for testing
+	podWithNoAnnotation := createPod("pod-with-no-annotation", nil)
+	testPodNoAnnotation, err := restClient.Pods(api.NamespaceDefault).Create(podWithNoAnnotation)
+	if err != nil {
+		t.Fatalf("Failed to create pod: %v", err)
+	}
+
+	schedulerAnnotationFitsDefault := map[string]string{"scheduler.alpha.kubernetes.io/name": "default-scheduler"}
+	podWithAnnotationFitsDefault := createPod("pod-with-annotation-fits-default", schedulerAnnotationFitsDefault)
+	testPodWithAnnotationFitsDefault, err := restClient.Pods(api.NamespaceDefault).Create(podWithAnnotationFitsDefault)
+	if err != nil {
+		t.Fatalf("Failed to create pod: %v", err)
+	}
+
+	schedulerAnnotationFitsFoo := map[string]string{"scheduler.alpha.kubernetes.io/name": "foo-scheduler"}
+	podWithAnnotationFitsFoo := createPod("pod-with-annotation-fits-foo", schedulerAnnotationFitsFoo)
+	testPodWithAnnotationFitsFoo, err := restClient.Pods(api.NamespaceDefault).Create(podWithAnnotationFitsFoo)
+	if err != nil {
+		t.Fatalf("Failed to create pod: %v", err)
+	}
+
+	// 4. **check point-1**:
+	//     - testPodNoAnnotation and testPodWithAnnotationFitsDefault should be scheduled
+	//     - testPodWithAnnotationFitsFoo should NOT be scheduled
+	err = wait.Poll(time.Second, time.Second*5, podScheduled(restClient, testPodNoAnnotation.Namespace, testPodNoAnnotation.Name))
+	if err != nil {
+		t.Errorf("Test MultiScheduler: %s Pod not scheduled: %v", testPodNoAnnotation.Name, err)
+	} else {
+		t.Logf("Test MultiScheduler: %s Pod scheduled", testPodNoAnnotation.Name)
+	}
+
+	err = wait.Poll(time.Second, time.Second*5, podScheduled(restClient, testPodWithAnnotationFitsDefault.Namespace, testPodWithAnnotationFitsDefault.Name))
+	if err != nil {
+		t.Errorf("Test MultiScheduler: %s Pod not scheduled: %v", testPodWithAnnotationFitsDefault.Name, err)
+	} else {
+		t.Logf("Test MultiScheduler: %s Pod scheduled", testPodWithAnnotationFitsDefault.Name)
+	}
+
+	err = wait.Poll(time.Second, time.Second*5, podScheduled(restClient, testPodWithAnnotationFitsFoo.Namespace, testPodWithAnnotationFitsFoo.Name))
+	if err == nil {
+		t.Errorf("Test MultiScheduler: %s Pod got scheduled, %v", testPodWithAnnotationFitsFoo.Name, err)
+	} else {
+		t.Logf("Test MultiScheduler: %s Pod not scheduled", testPodWithAnnotationFitsFoo.Name)
+	}
+
+	// 5. create and start a scheduler named "foo-scheduler"
+	restClient2 := client.NewOrDie(&client.Config{Host: s.URL, GroupVersion: testapi.Default.GroupVersion()})
+
+	schedulerConfigFactory2 := factory.NewConfigFactory(restClient2, nil, "foo-scheduler")
+	schedulerConfig2, err := schedulerConfigFactory2.Create()
+	if err != nil {
+		t.Errorf("Couldn't create scheduler config: %v", err)
+	}
+	eventBroadcaster2 := record.NewBroadcaster()
+	schedulerConfig2.Recorder = eventBroadcaster2.NewRecorder(api.EventSource{Component: "foo-scheduler"})
+	eventBroadcaster2.StartRecordingToSink(restClient2.Events(""))
+	scheduler.New(schedulerConfig2).Run()
+
+	defer close(schedulerConfig2.StopEverything)
+
+	// 6. **check point-2**:
+	//     - testPodWithAnnotationFitsFoo should be scheduled
+	err = wait.Poll(time.Second, time.Second*5, podScheduled(restClient, testPodWithAnnotationFitsFoo.Namespace, testPodWithAnnotationFitsFoo.Name))
+	if err != nil {
+		t.Errorf("Test MultiScheduler: %s Pod not scheduled, %v", testPodWithAnnotationFitsFoo.Name, err)
+	} else {
+		t.Logf("Test MultiScheduler: %s Pod scheduled", testPodWithAnnotationFitsFoo.Name)
+	}
+
+	// 7. delete the pods that were scheduled by the default scheduler, and stop the default scheduler
+	err = restClient.Pods(api.NamespaceDefault).Delete(testPodNoAnnotation.Name, api.NewDeleteOptions(0))
+	if err != nil {
+		t.Errorf("Failed to delete pod: %v", err)
+	}
+	err = restClient.Pods(api.NamespaceDefault).Delete(testPodWithAnnotationFitsDefault.Name, api.NewDeleteOptions(0))
+	if err != nil {
+		t.Errorf("Failed to delete pod: %v", err)
+	}
+	close(schedulerConfig.StopEverything)
+
+	// 8. create 2 pods: testPodNoAnnotation2 and testPodWithAnnotationFitsDefault2
+	//     - note: these two pods belong to the default scheduler, which no longer exists
+	podWithNoAnnotation2 := createPod("pod-with-no-annotation2", nil)
+	podWithAnnotationFitsDefault2 := createPod("pod-with-annotation-fits-default2", schedulerAnnotationFitsDefault)
+	testPodNoAnnotation2, err := restClient.Pods(api.NamespaceDefault).Create(podWithNoAnnotation2)
+	if err != nil {
+		t.Fatalf("Failed to create pod: %v", err)
+	}
+	testPodWithAnnotationFitsDefault2, err := restClient.Pods(api.NamespaceDefault).Create(podWithAnnotationFitsDefault2)
+	if err != nil {
+		t.Fatalf("Failed to create pod: %v", err)
+	}
+
+	// 9. **check point-3**:
+	//     - testPodNoAnnotation2 and testPodWithAnnotationFitsDefault2 should NOT be scheduled
+	err = wait.Poll(time.Second, time.Second*5, podScheduled(restClient, testPodNoAnnotation2.Namespace, testPodNoAnnotation2.Name))
+	if err == nil {
+		t.Errorf("Test MultiScheduler: %s Pod got scheduled, %v", testPodNoAnnotation2.Name, err)
+	} else {
+		t.Logf("Test MultiScheduler: %s Pod not scheduled", testPodNoAnnotation2.Name)
+	}
+	err = wait.Poll(time.Second, time.Second*5, podScheduled(restClient, testPodWithAnnotationFitsDefault2.Namespace, testPodWithAnnotationFitsDefault2.Name))
+	if err == nil {
+		t.Errorf("Test MultiScheduler: %s Pod got scheduled, %v", testPodWithAnnotationFitsDefault2.Name, err)
+	} else {
+		t.Logf("Test MultiScheduler: %s Pod not scheduled", testPodWithAnnotationFitsDefault2.Name)
+	}
+}
+
+func createPod(name string, annotation map[string]string) *api.Pod {
+	return &api.Pod{
+		ObjectMeta: api.ObjectMeta{Name: name, Annotations: annotation},
+		Spec: api.PodSpec{
+			Containers: []api.Container{{Name: "container", Image: "kubernetes/pause:go"}},
+		},
+	}
+}