mirror of https://github.com/k3s-io/k3s
Merge pull request #34237 from gmarek/scheduler-bench
Automatic merge from submit-queue

Small update to scheduler benchmark

cc @hongchaodeng
commit fa88f98f84
@@ -83,7 +83,7 @@ func (g *genericScheduler) Schedule(pod *api.Pod, nodeLister algorithm.NodeListe
 	} else {
 		trace = util.NewTrace("Scheduling <nil> pod")
 	}
-	defer trace.LogIfLong(20 * time.Millisecond)
+	defer trace.LogIfLong(100 * time.Millisecond)

 	nodes, err := nodeLister.List()
 	if err != nil {
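The only change in this hunk raises the trace threshold from 20ms to 100ms, so only unusually slow scheduling passes get logged. For context, the trace helper logs its recorded steps only when total elapsed time exceeds the threshold passed to LogIfLong. A self-contained sketch of that pattern (a stand-in type, not the real util package):

package main

import (
	"fmt"
	"time"
)

// Trace is a minimal stand-in for the util.Trace used in the diff above.
type Trace struct {
	name  string
	start time.Time
}

func NewTrace(name string) *Trace {
	return &Trace{name: name, start: time.Now()}
}

// LogIfLong prints only when the traced operation exceeded the threshold,
// which is why raising the threshold silences routine scheduling cycles.
func (t *Trace) LogIfLong(threshold time.Duration) {
	if elapsed := time.Since(t.start); elapsed >= threshold {
		fmt.Printf("Trace %q took %v (threshold %v)\n", t.name, elapsed, threshold)
	}
}

func main() {
	trace := NewTrace("Scheduling demo pod")
	defer trace.LogIfLong(100 * time.Millisecond)
	time.Sleep(150 * time.Millisecond) // simulate a slow scheduling pass
}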
@@ -18,16 +18,27 @@ package benchmark

 import (
+	"fmt"
+	"math"
 	"testing"
 	"time"
 )

+const (
+	threshold3K  = 100
+	threshold30K = 30
+	threshold60K = 30
+)
+
 // TestSchedule100Node3KPods schedules 3k pods on 100 nodes.
 func TestSchedule100Node3KPods(t *testing.T) {
 	if testing.Short() {
 		t.Skip("Skipping because we want to run short tests")
 	}
-	schedulePods(100, 3000)
+	if min := schedulePods(100, 3000); min < threshold3K {
+		t.Errorf("Too small pod scheduling throughput for 3k pods. Expected %v got %v", threshold3K, min)
+	} else {
+		fmt.Printf("Minimal observed throughput for 3k pod test: %v\n", min)
+	}
 }

 // TestSchedule1000Node30KPods schedules 30k pods on 1000 nodes.
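The same threshold guard is repeated in each test touched by this PR. A hypothetical helper (expectMinThroughput is not part of the PR) shows how it could be factored out, assuming the same fmt and testing imports:

// expectMinThroughput is a hypothetical helper, not part of this PR; it
// factors out the threshold check repeated in the tests above.
func expectMinThroughput(t *testing.T, size string, min, threshold int32) {
	if min < threshold {
		t.Errorf("Too small pod scheduling throughput for %s pods. Expected %v got %v", size, threshold, min)
	} else {
		fmt.Printf("Minimal observed throughput for %s pod test: %v\n", size, min)
	}
}

// Usage inside a test:
//	expectMinThroughput(t, "3k", schedulePods(100, 3000), threshold3K)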
@@ -35,14 +46,32 @@ func TestSchedule1000Node30KPods(t *testing.T) {
 	if testing.Short() {
 		t.Skip("Skipping because we want to run short tests")
 	}
-	schedulePods(1000, 30000)
+	if min := schedulePods(1000, 30000); min < threshold30K {
+		t.Errorf("Too small pod scheduling throughput for 30k pods. Expected %v got %v", threshold30K, min)
+	} else {
+		fmt.Printf("Minimal observed throughput for 30k pod test: %v\n", min)
+	}
 }

+// TestSchedule2000Node60KPods schedules 60k pods on 2000 nodes.
+// This test won't fit in a normal 10-minute time window.
+// func TestSchedule2000Node60KPods(t *testing.T) {
+// 	if testing.Short() {
+// 		t.Skip("Skipping because we want to run short tests")
+// 	}
+// 	if min := schedulePods(2000, 60000); min < threshold60K {
+// 		t.Errorf("Too small pod scheduling throughput for 60k pods. Expected %v got %v", threshold60K, min)
+// 	} else {
+// 		fmt.Printf("Minimal observed throughput for 60k pod test: %v\n", min)
+// 	}
+// }
+
 // schedulePods schedules a specific number of pods on a specific number of nodes.
 // This is used to learn the scheduling throughput on various
 // sizes of cluster and how it changes as more and more pods are scheduled.
 // It won't stop until all pods are scheduled.
-func schedulePods(numNodes, numPods int) {
+// It returns the minimum throughput observed over the whole run.
+func schedulePods(numNodes, numPods int) int32 {
 	schedulerConfigFactory, destroyFunc := mustSetupScheduler()
 	defer destroyFunc()
 	c := schedulerConfigFactory.Client
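A note on the commented-out 60k test: the "10-minute time window" it refers to is most likely go test's default -timeout of 10m, so re-enabling the test would also require raising that limit, e.g. go test -timeout 60m, on top of uncommenting the code.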
@@ -51,16 +80,25 @@ func schedulePods(numNodes, numPods int) {
 	makePodsFromRC(c, "rc1", numPods)

 	prev := 0
+	minQps := int32(math.MaxInt32)
 	start := time.Now()
 	for {
 		// This can potentially affect performance of scheduler, since List() is done under mutex.
+		// Listing 10000 pods is an expensive operation, so running it frequently may impact the scheduler.
 		// TODO: Setup watch on apiserver and wait until all pods scheduled.
 		scheduled := schedulerConfigFactory.ScheduledPodLister.Indexer.List()
-		fmt.Printf("%ds\trate: %d\ttotal: %d\n", time.Since(start)/time.Second, len(scheduled)-prev, len(scheduled))
 		if len(scheduled) >= numPods {
-			return
+			fmt.Printf("Scheduled %v Pods in %v seconds (%v per second on average).\n",
+				numPods, int(time.Since(start)/time.Second), numPods/int(time.Since(start)/time.Second))
+			return minQps
 		}
+		// There's no point in printing the rate for the last, partial iteration, as its value is random.
+		qps := len(scheduled) - prev
+		if int32(qps) < minQps {
+			minQps = int32(qps)
+		}
+
+		fmt.Printf("%ds\trate: %d\ttotal: %d\n", time.Since(start)/time.Second, qps, len(scheduled))
 		prev = len(scheduled)
 		time.Sleep(1 * time.Second)
 	}
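The TODO left in this hunk suggests replacing the mutex-guarded List() polling with a watch. A minimal, hypothetical sketch of that idea (waitForScheduled and its event channel are not in the PR; the channel stands in for an apiserver watch stream, and the "math" and "time" imports are the ones this PR already adds):

// waitForScheduled is a hypothetical sketch of the TODO above: instead of
// listing all pods under the lister's mutex every second, consume one event
// per scheduled pod and track the per-second minimum the same way
// schedulePods now does.
func waitForScheduled(scheduled <-chan struct{}, numPods int) int32 {
	minQps := int32(math.MaxInt32)
	count, prev := 0, 0
	tick := time.NewTicker(time.Second)
	defer tick.Stop()
	for {
		select {
		case <-scheduled:
			count++
			if count >= numPods {
				return minQps
			}
		case <-tick.C:
			if qps := int32(count - prev); qps < minQps {
				minQps = qps
			}
			prev = count
		}
	}
}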
@@ -44,8 +44,6 @@ import (
 // Notes on rate limiter:
 //  - client rate limit is set to 5000.
 func mustSetupScheduler() (schedulerConfigFactory *factory.ConfigFactory, destroyFunc func()) {
-	// framework.DeleteAllEtcdKeys()
-
 	var m *master.Master
 	masterConfig := framework.NewIntegrationTestMasterConfig()
 	m, err := masterConfig.Complete().New()
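mustSetupScheduler uses the common Go fixture shape of returning the object together with a destroyFunc that the caller defers, as schedulePods does above. A minimal self-contained illustration of the pattern, with hypothetical names:

package main

import "fmt"

type fixture struct{ name string }

// mustSetupFixture mirrors the mustSetupScheduler shape: it returns the
// fixture plus a destroy function that releases everything it started.
func mustSetupFixture() (f *fixture, destroyFunc func()) {
	f = &fixture{name: "test-apiserver"}
	fmt.Println("started", f.name)
	return f, func() { fmt.Println("stopped", f.name) }
}

func main() {
	f, destroy := mustSetupFixture()
	defer destroy() // teardown runs even on early return or test failure
	_ = f
}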