mirror of https://github.com/k3s-io/k3s
Add integration test for disable preemption
parent 4f0bd4121e
commit d36fc30233
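Adds a TestDisablePreemption integration test: a high-priority preemptor must stay unschedulable and never receive a nominated node when the scheduler runs with preemption disabled. Supporting changes: waitForNominatedNodeName is split into a timeout-parameterized waitForNominatedNodeNameWithTimeout plus a thin wrapper, and the test utilities gain initTestSchedulerWithOptions and initTestDisablePreemption, which thread a disablePreemption flag into the scheduler config.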
@@ -41,8 +41,8 @@ import (
 
 var lowPriority, mediumPriority, highPriority = int32(100), int32(200), int32(300)
 
-func waitForNominatedNodeName(cs clientset.Interface, pod *v1.Pod) error {
-	if err := wait.Poll(100*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
+func waitForNominatedNodeNameWithTimeout(cs clientset.Interface, pod *v1.Pod, timeout time.Duration) error {
+	if err := wait.Poll(100*time.Millisecond, timeout, func() (bool, error) {
 		pod, err := cs.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
 		if err != nil {
 			return false, err
@@ -57,6 +57,10 @@ func waitForNominatedNodeName(cs clientset.Interface, pod *v1.Pod) error {
 	return nil
 }
 
+func waitForNominatedNodeName(cs clientset.Interface, pod *v1.Pod) error {
+	return waitForNominatedNodeNameWithTimeout(cs, pod, wait.ForeverTestTimeout)
+}
+
 // TestPreemption tests a few preemption scenarios.
 func TestPreemption(t *testing.T) {
 	// Enable PodPriority feature gate.
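The two hunks above split the nominated-node wait into a timeout-parameterized core plus a thin wrapper that restores the old default, so existing call sites are untouched. A rough self-contained sketch of that refactoring pattern (the pollUntil helper and the waitForThing* names are illustrative stand-ins, not code from this commit):

package main

import (
	"errors"
	"fmt"
	"time"
)

// pollUntil re-checks cond every interval until it reports done or the
// timeout elapses, loosely mirroring wait.Poll's semantics.
func pollUntil(interval, timeout time.Duration, cond func() (bool, error)) error {
	deadline := time.Now().Add(timeout)
	for {
		done, err := cond()
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		if time.Now().After(deadline) {
			return errors.New("timed out waiting for condition")
		}
		time.Sleep(interval)
	}
}

// waitForThingWithTimeout is the parameterized core...
func waitForThingWithTimeout(timeout time.Duration, ready func() bool) error {
	return pollUntil(100*time.Millisecond, timeout, func() (bool, error) {
		return ready(), nil
	})
}

// ...and waitForThing keeps old callers working with the old default, just as
// waitForNominatedNodeName now delegates with wait.ForeverTestTimeout.
func waitForThing(ready func() bool) error {
	return waitForThingWithTimeout(30*time.Second, ready)
}

func main() {
	start := time.Now()
	err := waitForThingWithTimeout(time.Second, func() bool {
		return time.Since(start) > 300*time.Millisecond
	})
	fmt.Println("result:", err) // result: <nil>
}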
@@ -285,6 +289,88 @@ func TestPreemption(t *testing.T) {
 	}
 }
 
+// TestDisablePreemption tests that the scheduler does not preempt pods when preemption is disabled.
+func TestDisablePreemption(t *testing.T) {
+	// Enable PodPriority feature gate.
+	utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.PodPriority))
+	// Initialize scheduler, and disable preemption.
+	context := initTestDisablePreemption(t, "disable-preemption")
+	defer cleanupTest(t, context)
+	cs := context.clientSet
+
+	tests := []struct {
+		description  string
+		existingPods []*v1.Pod
+		pod          *v1.Pod
+	}{
+		{
+			description: "pod preemption will not happen",
+			existingPods: []*v1.Pod{
+				initPausePod(context.clientSet, &pausePodConfig{
+					Name:      "victim-pod",
+					Namespace: context.ns.Name,
+					Priority:  &lowPriority,
+					Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
+						v1.ResourceCPU:    *resource.NewMilliQuantity(400, resource.DecimalSI),
+						v1.ResourceMemory: *resource.NewQuantity(200, resource.BinarySI)},
+					},
+				}),
+			},
+			pod: initPausePod(cs, &pausePodConfig{
+				Name:      "preemptor-pod",
+				Namespace: context.ns.Name,
+				Priority:  &highPriority,
+				Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
+					v1.ResourceCPU:    *resource.NewMilliQuantity(300, resource.DecimalSI),
+					v1.ResourceMemory: *resource.NewQuantity(200, resource.BinarySI)},
+				},
+			}),
+		},
+	}
+
+	// Create a node with some resources.
+	nodeRes := &v1.ResourceList{
+		v1.ResourcePods:   *resource.NewQuantity(32, resource.DecimalSI),
+		v1.ResourceCPU:    *resource.NewMilliQuantity(500, resource.DecimalSI),
+		v1.ResourceMemory: *resource.NewQuantity(500, resource.BinarySI),
+	}
+	_, err := createNode(context.clientSet, "node1", nodeRes)
+	if err != nil {
+		t.Fatalf("Error creating nodes: %v", err)
+	}
+
+	for _, test := range tests {
+		pods := make([]*v1.Pod, len(test.existingPods))
+		// Create and run existingPods.
+		for i, p := range test.existingPods {
+			pods[i], err = runPausePod(cs, p)
+			if err != nil {
+				t.Fatalf("Test [%v]: Error running pause pod: %v", test.description, err)
+			}
+		}
+		// Create the preemptor pod.
+		preemptor, err := createPausePod(cs, test.pod)
+		if err != nil {
+			t.Errorf("Error while creating high priority pod: %v", err)
+		}
+		// Ensure the preemptor stays unschedulable.
+		if err := waitForPodUnschedulable(cs, preemptor); err != nil {
+			t.Errorf("Test [%v]: Preemptor %v should not become scheduled",
+				test.description, preemptor.Name)
+		}
+
+		// Ensure the preemptor does not get nominated.
+		if err := waitForNominatedNodeNameWithTimeout(cs, preemptor, 5*time.Second); err == nil {
+			t.Errorf("Test [%v]: Preemptor %v should not be nominated",
+				test.description, preemptor.Name)
+		}
+
+		// Cleanup
+		pods = append(pods, preemptor)
+		cleanupPods(cs, t, pods)
+	}
+}
+
 func mkPriorityPodWithGrace(tc *TestContext, name string, priority int32, grace int64) *v1.Pod {
 	defaultPodRes := &v1.ResourceRequirements{Requests: v1.ResourceList{
 		v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
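Note the inverted assertion in the hunk above: waitForNominatedNodeNameWithTimeout succeeding (err == nil) is the failure case, because a successful wait would mean the pod got nominated even though preemption is disabled. A minimal standalone sketch of that expect-the-timeout style of negative check (the waitFor helper and variable names are hypothetical, not this repo's API):

package main

import (
	"errors"
	"fmt"
	"time"
)

// waitFor polls cond until it returns true or the timeout elapses, returning
// an error on timeout, similar in spirit to wait.Poll.
func waitFor(timeout time.Duration, cond func() bool) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if cond() {
			return nil
		}
		time.Sleep(10 * time.Millisecond)
	}
	return errors.New("timed out")
}

func main() {
	nominated := false // stand-in for pod.Status.NominatedNodeName being set

	// Negative assertion, as in TestDisablePreemption: err == nil is the
	// failure case, since it would mean the pod was nominated despite
	// preemption being disabled.
	if err := waitFor(200*time.Millisecond, func() bool { return nominated }); err == nil {
		fmt.Println("FAIL: pod was nominated although preemption is disabled")
	} else {
		fmt.Println("ok: pod was never nominated:", err)
	}
}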
@@ -63,7 +63,7 @@ type TestContext struct {
 	scheduler *scheduler.Scheduler
 }
 
-// createConfiguratorWithPodInformer create a configurator for scheduler with given informer factory, custom name and pod informer.
+// createConfiguratorWithPodInformer creates a configurator for scheduler.
 func createConfiguratorWithPodInformer(
 	schedulerName string,
 	clientSet clientset.Interface,
@@ -116,7 +116,14 @@ func initTestMaster(t *testing.T, nsPrefix string, admission admission.Interface
 	}
 
 	// 2. Create kubeclient
-	context.clientSet = clientset.NewForConfigOrDie(&restclient.Config{QPS: -1, Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})
+	context.clientSet = clientset.NewForConfigOrDie(
+		&restclient.Config{
+			QPS: -1, Host: s.URL,
+			ContentConfig: restclient.ContentConfig{
+				GroupVersion: testapi.Groups[v1.GroupName].GroupVersion(),
+			},
+		},
+	)
 	return &context
 }
 
@@ -128,6 +135,21 @@ func initTestScheduler(
 	controllerCh chan struct{},
 	setPodInformer bool,
 	policy *schedulerapi.Policy,
 ) *TestContext {
+	// Pod preemption is enabled by default scheduler configuration, but preemption only happens when PodPriority
+	// feature gate is enabled at the same time.
+	return initTestSchedulerWithOptions(t, context, controllerCh, setPodInformer, policy, false)
+}
+
+// initTestSchedulerWithOptions initializes a test environment and creates a scheduler with default
+// configuration and other options.
+func initTestSchedulerWithOptions(
+	t *testing.T,
+	context *TestContext,
+	controllerCh chan struct{},
+	setPodInformer bool,
+	policy *schedulerapi.Policy,
+	disablePreemption bool,
+) *TestContext {
 	// Enable EnableEquivalenceClassCache for all integration tests.
 	defer utilfeaturetesting.SetFeatureGateDuringTest(
@@ -167,19 +189,29 @@ func initTestScheduler(
 		context.schedulerConfig.StopEverything = controllerCh
 	}
 
+	// set DisablePreemption option
+	context.schedulerConfig.DisablePreemption = disablePreemption
+
 	// set setPodInformer if provided.
 	if setPodInformer {
 		go podInformer.Informer().Run(context.schedulerConfig.StopEverything)
 	}
 
 	eventBroadcaster := record.NewBroadcaster()
-	context.schedulerConfig.Recorder = eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: v1.DefaultSchedulerName})
-	eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: context.clientSet.CoreV1().Events("")})
+	context.schedulerConfig.Recorder = eventBroadcaster.NewRecorder(
+		legacyscheme.Scheme,
+		v1.EventSource{Component: v1.DefaultSchedulerName},
+	)
+	eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{
+		Interface: context.clientSet.CoreV1().Events(""),
+	})
 
 	context.informerFactory.Start(context.schedulerConfig.StopEverything)
 	context.informerFactory.WaitForCacheSync(context.schedulerConfig.StopEverything)
 
-	context.scheduler, err = scheduler.NewFromConfigurator(&scheduler.FakeConfigurator{Config: context.schedulerConfig}, nil...)
+	context.scheduler, err = scheduler.NewFromConfigurator(&scheduler.FakeConfigurator{
+		Config: context.schedulerConfig},
+		nil...)
 	if err != nil {
 		t.Fatalf("Couldn't create scheduler: %v", err)
 	}
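The new disablePreemption argument is plumbed straight into the scheduler config in the hunk above, while the old entry point keeps its signature and delegates with the old default. A compact sketch of that option-threading shape, stripped of the test scaffolding (all names below are hypothetical, not the Kubernetes API):

package main

import "fmt"

// Config plays the role of schedulerConfig: the one place the preemption
// switch actually lives.
type Config struct {
	DisablePreemption bool
}

type Scheduler struct{ cfg Config }

func newScheduler(cfg Config) *Scheduler { return &Scheduler{cfg: cfg} }

// initScheduler keeps the original signature and default behavior by
// delegating, the way initTestScheduler now calls
// initTestSchedulerWithOptions(..., false).
func initScheduler() *Scheduler {
	return initSchedulerWithOptions(false)
}

// initSchedulerWithOptions exposes the new knob and threads it into Config.
func initSchedulerWithOptions(disablePreemption bool) *Scheduler {
	cfg := Config{DisablePreemption: disablePreemption}
	return newScheduler(cfg)
}

func main() {
	fmt.Println(initScheduler().cfg.DisablePreemption)                // false
	fmt.Println(initSchedulerWithOptions(true).cfg.DisablePreemption) // true
}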
@@ -193,6 +225,13 @@ func initTest(t *testing.T, nsPrefix string) *TestContext {
 	return initTestScheduler(t, initTestMaster(t, nsPrefix, nil), nil, true, nil)
 }
 
+// initTestDisablePreemption initializes a test environment and creates master and scheduler with default
+// configuration but with pod preemption disabled.
+func initTestDisablePreemption(t *testing.T, nsPrefix string) *TestContext {
+	return initTestSchedulerWithOptions(
+		t, initTestMaster(t, nsPrefix, nil), nil, true, nil, true)
+}
+
 // cleanupTest deletes the scheduler and the test namespace. It should be called
 // at the end of a test.
 func cleanupTest(t *testing.T, context *TestContext) {
@@ -206,7 +245,8 @@ func cleanupTest(t *testing.T, context *TestContext) {
 
 // waitForReflection waits till the passFunc confirms that the object it expects
 // to see is in the store. Used to observe reflected events.
-func waitForReflection(t *testing.T, nodeLister corelisters.NodeLister, key string, passFunc func(n interface{}) bool) error {
+func waitForReflection(t *testing.T, nodeLister corelisters.NodeLister, key string,
+	passFunc func(n interface{}) bool) error {
 	nodes := []*v1.Node{}
 	err := wait.Poll(time.Millisecond*100, wait.ForeverTestTimeout, func() (bool, error) {
 		n, err := nodeLister.Get(key)
@@ -345,7 +385,8 @@ func createPausePod(cs clientset.Interface, p *v1.Pod) (*v1.Pod, error) {
 // createPausePodWithResource creates a pod with "Pause" image and the given
 // resources and returns its pointer and error status. The resource list can be
 // nil.
-func createPausePodWithResource(cs clientset.Interface, podName string, nsName string, res *v1.ResourceList) (*v1.Pod, error) {
+func createPausePodWithResource(cs clientset.Interface, podName string,
+	nsName string, res *v1.ResourceList) (*v1.Pod, error) {
 	var conf pausePodConfig
 	if res == nil {
 		conf = pausePodConfig{
@@ -439,7 +480,8 @@ func podUnschedulable(c clientset.Interface, podNamespace, podName string) wait.
 			return false, nil
 		}
 		_, cond := podutil.GetPodCondition(&pod.Status, v1.PodScheduled)
-		return cond != nil && cond.Status == v1.ConditionFalse && cond.Reason == v1.PodReasonUnschedulable, nil
+		return cond != nil && cond.Status == v1.ConditionFalse &&
+			cond.Reason == v1.PodReasonUnschedulable, nil
 	}
 }
 
@@ -481,7 +523,8 @@ func cleanupPods(cs clientset.Interface, t *testing.T, pods []*v1.Pod) {
 		}
 	}
 	for _, p := range pods {
-		if err := wait.Poll(time.Second, wait.ForeverTestTimeout, podDeleted(cs, p.Namespace, p.Name)); err != nil {
+		if err := wait.Poll(time.Second, wait.ForeverTestTimeout,
+			podDeleted(cs, p.Namespace, p.Name)); err != nil {
 			t.Errorf("error while waiting for pod %v/%v to get deleted: %v", p.Namespace, p.Name, err)
 		}
 	}