From f3823cc2cfe869734509a908efdb5b43291f206f Mon Sep 17 00:00:00 2001
From: wackxu
Date: Thu, 19 Jul 2018 16:04:58 +0800
Subject: [PATCH] fix failing e2e tests that set PodPriority

---
 test/e2e/framework/gpu_util.go     |  4 ++--
 test/e2e_node/BUILD                |  1 +
 test/e2e_node/eviction_test.go     | 36 ++++++++++++++++++++++++++----
 test/e2e_node/gpu_device_plugin.go |  6 +++--
 4 files changed, 39 insertions(+), 8 deletions(-)

diff --git a/test/e2e/framework/gpu_util.go b/test/e2e/framework/gpu_util.go
index 2cd9f33ee7..17abe0b2af 100644
--- a/test/e2e/framework/gpu_util.go
+++ b/test/e2e/framework/gpu_util.go
@@ -50,13 +50,13 @@ func NumberOfNVIDIAGPUs(node *v1.Node) int64 {
 }
 
 // NVIDIADevicePlugin returns the official Google Device Plugin pod for NVIDIA GPU in GKE
-func NVIDIADevicePlugin(ns string) *v1.Pod {
+func NVIDIADevicePlugin() *v1.Pod {
     ds, err := DsFromManifest(GPUDevicePluginDSYAML)
     Expect(err).NotTo(HaveOccurred())
     p := &v1.Pod{
         ObjectMeta: metav1.ObjectMeta{
             Name:      "device-plugin-nvidia-gpu-" + string(uuid.NewUUID()),
-            Namespace: ns,
+            Namespace: metav1.NamespaceSystem,
         },
 
         Spec: ds.Spec.Template.Spec,
diff --git a/test/e2e_node/BUILD b/test/e2e_node/BUILD
index 03461b0c02..4c92288a00 100644
--- a/test/e2e_node/BUILD
+++ b/test/e2e_node/BUILD
@@ -129,6 +129,7 @@ go_test(
         "//pkg/kubelet/types:go_default_library",
         "//pkg/security/apparmor:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/api/scheduling/v1beta1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
diff --git a/test/e2e_node/eviction_test.go b/test/e2e_node/eviction_test.go
index 5160578be0..961cfdf010 100644
--- a/test/e2e_node/eviction_test.go
+++ b/test/e2e_node/eviction_test.go
@@ -24,6 +24,8 @@ import (
     "time"
 
     "k8s.io/api/core/v1"
+    schedulerapi "k8s.io/api/scheduling/v1beta1"
+    "k8s.io/apimachinery/pkg/api/errors"
     "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/fields"
@@ -285,6 +287,20 @@ var _ = framework.KubeDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [
     expectedNodeCondition := v1.NodeMemoryPressure
     expectedStarvedResource := v1.ResourceMemory
     pressureTimeout := 10 * time.Minute
+
+    highPriorityClassName := f.BaseName + "-high-priority"
+    highPriority := int32(999999999)
+
+    BeforeEach(func() {
+        _, err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Create(&schedulerapi.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority})
+        Expect(err == nil || errors.IsAlreadyExists(err)).To(BeTrue())
+    })
+
+    AfterEach(func() {
+        err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{})
+        Expect(err).NotTo(HaveOccurred())
+    })
+
     Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
         tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
             initialConfig.FeatureGates[string(features.PodPriority)] = true
@@ -318,8 +334,7 @@ var _ = framework.KubeDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [
                 }),
             },
         }
-        systemPriority := int32(2147483647)
-        specs[1].pod.Spec.Priority = &systemPriority
+        specs[1].pod.Spec.PriorityClassName = highPriorityClassName
         runEvictionTest(f, pressureTimeout, expectedNodeCondition, expectedStarvedResource, logMemoryMetrics, specs)
     })
 })
@@ -332,6 +347,20 @@ var _ = framework.KubeDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Ser
     expectedNodeCondition := v1.NodeDiskPressure
     expectedStarvedResource := v1.ResourceEphemeralStorage
     pressureTimeout := 10 * time.Minute
+
+    highPriorityClassName := f.BaseName + "-high-priority"
+    highPriority := int32(999999999)
+
+    BeforeEach(func() {
+        _, err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Create(&schedulerapi.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority})
+        Expect(err == nil || errors.IsAlreadyExists(err)).To(BeTrue())
+    })
+
+    AfterEach(func() {
+        err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{})
+        Expect(err).NotTo(HaveOccurred())
+    })
+
     Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
         tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
             initialConfig.FeatureGates[string(features.PodPriority)] = true
@@ -367,8 +396,7 @@ var _ = framework.KubeDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Ser
                 }),
             },
         }
-        systemPriority := int32(2147483647)
-        specs[1].pod.Spec.Priority = &systemPriority
+        specs[1].pod.Spec.PriorityClassName = highPriorityClassName
         runEvictionTest(f, pressureTimeout, expectedNodeCondition, expectedStarvedResource, logDiskMetrics, specs)
     })
 })
diff --git a/test/e2e_node/gpu_device_plugin.go b/test/e2e_node/gpu_device_plugin.go
index 34fca3b1fa..8328324b69 100644
--- a/test/e2e_node/gpu_device_plugin.go
+++ b/test/e2e_node/gpu_device_plugin.go
@@ -42,6 +42,7 @@ var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugi
 
     Context("DevicePlugin", func() {
         var devicePluginPod *v1.Pod
+        var err error
         BeforeEach(func() {
             By("Ensuring that Nvidia GPUs exists on the node")
             if !checkIfNvidiaGPUsExistOnNode() {
@@ -49,7 +50,8 @@ var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugi
             }
 
             By("Creating the Google Device Plugin pod for NVIDIA GPU in GKE")
-            devicePluginPod = f.PodClient().CreateSync(framework.NVIDIADevicePlugin(f.Namespace.Name))
+            devicePluginPod, err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Create(framework.NVIDIADevicePlugin())
+            framework.ExpectNoError(err)
 
             By("Waiting for GPUs to become available on the local node")
             Eventually(func() bool {
@@ -106,7 +108,7 @@ var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugi
             Expect(devId1).To(Not(Equal(devId2)))
 
             By("Deleting device plugin.")
-            f.PodClient().Delete(devicePluginPod.Name, &metav1.DeleteOptions{})
+            f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(devicePluginPod.Name, &metav1.DeleteOptions{})
             By("Waiting for GPUs to become unavailable on the local node")
             Eventually(func() bool {
                 node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
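
Reviewer note (not part of the patch): the tests previously hard-coded pod.Spec.Priority, presumably failing because the apiserver no longer accepts a client-set integer priority; the patch instead creates a PriorityClass and has pods reference it via Spec.PriorityClassName, which admission resolves to the integer value. Below is a minimal, self-contained sketch of that create/reference/delete round-trip, using the same pre-context client-go call signatures as the diff. The kubeconfig loading, class name, and priority value are illustrative assumptions; in the tests the framework's f.ClientSet plays the role of clientset.

package main

import (
	"fmt"

	schedulingv1beta1 "k8s.io/api/scheduling/v1beta1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Illustrative client setup; the e2e framework supplies f.ClientSet instead.
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	// Hypothetical name; the patch derives it from f.BaseName.
	className := "example-high-priority"

	// Create the class up front, tolerating AlreadyExists so reruns stay
	// idempotent; the same pattern as the patch's BeforeEach.
	_, err = clientset.SchedulingV1beta1().PriorityClasses().Create(&schedulingv1beta1.PriorityClass{
		ObjectMeta: metav1.ObjectMeta{Name: className},
		Value:      999999999,
	})
	if err != nil && !errors.IsAlreadyExists(err) {
		panic(err)
	}

	// Pods then reference the class by name instead of setting Spec.Priority;
	// admission resolves the name to the integer priority.
	fmt.Println("set pod.Spec.PriorityClassName =", className)

	// Tear down, as in the patch's AfterEach.
	if err := clientset.SchedulingV1beta1().PriorityClasses().Delete(className, &metav1.DeleteOptions{}); err != nil {
		panic(err)
	}
}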