Merge pull request #66369 from wackxu/fixe2e

Automatic merge from submit-queue (batch tested with PRs 61212, 66369, 66446, 66895, 66969). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Fix failing e2e tests that set PodPriority

Fixes https://github.com/kubernetes/kubernetes/issues/66357
```release-note
NONE
```
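The change replaces direct assignment of `pod.Spec.Priority` with a `PriorityClass` that the tests create up front and reference via `PriorityClassName`, letting priority admission resolve the integer value. Below is a minimal sketch of that pattern outside the e2e framework, assuming a kubeconfig-based clientset and illustrative names (`high-priority-test`, a pause container); it is not the test's actual code.

```go
package main

import (
	"fmt"

	"k8s.io/api/core/v1"
	schedulingv1beta1 "k8s.io/api/scheduling/v1beta1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Hypothetical client setup; the e2e framework provides f.ClientSet instead.
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	// Create the PriorityClass, tolerating AlreadyExists so setup stays idempotent,
	// mirroring the BeforeEach added in this PR.
	pc := &schedulingv1beta1.PriorityClass{
		ObjectMeta: metav1.ObjectMeta{Name: "high-priority-test"}, // illustrative name
		Value:      999999999,
	}
	if _, err := clientset.SchedulingV1beta1().PriorityClasses().Create(pc); err != nil && !errors.IsAlreadyExists(err) {
		panic(err)
	}

	// Reference the class by name instead of setting pod.Spec.Priority directly;
	// admission fills in the resolved integer priority.
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "high-priority-pod", Namespace: "default"},
		Spec: v1.PodSpec{
			PriorityClassName: "high-priority-test",
			Containers: []v1.Container{
				{Name: "pause", Image: "k8s.gcr.io/pause:3.1"},
			},
		},
	}
	created, err := clientset.CoreV1().Pods("default").Create(pod)
	if err != nil {
		panic(err)
	}
	fmt.Printf("created %s with priorityClassName %s\n", created.Name, created.Spec.PriorityClassName)
}
```

The pre-1.18 client-go call signatures shown here (no `context.Context` argument) match the release this PR targets; the tests clean up by deleting the `PriorityClass` in `AfterEach` the same way.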
Kubernetes Submit Queue 2018-08-14 21:18:08 -07:00 committed by GitHub
commit ad1483b58d
4 changed files with 39 additions and 8 deletions


@@ -50,13 +50,13 @@ func NumberOfNVIDIAGPUs(node *v1.Node) int64 {
 }
 // NVIDIADevicePlugin returns the official Google Device Plugin pod for NVIDIA GPU in GKE
-func NVIDIADevicePlugin(ns string) *v1.Pod {
+func NVIDIADevicePlugin() *v1.Pod {
     ds, err := DsFromManifest(GPUDevicePluginDSYAML)
     Expect(err).NotTo(HaveOccurred())
     p := &v1.Pod{
         ObjectMeta: metav1.ObjectMeta{
             Name:      "device-plugin-nvidia-gpu-" + string(uuid.NewUUID()),
-            Namespace: ns,
+            Namespace: metav1.NamespaceSystem,
         },
         Spec: ds.Spec.Template.Spec,


@@ -129,6 +129,7 @@ go_test(
         "//pkg/kubelet/types:go_default_library",
         "//pkg/security/apparmor:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/api/scheduling/v1beta1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",


@@ -24,6 +24,8 @@ import (
    "time"
    "k8s.io/api/core/v1"
+   schedulerapi "k8s.io/api/scheduling/v1beta1"
+   "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/fields"
@@ -285,6 +287,20 @@ var _ = framework.KubeDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [
    expectedNodeCondition := v1.NodeMemoryPressure
    expectedStarvedResource := v1.ResourceMemory
    pressureTimeout := 10 * time.Minute
+   highPriorityClassName := f.BaseName + "-high-priority"
+   highPriority := int32(999999999)
+   BeforeEach(func() {
+       _, err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Create(&schedulerapi.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority})
+       Expect(err == nil || errors.IsAlreadyExists(err)).To(BeTrue())
+   })
+   AfterEach(func() {
+       err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{})
+       Expect(err).NotTo(HaveOccurred())
+   })
    Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
        tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
            initialConfig.FeatureGates[string(features.PodPriority)] = true
@@ -318,8 +334,7 @@ var _ = framework.KubeDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [
                }),
            },
        }
-       systemPriority := int32(2147483647)
-       specs[1].pod.Spec.Priority = &systemPriority
+       specs[1].pod.Spec.PriorityClassName = highPriorityClassName
        runEvictionTest(f, pressureTimeout, expectedNodeCondition, expectedStarvedResource, logMemoryMetrics, specs)
    })
})
@@ -332,6 +347,20 @@ var _ = framework.KubeDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Ser
    expectedNodeCondition := v1.NodeDiskPressure
    expectedStarvedResource := v1.ResourceEphemeralStorage
    pressureTimeout := 10 * time.Minute
+   highPriorityClassName := f.BaseName + "-high-priority"
+   highPriority := int32(999999999)
+   BeforeEach(func() {
+       _, err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Create(&schedulerapi.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority})
+       Expect(err == nil || errors.IsAlreadyExists(err)).To(BeTrue())
+   })
+   AfterEach(func() {
+       err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{})
+       Expect(err).NotTo(HaveOccurred())
+   })
    Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
        tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
            initialConfig.FeatureGates[string(features.PodPriority)] = true
@@ -367,8 +396,7 @@ var _ = framework.KubeDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Ser
                }),
            },
        }
-       systemPriority := int32(2147483647)
-       specs[1].pod.Spec.Priority = &systemPriority
+       specs[1].pod.Spec.PriorityClassName = highPriorityClassName
        runEvictionTest(f, pressureTimeout, expectedNodeCondition, expectedStarvedResource, logDiskMetrics, specs)
    })
})


@@ -42,6 +42,7 @@ var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugi
    Context("DevicePlugin", func() {
        var devicePluginPod *v1.Pod
+       var err error
        BeforeEach(func() {
            By("Ensuring that Nvidia GPUs exists on the node")
            if !checkIfNvidiaGPUsExistOnNode() {
@@ -49,7 +50,8 @@ var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugi
            }
            By("Creating the Google Device Plugin pod for NVIDIA GPU in GKE")
-           devicePluginPod = f.PodClient().CreateSync(framework.NVIDIADevicePlugin(f.Namespace.Name))
+           devicePluginPod, err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Create(framework.NVIDIADevicePlugin())
+           framework.ExpectNoError(err)
            By("Waiting for GPUs to become available on the local node")
            Eventually(func() bool {
@@ -106,7 +108,7 @@ var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugi
            Expect(devId1).To(Not(Equal(devId2)))
            By("Deleting device plugin.")
-           f.PodClient().Delete(devicePluginPod.Name, &metav1.DeleteOptions{})
+           f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(devicePluginPod.Name, &metav1.DeleteOptions{})
            By("Waiting for GPUs to become unavailable on the local node")
            Eventually(func() bool {
                node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})