mirror of https://github.com/k3s-io/k3s
Merge pull request #66369 from wackxu/fixe2e

Automatic merge from submit-queue (batch tested with PRs 61212, 66369, 66446, 66895, 66969). If you want to cherry-pick this change to another branch, please follow the instructions here: https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md

Fix the e2e tests that set PodPriority and are failing.

Fixes https://github.com/kubernetes/kubernetes/issues/66357

```release-note
NONE
```

commit ad1483b58d
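The tests were failing because they set `pod.Spec.Priority` to a raw integer; with the `PodPriority` feature and its admission plugin enabled, the apiserver computes that field from `pod.Spec.PriorityClassName` and rejects user-supplied values. The commit therefore creates a real `PriorityClass` in each suite and references it by name. A minimal sketch of that pattern, assuming a client-go clientset `cs` (the helper name and parameters are illustrative, not part of the commit):

```go
package e2epriority

import (
	schedulerapi "k8s.io/api/scheduling/v1beta1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// ensurePriorityClass creates the class and tolerates AlreadyExists; this is
// the same idempotency the tests below assert with
// Expect(err == nil || errors.IsAlreadyExists(err)).To(BeTrue()).
func ensurePriorityClass(cs kubernetes.Interface, name string, value int32) error {
	pc := &schedulerapi.PriorityClass{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Value:      value,
	}
	if _, err := cs.SchedulingV1beta1().PriorityClasses().Create(pc); err != nil && !errors.IsAlreadyExists(err) {
		return err
	}
	return nil
}
```

Pods needing the elevated priority then set `pod.Spec.PriorityClassName = name` and leave `pod.Spec.Priority` unset, which is exactly the substitution made in the eviction hunks below.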
```diff
@@ -50,13 +50,13 @@ func NumberOfNVIDIAGPUs(node *v1.Node) int64 {
 }
 
 // NVIDIADevicePlugin returns the official Google Device Plugin pod for NVIDIA GPU in GKE
-func NVIDIADevicePlugin(ns string) *v1.Pod {
+func NVIDIADevicePlugin() *v1.Pod {
     ds, err := DsFromManifest(GPUDevicePluginDSYAML)
     Expect(err).NotTo(HaveOccurred())
     p := &v1.Pod{
         ObjectMeta: metav1.ObjectMeta{
             Name:      "device-plugin-nvidia-gpu-" + string(uuid.NewUUID()),
-            Namespace: ns,
+            Namespace: metav1.NamespaceSystem,
         },
 
         Spec: ds.Spec.Template.Spec,
```
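Dropping the `ns` parameter pins the device-plugin pod to `kube-system` rather than the per-test namespace, presumably so the plugin is independent of test-namespace lifecycle. The call sites later in this commit change to match; excerpted from those hunks:

```go
// Create the plugin pod in kube-system via the clientset (f is the test
// framework instance) instead of the namespaced PodClient.
devicePluginPod, err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Create(framework.NVIDIADevicePlugin())
framework.ExpectNoError(err)
```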
```diff
@@ -129,6 +129,7 @@ go_test(
         "//pkg/kubelet/types:go_default_library",
         "//pkg/security/apparmor:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/api/scheduling/v1beta1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
```
```diff
@@ -24,6 +24,8 @@ import (
     "time"
 
     "k8s.io/api/core/v1"
+    schedulerapi "k8s.io/api/scheduling/v1beta1"
+    "k8s.io/apimachinery/pkg/api/errors"
     "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/fields"
```
```diff
@@ -285,6 +287,20 @@ var _ = framework.KubeDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [
     expectedNodeCondition := v1.NodeMemoryPressure
     expectedStarvedResource := v1.ResourceMemory
     pressureTimeout := 10 * time.Minute
+
+    highPriorityClassName := f.BaseName + "-high-priority"
+    highPriority := int32(999999999)
+
+    BeforeEach(func() {
+        _, err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Create(&schedulerapi.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority})
+        Expect(err == nil || errors.IsAlreadyExists(err)).To(BeTrue())
+    })
+
+    AfterEach(func() {
+        err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{})
+        Expect(err).NotTo(HaveOccurred())
+    })
+
     Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
         tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
             initialConfig.FeatureGates[string(features.PodPriority)] = true
```
```diff
@@ -318,8 +334,7 @@ var _ = framework.KubeDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [
                 }),
             },
         }
-        systemPriority := int32(2147483647)
-        specs[1].pod.Spec.Priority = &systemPriority
+        specs[1].pod.Spec.PriorityClassName = highPriorityClassName
         runEvictionTest(f, pressureTimeout, expectedNodeCondition, expectedStarvedResource, logMemoryMetrics, specs)
     })
 })
```
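The substantive change in this hunk: rather than pinning the second spec's priority to the numeric maximum, the test routes through the class created in `BeforeEach`. Side by side (a sketch; `pod` stands in for `specs[1].pod`):

```go
// Before: write the resolved priority field directly. The Priority admission
// plugin computes this field from PriorityClassName and rejects pods that
// supply an integer themselves, which is what broke these tests.
systemPriority := int32(2147483647)
pod.Spec.Priority = &systemPriority

// After: name a PriorityClass and let admission resolve the integer.
pod.Spec.PriorityClassName = highPriorityClassName
```

The identical substitution is made in the local-storage suite below.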
```diff
@@ -332,6 +347,20 @@ var _ = framework.KubeDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Ser
     expectedNodeCondition := v1.NodeDiskPressure
     expectedStarvedResource := v1.ResourceEphemeralStorage
     pressureTimeout := 10 * time.Minute
+
+    highPriorityClassName := f.BaseName + "-high-priority"
+    highPriority := int32(999999999)
+
+    BeforeEach(func() {
+        _, err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Create(&schedulerapi.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority})
+        Expect(err == nil || errors.IsAlreadyExists(err)).To(BeTrue())
+    })
+
+    AfterEach(func() {
+        err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{})
+        Expect(err).NotTo(HaveOccurred())
+    })
+
     Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
         tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
             initialConfig.FeatureGates[string(features.PodPriority)] = true
```
```diff
@@ -367,8 +396,7 @@ var _ = framework.KubeDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Ser
                 }),
             },
         }
-        systemPriority := int32(2147483647)
-        specs[1].pod.Spec.Priority = &systemPriority
+        specs[1].pod.Spec.PriorityClassName = highPriorityClassName
         runEvictionTest(f, pressureTimeout, expectedNodeCondition, expectedStarvedResource, logDiskMetrics, specs)
     })
 })
```
```diff
@@ -42,6 +42,7 @@ var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugi
 
     Context("DevicePlugin", func() {
         var devicePluginPod *v1.Pod
+        var err error
         BeforeEach(func() {
             By("Ensuring that Nvidia GPUs exists on the node")
             if !checkIfNvidiaGPUsExistOnNode() {
```
```diff
@@ -49,7 +50,8 @@ var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugi
             }
 
             By("Creating the Google Device Plugin pod for NVIDIA GPU in GKE")
-            devicePluginPod = f.PodClient().CreateSync(framework.NVIDIADevicePlugin(f.Namespace.Name))
+            devicePluginPod, err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Create(framework.NVIDIADevicePlugin())
+            framework.ExpectNoError(err)
 
             By("Waiting for GPUs to become available on the local node")
             Eventually(func() bool {
```
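One behavioral trade in this hunk: `PodClient().CreateSync` created the pod and blocked until it was running, while the plain clientset `Create` returns as soon as the object is accepted. Readiness is instead inferred from the capacity poll that follows the creation, shaped roughly like this (the timeout values are illustrative):

```go
// Poll the node until the device plugin has advertised at least one NVIDIA
// GPU, which implies the plugin pod came up even though Create didn't wait.
Eventually(func() bool {
	node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
	framework.ExpectNoError(err)
	return framework.NumberOfNVIDIAGPUs(node) > 0
}, 5*time.Minute, framework.Poll).Should(BeTrue())
```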
```diff
@@ -106,7 +108,7 @@ var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugi
             Expect(devId1).To(Not(Equal(devId2)))
 
             By("Deleting device plugin.")
-            f.PodClient().Delete(devicePluginPod.Name, &metav1.DeleteOptions{})
+            f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(devicePluginPod.Name, &metav1.DeleteOptions{})
             By("Waiting for GPUs to become unavailable on the local node")
             Eventually(func() bool {
                 node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
```
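For reference, `NumberOfNVIDIAGPUs` (named in the first hunk header) is the helper those polls lean on. A hedged sketch of such a counter; the framework's actual resource-name constant may differ:

```go
// Count NVIDIA GPUs advertised in the node's capacity. "nvidia.com/gpu" is the
// conventional device-plugin resource name, assumed here for illustration.
func NumberOfNVIDIAGPUs(node *v1.Node) int64 {
	val, ok := node.Status.Capacity["nvidia.com/gpu"]
	if !ok {
		return 0
	}
	return val.Value()
}
```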