mirror of https://github.com/k3s-io/k3s
fix eviction test panics
parent 361746266a
commit 4a2ef941b8
@@ -289,16 +289,6 @@ var _ = framework.KubeDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [
 	highPriorityClassName := f.BaseName + "-high-priority"
 	highPriority := int32(999999999)
 
-	BeforeEach(func() {
-		_, err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Create(&schedulerapi.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority})
-		Expect(err == nil || errors.IsAlreadyExists(err)).To(BeTrue())
-	})
-
-	AfterEach(func() {
-		err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{})
-		Expect(err).NotTo(HaveOccurred())
-	})
-
 	Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
 		tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
 			memoryConsumed := resource.MustParse("600Mi")
@@ -310,6 +300,14 @@ var _ = framework.KubeDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [
 			initialConfig.EvictionHard = map[string]string{"memory.available": fmt.Sprintf("%d", availableBytes-uint64(memoryConsumed.Value()))}
 			initialConfig.EvictionMinimumReclaim = map[string]string{}
 		})
+		BeforeEach(func() {
+			_, err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Create(&schedulerapi.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority})
+			Expect(err == nil || errors.IsAlreadyExists(err)).To(BeTrue())
+		})
+		AfterEach(func() {
+			err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{})
+			Expect(err).NotTo(HaveOccurred())
+		})
 		specs := []podEvictSpec{
 			{
 				evictionPriority: 2,
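The hard-eviction threshold in this hunk is computed so the test pods push the node just past it: the threshold is set to the currently available bytes minus the memory the pods will consume. A minimal standalone sketch of that arithmetic (not part of the commit), assuming a hypothetical availableBytes of 4 GiB in place of the value the real test reads from the node:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Same quantity the test parses: the memory the eviction pods will consume.
	memoryConsumed := resource.MustParse("600Mi")

	// Hypothetical stand-in for the node's currently available memory;
	// the real test derives this from node status.
	var availableBytes uint64 = 4 << 30 // 4 GiB

	// memory.available threshold = available - consumed, in bytes, so that
	// consuming memoryConsumed is guaranteed to cross the threshold.
	evictionHard := map[string]string{
		"memory.available": fmt.Sprintf("%d", availableBytes-uint64(memoryConsumed.Value())),
	}
	fmt.Println(evictionHard["memory.available"]) // prints the computed byte count
}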
@@ -348,16 +346,6 @@ var _ = framework.KubeDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Ser
 	highPriorityClassName := f.BaseName + "-high-priority"
 	highPriority := int32(999999999)
 
-	BeforeEach(func() {
-		_, err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Create(&schedulerapi.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority})
-		Expect(err == nil || errors.IsAlreadyExists(err)).To(BeTrue())
-	})
-
-	AfterEach(func() {
-		err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{})
-		Expect(err).NotTo(HaveOccurred())
-	})
-
 	Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
 		tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
 			diskConsumed := resource.MustParse("350Mi")
@@ -369,6 +357,14 @@ var _ = framework.KubeDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Ser
 			initialConfig.EvictionHard = map[string]string{"nodefs.available": fmt.Sprintf("%d", availableBytes-uint64(diskConsumed.Value()))}
 			initialConfig.EvictionMinimumReclaim = map[string]string{}
 		})
+		BeforeEach(func() {
+			_, err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Create(&schedulerapi.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority})
+			Expect(err == nil || errors.IsAlreadyExists(err)).To(BeTrue())
+		})
+		AfterEach(func() {
+			err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{})
+			Expect(err).NotTo(HaveOccurred())
+		})
 		specs := []podEvictSpec{
 			{
 				evictionPriority: 2,
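Both files get the same move: the PriorityClass BeforeEach/AfterEach hooks leave the outer KubeDescribe scope and are registered inside the Context, after tempSetCurrentKubeletConfig. A minimal sketch (not from the commit, assuming Ginkgo v1 and a hypothetical ordering_test package) of the hook-ordering rule this relies on: BeforeEach hooks run outer-to-inner, and in declaration order within a container, so relocating a hook changes when it runs relative to the Context's other setup:

package ordering_test

import (
	"testing"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

func TestOrdering(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Ordering")
}

var order []string

var _ = Describe("setup ordering", func() {
	// Outer hook: runs first for every spec in any nested container.
	BeforeEach(func() { order = append(order, "outer") })

	Context("inner", func() {
		// Inner hooks: run after the outer one, in declaration order.
		BeforeEach(func() { order = append(order, "inner-1") })
		BeforeEach(func() { order = append(order, "inner-2") })

		It("runs hooks outer-to-inner, in declaration order", func() {
			Expect(order).To(Equal([]string{"outer", "inner-1", "inner-2"}))
		})
	})
})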