fix eviction test panics

pull/8/head
David Ashpole 2018-09-07 13:35:13 -07:00
parent 361746266a
commit 4a2ef941b8
1 changed file with 16 additions and 20 deletions

@@ -289,16 +289,6 @@ var _ = framework.KubeDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [
 	highPriorityClassName := f.BaseName + "-high-priority"
 	highPriority := int32(999999999)
-	BeforeEach(func() {
-		_, err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Create(&schedulerapi.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority})
-		Expect(err == nil || errors.IsAlreadyExists(err)).To(BeTrue())
-	})
-	AfterEach(func() {
-		err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{})
-		Expect(err).NotTo(HaveOccurred())
-	})
 	Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
 		tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
 			memoryConsumed := resource.MustParse("600Mi")
@@ -310,6 +300,14 @@ var _ = framework.KubeDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [
 			initialConfig.EvictionHard = map[string]string{"memory.available": fmt.Sprintf("%d", availableBytes-uint64(memoryConsumed.Value()))}
 			initialConfig.EvictionMinimumReclaim = map[string]string{}
 		})
+		BeforeEach(func() {
+			_, err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Create(&schedulerapi.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority})
+			Expect(err == nil || errors.IsAlreadyExists(err)).To(BeTrue())
+		})
+		AfterEach(func() {
+			err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{})
+			Expect(err).NotTo(HaveOccurred())
+		})
 		specs := []podEvictSpec{
 			{
 				evictionPriority: 2,
@@ -348,16 +346,6 @@ var _ = framework.KubeDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Ser
 	highPriorityClassName := f.BaseName + "-high-priority"
 	highPriority := int32(999999999)
-	BeforeEach(func() {
-		_, err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Create(&schedulerapi.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority})
-		Expect(err == nil || errors.IsAlreadyExists(err)).To(BeTrue())
-	})
-	AfterEach(func() {
-		err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{})
-		Expect(err).NotTo(HaveOccurred())
-	})
 	Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
 		tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
 			diskConsumed := resource.MustParse("350Mi")
@@ -369,6 +357,14 @@ var _ = framework.KubeDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Ser
 			initialConfig.EvictionHard = map[string]string{"nodefs.available": fmt.Sprintf("%d", availableBytes-uint64(diskConsumed.Value()))}
 			initialConfig.EvictionMinimumReclaim = map[string]string{}
 		})
+		BeforeEach(func() {
+			_, err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Create(&schedulerapi.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority})
+			Expect(err == nil || errors.IsAlreadyExists(err)).To(BeTrue())
+		})
+		AfterEach(func() {
+			err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{})
+			Expect(err).NotTo(HaveOccurred())
+		})
 		specs := []podEvictSpec{
			{
				evictionPriority: 2,
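
Assembled from the four hunks, this is roughly the post-change shape of each of the two priority-eviction suites: the PriorityClass BeforeEach/AfterEach are now registered on the inner Context, after tempSetCurrentKubeletConfig, instead of in the outer Describe body. The sketch below only rearranges identifiers visible in the diff (f, highPriorityClassName, highPriority, testContextFmt, expectedNodeCondition, availableBytes, podEvictSpec); it assumes the surrounding file's imports and helpers and is not a compilable excerpt on its own.

// Sketch (memory-eviction suite; the local-storage suite mirrors this with
// diskConsumed and the "nodefs.available" threshold).
highPriorityClassName := f.BaseName + "-high-priority"
highPriority := int32(999999999)
Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
	// Kubelet eviction thresholds are still configured first.
	tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
		memoryConsumed := resource.MustParse("600Mi")
		// availableBytes is computed earlier in this closure (not shown in the diff).
		initialConfig.EvictionHard = map[string]string{"memory.available": fmt.Sprintf("%d", availableBytes-uint64(memoryConsumed.Value()))}
		initialConfig.EvictionMinimumReclaim = map[string]string{}
	})
	// The PriorityClass setup/teardown now lives inside the Context,
	// registered after the kubelet reconfiguration.
	BeforeEach(func() {
		_, err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Create(&schedulerapi.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority})
		Expect(err == nil || errors.IsAlreadyExists(err)).To(BeTrue())
	})
	AfterEach(func() {
		err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{})
		Expect(err).NotTo(HaveOccurred())
	})
	specs := []podEvictSpec{
		{
			evictionPriority: 2,
			// remaining fields and the second spec are unchanged by this
			// commit and are omitted here.
		},
	}
	_ = specs // handed to the suite's eviction-test runner later in the file
})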