/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package eviction
import (
"testing"
"time"
2017-06-22 18:24:23 +00:00
"k8s.io/api/core/v1"
2017-01-25 13:13:07 +00:00
"k8s.io/apimachinery/pkg/api/resource"
2017-01-11 14:09:48 +00:00
"k8s.io/apimachinery/pkg/types"
2017-05-19 17:57:39 +00:00
"k8s.io/apimachinery/pkg/util/clock"
2017-01-28 22:48:35 +00:00
utilfeature "k8s.io/apiserver/pkg/util/feature"
2017-01-30 18:39:54 +00:00
"k8s.io/client-go/tools/record"
2017-11-08 22:34:54 +00:00
kubeapi "k8s.io/kubernetes/pkg/apis/core"
2017-02-23 00:05:05 +00:00
statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
2017-02-10 05:14:10 +00:00
evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api"
2016-05-13 03:35:18 +00:00
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
2017-01-28 22:48:35 +00:00
kubelettypes "k8s.io/kubernetes/pkg/kubelet/types"
2016-05-13 03:35:18 +00:00
)
// Pod priority values used by the eviction tests to exercise
// priority-based eviction ordering.
const (
	lowPriority     = -1
	defaultPriority = 0
	highPriority    = 1
)
2016-05-13 03:35:18 +00:00
// mockPodKiller is used to testing which pod is killed
type mockPodKiller struct {
2016-11-18 20:50:58 +00:00
pod * v1 . Pod
status v1 . PodStatus
2016-05-13 03:35:18 +00:00
gracePeriodOverride * int64
}
// killPodNow records the pod that was killed
2016-11-18 20:50:58 +00:00
func ( m * mockPodKiller ) killPodNow ( pod * v1 . Pod , status v1 . PodStatus , gracePeriodOverride * int64 ) error {
2016-05-13 03:35:18 +00:00
m . pod = pod
m . status = status
m . gracePeriodOverride = gracePeriodOverride
return nil
}
// mockDiskInfoProvider is used to simulate testing.
// It reports a fixed answer for whether a dedicated image filesystem exists.
type mockDiskInfoProvider struct {
	dedicatedImageFs bool
}
// HasDedicatedImageFs returns the mocked value.
func (m *mockDiskInfoProvider) HasDedicatedImageFs() (bool, error) {
	return m.dedicatedImageFs, nil
}
2017-07-07 20:40:13 +00:00
func newMockCapacityProvider ( capacity , reservation v1 . ResourceList ) * mockCapacityProvider {
return & mockCapacityProvider {
capacity : capacity ,
reservation : reservation ,
2017-03-02 01:56:24 +00:00
}
}
2017-07-07 20:40:13 +00:00
type mockCapacityProvider struct {
capacity v1 . ResourceList
reservation v1 . ResourceList
}
func ( m * mockCapacityProvider ) GetCapacity ( ) v1 . ResourceList {
return m . capacity
2017-03-02 01:56:24 +00:00
}
2017-07-07 20:40:13 +00:00
func ( m * mockCapacityProvider ) GetNodeAllocatableReservation ( ) v1 . ResourceList {
return m . reservation
2017-03-02 01:56:24 +00:00
}
// mockDiskGC is used to simulate invoking image and container garbage collection.
// It records whether each GC entry point was invoked and returns canned results.
type mockDiskGC struct {
	err                error
	imageBytesFreed    int64
	imageGCInvoked     bool
	containerGCInvoked bool
}
// DeleteUnusedImages returns the mocked values.
2017-05-22 18:00:22 +00:00
func ( m * mockDiskGC ) DeleteUnusedImages ( ) ( int64 , error ) {
m . imageGCInvoked = true
return m . imageBytesFreed , m . err
}
// DeleteAllUnusedContainers returns the mocked value
func ( m * mockDiskGC ) DeleteAllUnusedContainers ( ) error {
m . containerGCInvoked = true
return m . err
2016-08-03 20:55:52 +00:00
}
2017-10-10 00:34:54 +00:00
func makePodWithMemoryStats ( name string , priority int32 , requests v1 . ResourceList , limits v1 . ResourceList , memoryWorkingSet string ) ( * v1 . Pod , statsapi . PodStats ) {
pod := newPod ( name , priority , [ ] v1 . Container {
2016-10-14 13:02:55 +00:00
newContainer ( name , requests , limits ) ,
} , nil )
podStats := newPodMemoryStats ( pod , resource . MustParse ( memoryWorkingSet ) )
return pod , podStats
}
2017-10-10 00:34:54 +00:00
func makePodWithDiskStats ( name string , priority int32 , requests v1 . ResourceList , limits v1 . ResourceList , rootFsUsed , logsUsed , perLocalVolumeUsed string ) ( * v1 . Pod , statsapi . PodStats ) {
pod := newPod ( name , priority , [ ] v1 . Container {
2016-10-14 13:02:55 +00:00
newContainer ( name , requests , limits ) ,
} , nil )
podStats := newPodDiskStats ( pod , parseQuantity ( rootFsUsed ) , parseQuantity ( logsUsed ) , parseQuantity ( perLocalVolumeUsed ) )
return pod , podStats
}
2016-11-18 20:50:58 +00:00
func makeMemoryStats ( nodeAvailableBytes string , podStats map [ * v1 . Pod ] statsapi . PodStats ) * statsapi . Summary {
2016-10-14 13:02:55 +00:00
val := resource . MustParse ( nodeAvailableBytes )
availableBytes := uint64 ( val . Value ( ) )
WorkingSetBytes := uint64 ( val . Value ( ) )
result := & statsapi . Summary {
Node : statsapi . NodeStats {
Memory : & statsapi . MemoryStats {
AvailableBytes : & availableBytes ,
WorkingSetBytes : & WorkingSetBytes ,
} ,
} ,
Pods : [ ] statsapi . PodStats { } ,
2016-05-13 03:35:18 +00:00
}
2016-10-14 13:02:55 +00:00
for _ , podStat := range podStats {
result . Pods = append ( result . Pods , podStat )
}
return result
}
2016-11-18 20:50:58 +00:00
func makeDiskStats ( rootFsAvailableBytes , imageFsAvailableBytes string , podStats map [ * v1 . Pod ] statsapi . PodStats ) * statsapi . Summary {
2016-10-14 13:02:55 +00:00
rootFsVal := resource . MustParse ( rootFsAvailableBytes )
rootFsBytes := uint64 ( rootFsVal . Value ( ) )
rootFsCapacityBytes := uint64 ( rootFsVal . Value ( ) * 2 )
imageFsVal := resource . MustParse ( imageFsAvailableBytes )
imageFsBytes := uint64 ( imageFsVal . Value ( ) )
imageFsCapacityBytes := uint64 ( imageFsVal . Value ( ) * 2 )
result := & statsapi . Summary {
Node : statsapi . NodeStats {
Fs : & statsapi . FsStats {
AvailableBytes : & rootFsBytes ,
CapacityBytes : & rootFsCapacityBytes ,
} ,
Runtime : & statsapi . RuntimeStats {
ImageFs : & statsapi . FsStats {
AvailableBytes : & imageFsBytes ,
CapacityBytes : & imageFsCapacityBytes ,
2016-05-13 03:35:18 +00:00
} ,
} ,
2016-10-14 13:02:55 +00:00
} ,
Pods : [ ] statsapi . PodStats { } ,
}
for _ , podStat := range podStats {
result . Pods = append ( result . Pods , podStat )
2016-05-13 03:35:18 +00:00
}
2016-10-14 13:02:55 +00:00
return result
}
type podToMake struct {
name string
2017-10-10 00:34:54 +00:00
priority int32
2016-11-18 20:50:58 +00:00
requests v1 . ResourceList
limits v1 . ResourceList
2016-10-14 13:02:55 +00:00
memoryWorkingSet string
rootFsUsed string
logsFsUsed string
logsFsInodesUsed string
rootFsInodesUsed string
perLocalVolumeUsed string
perLocalVolumeInodesUsed string
}
// TestMemoryPressure
func TestMemoryPressure ( t * testing . T ) {
2017-10-10 00:34:54 +00:00
enablePodPriority ( true )
2016-10-14 13:02:55 +00:00
podMaker := makePodWithMemoryStats
summaryStatsMaker := makeMemoryStats
podsToMake := [ ] podToMake {
2017-10-10 00:34:54 +00:00
{ name : "guaranteed-low-priority-high-usage" , priority : lowPriority , requests : newResourceList ( "100m" , "1Gi" ) , limits : newResourceList ( "100m" , "1Gi" ) , memoryWorkingSet : "900Mi" } ,
{ name : "burstable-below-requests" , priority : defaultPriority , requests : newResourceList ( "100m" , "100Mi" ) , limits : newResourceList ( "200m" , "1Gi" ) , memoryWorkingSet : "50Mi" } ,
{ name : "burstable-above-requests" , priority : defaultPriority , requests : newResourceList ( "100m" , "100Mi" ) , limits : newResourceList ( "200m" , "1Gi" ) , memoryWorkingSet : "400Mi" } ,
{ name : "best-effort-high-priority-high-usage" , priority : highPriority , requests : newResourceList ( "" , "" ) , limits : newResourceList ( "" , "" ) , memoryWorkingSet : "400Mi" } ,
{ name : "best-effort-low-priority-low-usage" , priority : lowPriority , requests : newResourceList ( "" , "" ) , limits : newResourceList ( "" , "" ) , memoryWorkingSet : "100Mi" } ,
2016-05-13 03:35:18 +00:00
}
2016-11-18 20:50:58 +00:00
pods := [ ] * v1 . Pod { }
podStats := map [ * v1 . Pod ] statsapi . PodStats { }
2016-05-13 03:35:18 +00:00
for _ , podToMake := range podsToMake {
2017-10-10 00:34:54 +00:00
pod , podStat := podMaker ( podToMake . name , podToMake . priority , podToMake . requests , podToMake . limits , podToMake . memoryWorkingSet )
2016-05-13 03:35:18 +00:00
pods = append ( pods , pod )
podStats [ pod ] = podStat
}
2017-10-10 00:34:54 +00:00
podToEvict := pods [ 4 ]
2016-11-18 20:50:58 +00:00
activePodsFunc := func ( ) [ ] * v1 . Pod {
2016-05-13 03:35:18 +00:00
return pods
}
2016-05-26 03:08:56 +00:00
fakeClock := clock . NewFakeClock ( time . Now ( ) )
2016-05-13 03:35:18 +00:00
podKiller := & mockPodKiller { }
2016-07-28 17:02:01 +00:00
diskInfoProvider := & mockDiskInfoProvider { dedicatedImageFs : false }
2017-07-07 20:40:13 +00:00
capacityProvider := newMockCapacityProvider ( v1 . ResourceList { v1 . ResourceMemory : * quantityMustParse ( "3Gi" ) } , v1 . ResourceList { v1 . ResourceMemory : * quantityMustParse ( "1Gi" ) } )
2017-05-22 18:00:22 +00:00
imageGC := & mockDiskGC { imageBytesFreed : int64 ( 0 ) , err : nil }
2017-07-15 05:25:54 +00:00
nodeRef := & v1 . ObjectReference { Kind : "Node" , Name : "test" , UID : types . UID ( "test" ) , Namespace : "" }
2016-05-13 03:35:18 +00:00
config := Config {
2016-05-17 21:11:08 +00:00
MaxPodGracePeriodSeconds : 5 ,
2016-05-13 03:35:18 +00:00
PressureTransitionPeriod : time . Minute * 5 ,
2017-02-10 05:14:10 +00:00
Thresholds : [ ] evictionapi . Threshold {
2016-05-13 03:35:18 +00:00
{
2017-02-10 05:14:10 +00:00
Signal : evictionapi . SignalMemoryAvailable ,
Operator : evictionapi . OpLessThan ,
Value : evictionapi . ThresholdValue {
2016-08-10 04:09:27 +00:00
Quantity : quantityMustParse ( "1Gi" ) ,
} ,
2016-05-13 03:35:18 +00:00
} ,
2016-05-17 21:11:08 +00:00
{
2017-02-10 05:14:10 +00:00
Signal : evictionapi . SignalMemoryAvailable ,
Operator : evictionapi . OpLessThan ,
Value : evictionapi . ThresholdValue {
2016-08-10 04:09:27 +00:00
Quantity : quantityMustParse ( "2Gi" ) ,
} ,
2016-05-17 21:11:08 +00:00
GracePeriod : time . Minute * 2 ,
} ,
2016-05-13 03:35:18 +00:00
} ,
}
summaryProvider := & fakeSummaryProvider { result : summaryStatsMaker ( "2Gi" , podStats ) }
manager := & managerImpl {
clock : fakeClock ,
killPodFunc : podKiller . killPodNow ,
2016-08-03 20:55:52 +00:00
imageGC : imageGC ,
2016-05-13 03:35:18 +00:00
config : config ,
recorder : & record . FakeRecorder { } ,
summaryProvider : summaryProvider ,
nodeRef : nodeRef ,
nodeConditionsLastObservedAt : nodeConditionsObservedAt { } ,
thresholdsFirstObservedAt : thresholdsObservedAt { } ,
}
// create a best effort pod to test admission
2017-10-10 00:34:54 +00:00
bestEffortPodToAdmit , _ := podMaker ( "best-admit" , defaultPriority , newResourceList ( "" , "" ) , newResourceList ( "" , "" ) , "0Gi" )
burstablePodToAdmit , _ := podMaker ( "burst-admit" , defaultPriority , newResourceList ( "100m" , "100Mi" ) , newResourceList ( "200m" , "200Mi" ) , "0Gi" )
2016-05-13 03:35:18 +00:00
// synchronize
2017-07-07 20:40:13 +00:00
manager . synchronize ( diskInfoProvider , activePodsFunc , capacityProvider )
2016-05-13 03:35:18 +00:00
// we should not have memory pressure
if manager . IsUnderMemoryPressure ( ) {
t . Errorf ( "Manager should not report memory pressure" )
}
// try to admit our pods (they should succeed)
2017-01-26 04:39:13 +00:00
expected := [ ] bool { true , true }
for i , pod := range [ ] * v1 . Pod { bestEffortPodToAdmit , burstablePodToAdmit } {
2016-05-13 03:35:18 +00:00
if result := manager . Admit ( & lifecycle . PodAdmitAttributes { Pod : pod } ) ; expected [ i ] != result . Admit {
t . Errorf ( "Admit pod: %v, expected: %v, actual: %v" , pod , expected [ i ] , result . Admit )
}
}
2016-05-17 21:11:08 +00:00
// induce soft threshold
fakeClock . Step ( 1 * time . Minute )
summaryProvider . result = summaryStatsMaker ( "1500Mi" , podStats )
2017-07-07 20:40:13 +00:00
manager . synchronize ( diskInfoProvider , activePodsFunc , capacityProvider )
2016-05-17 21:11:08 +00:00
// we should have memory pressure
if ! manager . IsUnderMemoryPressure ( ) {
t . Errorf ( "Manager should report memory pressure since soft threshold was met" )
}
// verify no pod was yet killed because there has not yet been enough time passed.
if podKiller . pod != nil {
2016-10-27 20:56:55 +00:00
t . Errorf ( "Manager should not have killed a pod yet, but killed: %v" , podKiller . pod . Name )
2016-05-17 21:11:08 +00:00
}
// step forward in time pass the grace period
fakeClock . Step ( 3 * time . Minute )
summaryProvider . result = summaryStatsMaker ( "1500Mi" , podStats )
2017-07-07 20:40:13 +00:00
manager . synchronize ( diskInfoProvider , activePodsFunc , capacityProvider )
2016-05-17 21:11:08 +00:00
// we should have memory pressure
if ! manager . IsUnderMemoryPressure ( ) {
t . Errorf ( "Manager should report memory pressure since soft threshold was met" )
}
// verify the right pod was killed with the right grace period.
2016-10-27 20:56:55 +00:00
if podKiller . pod != podToEvict {
t . Errorf ( "Manager chose to kill pod: %v, but should have chosen %v" , podKiller . pod . Name , podToEvict . Name )
2016-05-17 21:11:08 +00:00
}
if podKiller . gracePeriodOverride == nil {
t . Errorf ( "Manager chose to kill pod but should have had a grace period override." )
}
observedGracePeriod := * podKiller . gracePeriodOverride
if observedGracePeriod != manager . config . MaxPodGracePeriodSeconds {
t . Errorf ( "Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d" , manager . config . MaxPodGracePeriodSeconds , observedGracePeriod )
}
// reset state
podKiller . pod = nil
podKiller . gracePeriodOverride = nil
// remove memory pressure
fakeClock . Step ( 20 * time . Minute )
summaryProvider . result = summaryStatsMaker ( "3Gi" , podStats )
2017-07-07 20:40:13 +00:00
manager . synchronize ( diskInfoProvider , activePodsFunc , capacityProvider )
2016-05-17 21:11:08 +00:00
// we should not have memory pressure
if manager . IsUnderMemoryPressure ( ) {
t . Errorf ( "Manager should not report memory pressure" )
}
2016-05-13 03:35:18 +00:00
// induce memory pressure!
fakeClock . Step ( 1 * time . Minute )
summaryProvider . result = summaryStatsMaker ( "500Mi" , podStats )
2017-07-07 20:40:13 +00:00
manager . synchronize ( diskInfoProvider , activePodsFunc , capacityProvider )
2016-05-13 03:35:18 +00:00
// we should have memory pressure
if ! manager . IsUnderMemoryPressure ( ) {
t . Errorf ( "Manager should report memory pressure" )
}
// check the right pod was killed
2016-10-27 20:56:55 +00:00
if podKiller . pod != podToEvict {
t . Errorf ( "Manager chose to kill pod: %v, but should have chosen %v" , podKiller . pod . Name , podToEvict . Name )
2016-05-13 03:35:18 +00:00
}
2016-05-17 21:11:08 +00:00
observedGracePeriod = * podKiller . gracePeriodOverride
if observedGracePeriod != int64 ( 0 ) {
t . Errorf ( "Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d" , 0 , observedGracePeriod )
}
2016-05-13 03:35:18 +00:00
2017-01-26 04:39:13 +00:00
// the best-effort pod should not admit, burstable should
expected = [ ] bool { false , true }
for i , pod := range [ ] * v1 . Pod { bestEffortPodToAdmit , burstablePodToAdmit } {
2016-05-13 03:35:18 +00:00
if result := manager . Admit ( & lifecycle . PodAdmitAttributes { Pod : pod } ) ; expected [ i ] != result . Admit {
t . Errorf ( "Admit pod: %v, expected: %v, actual: %v" , pod , expected [ i ] , result . Admit )
}
}
// reduce memory pressure
fakeClock . Step ( 1 * time . Minute )
summaryProvider . result = summaryStatsMaker ( "2Gi" , podStats )
podKiller . pod = nil // reset state
2017-07-07 20:40:13 +00:00
manager . synchronize ( diskInfoProvider , activePodsFunc , capacityProvider )
2016-05-13 03:35:18 +00:00
// we should have memory pressure (because transition period not yet met)
if ! manager . IsUnderMemoryPressure ( ) {
t . Errorf ( "Manager should report memory pressure" )
}
// no pod should have been killed
if podKiller . pod != nil {
2016-10-27 20:56:55 +00:00
t . Errorf ( "Manager chose to kill pod: %v when no pod should have been killed" , podKiller . pod . Name )
2016-05-13 03:35:18 +00:00
}
2017-01-26 04:39:13 +00:00
// the best-effort pod should not admit, burstable should
expected = [ ] bool { false , true }
for i , pod := range [ ] * v1 . Pod { bestEffortPodToAdmit , burstablePodToAdmit } {
2016-05-13 03:35:18 +00:00
if result := manager . Admit ( & lifecycle . PodAdmitAttributes { Pod : pod } ) ; expected [ i ] != result . Admit {
t . Errorf ( "Admit pod: %v, expected: %v, actual: %v" , pod , expected [ i ] , result . Admit )
}
}
// move the clock past transition period to ensure that we stop reporting pressure
fakeClock . Step ( 5 * time . Minute )
summaryProvider . result = summaryStatsMaker ( "2Gi" , podStats )
podKiller . pod = nil // reset state
2017-07-07 20:40:13 +00:00
manager . synchronize ( diskInfoProvider , activePodsFunc , capacityProvider )
2016-05-13 03:35:18 +00:00
// we should not have memory pressure (because transition period met)
if manager . IsUnderMemoryPressure ( ) {
t . Errorf ( "Manager should not report memory pressure" )
}
// no pod should have been killed
if podKiller . pod != nil {
2016-10-27 20:56:55 +00:00
t . Errorf ( "Manager chose to kill pod: %v when no pod should have been killed" , podKiller . pod . Name )
2016-05-13 03:35:18 +00:00
}
// all pods should admit now
2017-01-26 04:39:13 +00:00
expected = [ ] bool { true , true }
for i , pod := range [ ] * v1 . Pod { bestEffortPodToAdmit , burstablePodToAdmit } {
2016-05-13 03:35:18 +00:00
if result := manager . Admit ( & lifecycle . PodAdmitAttributes { Pod : pod } ) ; expected [ i ] != result . Admit {
t . Errorf ( "Admit pod: %v, expected: %v, actual: %v" , pod , expected [ i ] , result . Admit )
}
}
}
2016-07-22 19:23:09 +00:00
// parseQuantity parses the specified value (if provided) otherwise returns 0 value
func parseQuantity ( value string ) resource . Quantity {
if len ( value ) == 0 {
return resource . MustParse ( "0" )
}
return resource . MustParse ( value )
}
func TestDiskPressureNodeFs ( t * testing . T ) {
2017-10-10 00:34:54 +00:00
enablePodPriority ( true )
2016-10-14 13:02:55 +00:00
podMaker := makePodWithDiskStats
summaryStatsMaker := makeDiskStats
podsToMake := [ ] podToMake {
2017-10-10 00:34:54 +00:00
{ name : "low-priority-high-usage" , priority : lowPriority , requests : newResourceList ( "100m" , "1Gi" ) , limits : newResourceList ( "100m" , "1Gi" ) , rootFsUsed : "900Mi" } ,
{ name : "below-requests" , priority : defaultPriority , requests : newResourceList ( "100m" , "100Mi" ) , limits : newResourceList ( "200m" , "1Gi" ) , logsFsUsed : "50Mi" } ,
{ name : "above-requests" , priority : defaultPriority , requests : newResourceList ( "100m" , "100Mi" ) , limits : newResourceList ( "200m" , "1Gi" ) , rootFsUsed : "400Mi" } ,
{ name : "high-priority-high-usage" , priority : highPriority , requests : newResourceList ( "" , "" ) , limits : newResourceList ( "" , "" ) , perLocalVolumeUsed : "400Mi" } ,
{ name : "low-priority-low-usage" , priority : lowPriority , requests : newResourceList ( "" , "" ) , limits : newResourceList ( "" , "" ) , rootFsUsed : "100Mi" } ,
2016-07-22 19:23:09 +00:00
}
2016-11-18 20:50:58 +00:00
pods := [ ] * v1 . Pod { }
podStats := map [ * v1 . Pod ] statsapi . PodStats { }
2016-07-22 19:23:09 +00:00
for _ , podToMake := range podsToMake {
2017-10-10 00:34:54 +00:00
pod , podStat := podMaker ( podToMake . name , podToMake . priority , podToMake . requests , podToMake . limits , podToMake . rootFsUsed , podToMake . logsFsUsed , podToMake . perLocalVolumeUsed )
2016-07-22 19:23:09 +00:00
pods = append ( pods , pod )
podStats [ pod ] = podStat
}
2017-10-10 00:34:54 +00:00
podToEvict := pods [ 0 ]
2016-11-18 20:50:58 +00:00
activePodsFunc := func ( ) [ ] * v1 . Pod {
2016-07-22 19:23:09 +00:00
return pods
}
2016-07-28 17:02:01 +00:00
fakeClock := clock . NewFakeClock ( time . Now ( ) )
2016-07-22 19:23:09 +00:00
podKiller := & mockPodKiller { }
2016-07-28 17:02:01 +00:00
diskInfoProvider := & mockDiskInfoProvider { dedicatedImageFs : false }
2017-07-07 20:40:13 +00:00
capacityProvider := newMockCapacityProvider ( v1 . ResourceList { v1 . ResourceMemory : * quantityMustParse ( "3Gi" ) } , v1 . ResourceList { v1 . ResourceMemory : * quantityMustParse ( "1Gi" ) } )
2017-05-22 18:00:22 +00:00
diskGC := & mockDiskGC { imageBytesFreed : int64 ( 0 ) , err : nil }
2017-07-15 05:25:54 +00:00
nodeRef := & v1 . ObjectReference { Kind : "Node" , Name : "test" , UID : types . UID ( "test" ) , Namespace : "" }
2016-07-22 19:23:09 +00:00
config := Config {
MaxPodGracePeriodSeconds : 5 ,
PressureTransitionPeriod : time . Minute * 5 ,
2017-02-10 05:14:10 +00:00
Thresholds : [ ] evictionapi . Threshold {
2016-07-22 19:23:09 +00:00
{
2017-02-10 05:14:10 +00:00
Signal : evictionapi . SignalNodeFsAvailable ,
Operator : evictionapi . OpLessThan ,
Value : evictionapi . ThresholdValue {
2016-08-10 04:09:27 +00:00
Quantity : quantityMustParse ( "1Gi" ) ,
} ,
2016-07-22 19:23:09 +00:00
} ,
{
2017-02-10 05:14:10 +00:00
Signal : evictionapi . SignalNodeFsAvailable ,
Operator : evictionapi . OpLessThan ,
Value : evictionapi . ThresholdValue {
2016-08-10 04:09:27 +00:00
Quantity : quantityMustParse ( "2Gi" ) ,
} ,
2016-07-22 19:23:09 +00:00
GracePeriod : time . Minute * 2 ,
} ,
} ,
}
summaryProvider := & fakeSummaryProvider { result : summaryStatsMaker ( "16Gi" , "200Gi" , podStats ) }
manager := & managerImpl {
clock : fakeClock ,
killPodFunc : podKiller . killPodNow ,
2017-05-22 18:00:22 +00:00
imageGC : diskGC ,
containerGC : diskGC ,
2016-07-22 19:23:09 +00:00
config : config ,
recorder : & record . FakeRecorder { } ,
summaryProvider : summaryProvider ,
nodeRef : nodeRef ,
nodeConditionsLastObservedAt : nodeConditionsObservedAt { } ,
thresholdsFirstObservedAt : thresholdsObservedAt { } ,
}
// create a best effort pod to test admission
2017-10-10 00:34:54 +00:00
podToAdmit , _ := podMaker ( "pod-to-admit" , defaultPriority , newResourceList ( "" , "" ) , newResourceList ( "" , "" ) , "0Gi" , "0Gi" , "0Gi" )
2016-07-22 19:23:09 +00:00
// synchronize
2017-07-07 20:40:13 +00:00
manager . synchronize ( diskInfoProvider , activePodsFunc , capacityProvider )
2016-07-22 19:23:09 +00:00
// we should not have disk pressure
if manager . IsUnderDiskPressure ( ) {
t . Errorf ( "Manager should not report disk pressure" )
}
// try to admit our pod (should succeed)
if result := manager . Admit ( & lifecycle . PodAdmitAttributes { Pod : podToAdmit } ) ; ! result . Admit {
t . Errorf ( "Admit pod: %v, expected: %v, actual: %v" , podToAdmit , true , result . Admit )
}
// induce soft threshold
fakeClock . Step ( 1 * time . Minute )
summaryProvider . result = summaryStatsMaker ( "1.5Gi" , "200Gi" , podStats )
2017-07-07 20:40:13 +00:00
manager . synchronize ( diskInfoProvider , activePodsFunc , capacityProvider )
2016-07-22 19:23:09 +00:00
// we should have disk pressure
if ! manager . IsUnderDiskPressure ( ) {
t . Errorf ( "Manager should report disk pressure since soft threshold was met" )
}
// verify no pod was yet killed because there has not yet been enough time passed.
if podKiller . pod != nil {
2016-10-27 20:56:55 +00:00
t . Errorf ( "Manager should not have killed a pod yet, but killed: %v" , podKiller . pod . Name )
2016-07-22 19:23:09 +00:00
}
// step forward in time pass the grace period
fakeClock . Step ( 3 * time . Minute )
summaryProvider . result = summaryStatsMaker ( "1.5Gi" , "200Gi" , podStats )
2017-07-07 20:40:13 +00:00
manager . synchronize ( diskInfoProvider , activePodsFunc , capacityProvider )
2016-07-22 19:23:09 +00:00
// we should have disk pressure
if ! manager . IsUnderDiskPressure ( ) {
t . Errorf ( "Manager should report disk pressure since soft threshold was met" )
}
// verify the right pod was killed with the right grace period.
2016-10-27 20:56:55 +00:00
if podKiller . pod != podToEvict {
t . Errorf ( "Manager chose to kill pod: %v, but should have chosen %v" , podKiller . pod . Name , podToEvict . Name )
2016-07-22 19:23:09 +00:00
}
if podKiller . gracePeriodOverride == nil {
t . Errorf ( "Manager chose to kill pod but should have had a grace period override." )
}
observedGracePeriod := * podKiller . gracePeriodOverride
if observedGracePeriod != manager . config . MaxPodGracePeriodSeconds {
t . Errorf ( "Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d" , manager . config . MaxPodGracePeriodSeconds , observedGracePeriod )
}
// reset state
podKiller . pod = nil
podKiller . gracePeriodOverride = nil
// remove disk pressure
fakeClock . Step ( 20 * time . Minute )
summaryProvider . result = summaryStatsMaker ( "16Gi" , "200Gi" , podStats )
2017-07-07 20:40:13 +00:00
manager . synchronize ( diskInfoProvider , activePodsFunc , capacityProvider )
2016-07-22 19:23:09 +00:00
// we should not have disk pressure
if manager . IsUnderDiskPressure ( ) {
t . Errorf ( "Manager should not report disk pressure" )
}
// induce disk pressure!
fakeClock . Step ( 1 * time . Minute )
summaryProvider . result = summaryStatsMaker ( "500Mi" , "200Gi" , podStats )
2017-07-07 20:40:13 +00:00
manager . synchronize ( diskInfoProvider , activePodsFunc , capacityProvider )
2016-07-22 19:23:09 +00:00
// we should have disk pressure
if ! manager . IsUnderDiskPressure ( ) {
t . Errorf ( "Manager should report disk pressure" )
}
// check the right pod was killed
2016-10-27 20:56:55 +00:00
if podKiller . pod != podToEvict {
t . Errorf ( "Manager chose to kill pod: %v, but should have chosen %v" , podKiller . pod . Name , podToEvict . Name )
2016-07-22 19:23:09 +00:00
}
observedGracePeriod = * podKiller . gracePeriodOverride
if observedGracePeriod != int64 ( 0 ) {
t . Errorf ( "Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d" , 0 , observedGracePeriod )
}
// try to admit our pod (should fail)
if result := manager . Admit ( & lifecycle . PodAdmitAttributes { Pod : podToAdmit } ) ; result . Admit {
t . Errorf ( "Admit pod: %v, expected: %v, actual: %v" , podToAdmit , false , result . Admit )
}
// reduce disk pressure
fakeClock . Step ( 1 * time . Minute )
summaryProvider . result = summaryStatsMaker ( "16Gi" , "200Gi" , podStats )
podKiller . pod = nil // reset state
2017-07-07 20:40:13 +00:00
manager . synchronize ( diskInfoProvider , activePodsFunc , capacityProvider )
2016-07-22 19:23:09 +00:00
// we should have disk pressure (because transition period not yet met)
if ! manager . IsUnderDiskPressure ( ) {
t . Errorf ( "Manager should report disk pressure" )
}
// no pod should have been killed
if podKiller . pod != nil {
2016-10-27 20:56:55 +00:00
t . Errorf ( "Manager chose to kill pod: %v when no pod should have been killed" , podKiller . pod . Name )
2016-07-22 19:23:09 +00:00
}
// try to admit our pod (should fail)
if result := manager . Admit ( & lifecycle . PodAdmitAttributes { Pod : podToAdmit } ) ; result . Admit {
t . Errorf ( "Admit pod: %v, expected: %v, actual: %v" , podToAdmit , false , result . Admit )
}
// move the clock past transition period to ensure that we stop reporting pressure
fakeClock . Step ( 5 * time . Minute )
summaryProvider . result = summaryStatsMaker ( "16Gi" , "200Gi" , podStats )
podKiller . pod = nil // reset state
2017-07-07 20:40:13 +00:00
manager . synchronize ( diskInfoProvider , activePodsFunc , capacityProvider )
2016-07-22 19:23:09 +00:00
// we should not have disk pressure (because transition period met)
if manager . IsUnderDiskPressure ( ) {
t . Errorf ( "Manager should not report disk pressure" )
}
// no pod should have been killed
if podKiller . pod != nil {
2016-10-27 20:56:55 +00:00
t . Errorf ( "Manager chose to kill pod: %v when no pod should have been killed" , podKiller . pod . Name )
2016-07-22 19:23:09 +00:00
}
// try to admit our pod (should succeed)
if result := manager . Admit ( & lifecycle . PodAdmitAttributes { Pod : podToAdmit } ) ; ! result . Admit {
t . Errorf ( "Admit pod: %v, expected: %v, actual: %v" , podToAdmit , true , result . Admit )
}
}
2016-08-03 02:58:48 +00:00
// TestMinReclaim verifies that min-reclaim works as desired.
func TestMinReclaim ( t * testing . T ) {
2017-10-10 00:34:54 +00:00
enablePodPriority ( true )
2016-10-14 13:02:55 +00:00
podMaker := makePodWithMemoryStats
summaryStatsMaker := makeMemoryStats
podsToMake := [ ] podToMake {
2017-10-10 00:34:54 +00:00
{ name : "guaranteed-low-priority-high-usage" , priority : lowPriority , requests : newResourceList ( "100m" , "1Gi" ) , limits : newResourceList ( "100m" , "1Gi" ) , memoryWorkingSet : "900Mi" } ,
{ name : "burstable-below-requests" , priority : defaultPriority , requests : newResourceList ( "100m" , "100Mi" ) , limits : newResourceList ( "200m" , "1Gi" ) , memoryWorkingSet : "50Mi" } ,
{ name : "burstable-above-requests" , priority : defaultPriority , requests : newResourceList ( "100m" , "100Mi" ) , limits : newResourceList ( "200m" , "1Gi" ) , memoryWorkingSet : "400Mi" } ,
{ name : "best-effort-high-priority-high-usage" , priority : highPriority , requests : newResourceList ( "" , "" ) , limits : newResourceList ( "" , "" ) , memoryWorkingSet : "400Mi" } ,
{ name : "best-effort-low-priority-low-usage" , priority : lowPriority , requests : newResourceList ( "" , "" ) , limits : newResourceList ( "" , "" ) , memoryWorkingSet : "100Mi" } ,
2016-08-03 02:58:48 +00:00
}
2016-11-18 20:50:58 +00:00
pods := [ ] * v1 . Pod { }
podStats := map [ * v1 . Pod ] statsapi . PodStats { }
2016-08-03 02:58:48 +00:00
for _ , podToMake := range podsToMake {
2017-10-10 00:34:54 +00:00
pod , podStat := podMaker ( podToMake . name , podToMake . priority , podToMake . requests , podToMake . limits , podToMake . memoryWorkingSet )
2016-08-03 02:58:48 +00:00
pods = append ( pods , pod )
podStats [ pod ] = podStat
}
2017-10-10 00:34:54 +00:00
podToEvict := pods [ 4 ]
2016-11-18 20:50:58 +00:00
activePodsFunc := func ( ) [ ] * v1 . Pod {
2016-08-03 02:58:48 +00:00
return pods
}
fakeClock := clock . NewFakeClock ( time . Now ( ) )
podKiller := & mockPodKiller { }
diskInfoProvider := & mockDiskInfoProvider { dedicatedImageFs : false }
2017-07-07 20:40:13 +00:00
capacityProvider := newMockCapacityProvider ( v1 . ResourceList { v1 . ResourceMemory : * quantityMustParse ( "3Gi" ) } , v1 . ResourceList { v1 . ResourceMemory : * quantityMustParse ( "1Gi" ) } )
2017-05-22 18:00:22 +00:00
diskGC := & mockDiskGC { imageBytesFreed : int64 ( 0 ) , err : nil }
2017-07-15 05:25:54 +00:00
nodeRef := & v1 . ObjectReference { Kind : "Node" , Name : "test" , UID : types . UID ( "test" ) , Namespace : "" }
2016-08-03 02:58:48 +00:00
config := Config {
MaxPodGracePeriodSeconds : 5 ,
PressureTransitionPeriod : time . Minute * 5 ,
2017-02-10 05:14:10 +00:00
Thresholds : [ ] evictionapi . Threshold {
2016-08-03 02:58:48 +00:00
{
2017-02-10 05:14:10 +00:00
Signal : evictionapi . SignalMemoryAvailable ,
Operator : evictionapi . OpLessThan ,
Value : evictionapi . ThresholdValue {
2016-08-10 04:09:27 +00:00
Quantity : quantityMustParse ( "1Gi" ) ,
} ,
2017-02-10 05:14:10 +00:00
MinReclaim : & evictionapi . ThresholdValue {
2016-09-23 20:36:03 +00:00
Quantity : quantityMustParse ( "500Mi" ) ,
} ,
2016-08-03 02:58:48 +00:00
} ,
} ,
}
summaryProvider := & fakeSummaryProvider { result : summaryStatsMaker ( "2Gi" , podStats ) }
manager := & managerImpl {
clock : fakeClock ,
killPodFunc : podKiller . killPodNow ,
2017-05-22 18:00:22 +00:00
imageGC : diskGC ,
containerGC : diskGC ,
2016-08-03 02:58:48 +00:00
config : config ,
recorder : & record . FakeRecorder { } ,
summaryProvider : summaryProvider ,
nodeRef : nodeRef ,
nodeConditionsLastObservedAt : nodeConditionsObservedAt { } ,
thresholdsFirstObservedAt : thresholdsObservedAt { } ,
}
// synchronize
2017-07-07 20:40:13 +00:00
manager . synchronize ( diskInfoProvider , activePodsFunc , capacityProvider )
2016-08-03 02:58:48 +00:00
// we should not have memory pressure
if manager . IsUnderMemoryPressure ( ) {
t . Errorf ( "Manager should not report memory pressure" )
}
// induce memory pressure!
fakeClock . Step ( 1 * time . Minute )
summaryProvider . result = summaryStatsMaker ( "500Mi" , podStats )
2017-07-07 20:40:13 +00:00
manager . synchronize ( diskInfoProvider , activePodsFunc , capacityProvider )
2016-08-03 02:58:48 +00:00
// we should have memory pressure
if ! manager . IsUnderMemoryPressure ( ) {
t . Errorf ( "Manager should report memory pressure" )
}
// check the right pod was killed
2016-10-27 20:56:55 +00:00
if podKiller . pod != podToEvict {
t . Errorf ( "Manager chose to kill pod: %v, but should have chosen %v" , podKiller . pod . Name , podToEvict . Name )
2016-08-03 02:58:48 +00:00
}
observedGracePeriod := * podKiller . gracePeriodOverride
if observedGracePeriod != int64 ( 0 ) {
t . Errorf ( "Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d" , 0 , observedGracePeriod )
}
// reduce memory pressure, but not below the min-reclaim amount
fakeClock . Step ( 1 * time . Minute )
summaryProvider . result = summaryStatsMaker ( "1.2Gi" , podStats )
podKiller . pod = nil // reset state
2017-07-07 20:40:13 +00:00
manager . synchronize ( diskInfoProvider , activePodsFunc , capacityProvider )
2016-08-03 02:58:48 +00:00
// we should have memory pressure (because transition period not yet met)
if ! manager . IsUnderMemoryPressure ( ) {
t . Errorf ( "Manager should report memory pressure" )
}
// check the right pod was killed
2016-10-27 20:56:55 +00:00
if podKiller . pod != podToEvict {
t . Errorf ( "Manager chose to kill pod: %v, but should have chosen %v" , podKiller . pod . Name , podToEvict . Name )
2016-08-03 02:58:48 +00:00
}
observedGracePeriod = * podKiller . gracePeriodOverride
if observedGracePeriod != int64 ( 0 ) {
t . Errorf ( "Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d" , 0 , observedGracePeriod )
}
// reduce memory pressure and ensure the min-reclaim amount
fakeClock . Step ( 1 * time . Minute )
summaryProvider . result = summaryStatsMaker ( "2Gi" , podStats )
podKiller . pod = nil // reset state
2017-07-07 20:40:13 +00:00
manager . synchronize ( diskInfoProvider , activePodsFunc , capacityProvider )
2016-08-03 02:58:48 +00:00
// we should have memory pressure (because transition period not yet met)
if ! manager . IsUnderMemoryPressure ( ) {
t . Errorf ( "Manager should report memory pressure" )
}
// no pod should have been killed
if podKiller . pod != nil {
2016-10-27 20:56:55 +00:00
t . Errorf ( "Manager chose to kill pod: %v when no pod should have been killed" , podKiller . pod . Name )
2016-08-03 02:58:48 +00:00
}
// move the clock past transition period to ensure that we stop reporting pressure
fakeClock . Step ( 5 * time . Minute )
summaryProvider . result = summaryStatsMaker ( "2Gi" , podStats )
podKiller . pod = nil // reset state
2017-07-07 20:40:13 +00:00
manager . synchronize ( diskInfoProvider , activePodsFunc , capacityProvider )
2016-08-03 02:58:48 +00:00
// we should not have memory pressure (because transition period met)
if manager . IsUnderMemoryPressure ( ) {
t . Errorf ( "Manager should not report memory pressure" )
}
// no pod should have been killed
if podKiller . pod != nil {
2016-10-27 20:56:55 +00:00
t . Errorf ( "Manager chose to kill pod: %v when no pod should have been killed" , podKiller . pod . Name )
2016-08-03 02:58:48 +00:00
}
}
2016-08-03 20:55:52 +00:00
func TestNodeReclaimFuncs ( t * testing . T ) {
2017-10-10 00:34:54 +00:00
enablePodPriority ( true )
2016-10-14 13:02:55 +00:00
podMaker := makePodWithDiskStats
summaryStatsMaker := makeDiskStats
podsToMake := [ ] podToMake {
2017-10-10 00:34:54 +00:00
{ name : "low-priority-high-usage" , priority : lowPriority , requests : newResourceList ( "100m" , "1Gi" ) , limits : newResourceList ( "100m" , "1Gi" ) , rootFsUsed : "900Mi" } ,
{ name : "below-requests" , priority : defaultPriority , requests : newResourceList ( "100m" , "100Mi" ) , limits : newResourceList ( "200m" , "1Gi" ) , logsFsUsed : "50Mi" } ,
{ name : "above-requests" , priority : defaultPriority , requests : newResourceList ( "100m" , "100Mi" ) , limits : newResourceList ( "200m" , "1Gi" ) , rootFsUsed : "400Mi" } ,
{ name : "high-priority-high-usage" , priority : highPriority , requests : newResourceList ( "" , "" ) , limits : newResourceList ( "" , "" ) , perLocalVolumeUsed : "400Mi" } ,
{ name : "low-priority-low-usage" , priority : lowPriority , requests : newResourceList ( "" , "" ) , limits : newResourceList ( "" , "" ) , rootFsUsed : "100Mi" } ,
2016-08-03 20:55:52 +00:00
}
2016-11-18 20:50:58 +00:00
pods := [ ] * v1 . Pod { }
podStats := map [ * v1 . Pod ] statsapi . PodStats { }
2016-08-03 20:55:52 +00:00
for _ , podToMake := range podsToMake {
2017-10-10 00:34:54 +00:00
pod , podStat := podMaker ( podToMake . name , podToMake . priority , podToMake . requests , podToMake . limits , podToMake . rootFsUsed , podToMake . logsFsUsed , podToMake . perLocalVolumeUsed )
2016-08-03 20:55:52 +00:00
pods = append ( pods , pod )
podStats [ pod ] = podStat
}
2017-10-10 00:34:54 +00:00
podToEvict := pods [ 0 ]
2016-11-18 20:50:58 +00:00
activePodsFunc := func ( ) [ ] * v1 . Pod {
2016-08-03 20:55:52 +00:00
return pods
}
fakeClock := clock . NewFakeClock ( time . Now ( ) )
podKiller := & mockPodKiller { }
diskInfoProvider := & mockDiskInfoProvider { dedicatedImageFs : false }
2017-07-07 20:40:13 +00:00
capacityProvider := newMockCapacityProvider ( v1 . ResourceList { v1 . ResourceMemory : * quantityMustParse ( "3Gi" ) } , v1 . ResourceList { v1 . ResourceMemory : * quantityMustParse ( "1Gi" ) } )
2016-08-03 20:55:52 +00:00
imageGcFree := resource . MustParse ( "700Mi" )
2017-05-22 18:00:22 +00:00
diskGC := & mockDiskGC { imageBytesFreed : imageGcFree . Value ( ) , err : nil }
2017-07-15 05:25:54 +00:00
nodeRef := & v1 . ObjectReference { Kind : "Node" , Name : "test" , UID : types . UID ( "test" ) , Namespace : "" }
2016-08-03 20:55:52 +00:00
config := Config {
MaxPodGracePeriodSeconds : 5 ,
PressureTransitionPeriod : time . Minute * 5 ,
2017-02-10 05:14:10 +00:00
Thresholds : [ ] evictionapi . Threshold {
2016-08-03 20:55:52 +00:00
{
2017-02-10 05:14:10 +00:00
Signal : evictionapi . SignalNodeFsAvailable ,
Operator : evictionapi . OpLessThan ,
Value : evictionapi . ThresholdValue {
2016-08-10 04:09:27 +00:00
Quantity : quantityMustParse ( "1Gi" ) ,
} ,
2017-02-10 05:14:10 +00:00
MinReclaim : & evictionapi . ThresholdValue {
2016-09-23 20:36:03 +00:00
Quantity : quantityMustParse ( "500Mi" ) ,
} ,
2016-08-03 20:55:52 +00:00
} ,
} ,
}
summaryProvider := & fakeSummaryProvider { result : summaryStatsMaker ( "16Gi" , "200Gi" , podStats ) }
manager := & managerImpl {
clock : fakeClock ,
killPodFunc : podKiller . killPodNow ,
2017-05-22 18:00:22 +00:00
imageGC : diskGC ,
containerGC : diskGC ,
2016-08-03 20:55:52 +00:00
config : config ,
recorder : & record . FakeRecorder { } ,
summaryProvider : summaryProvider ,
nodeRef : nodeRef ,
nodeConditionsLastObservedAt : nodeConditionsObservedAt { } ,
thresholdsFirstObservedAt : thresholdsObservedAt { } ,
}
// synchronize
2017-07-07 20:40:13 +00:00
manager . synchronize ( diskInfoProvider , activePodsFunc , capacityProvider )
2016-08-03 20:55:52 +00:00
// we should not have disk pressure
if manager . IsUnderDiskPressure ( ) {
t . Errorf ( "Manager should not report disk pressure" )
}
// induce hard threshold
fakeClock . Step ( 1 * time . Minute )
summaryProvider . result = summaryStatsMaker ( ".9Gi" , "200Gi" , podStats )
2017-07-07 20:40:13 +00:00
manager . synchronize ( diskInfoProvider , activePodsFunc , capacityProvider )
2016-08-03 20:55:52 +00:00
// we should have disk pressure
if ! manager . IsUnderDiskPressure ( ) {
t . Errorf ( "Manager should report disk pressure since soft threshold was met" )
}
// verify image gc was invoked
2017-05-22 18:00:22 +00:00
if ! diskGC . imageGCInvoked || ! diskGC . containerGCInvoked {
2016-08-03 20:55:52 +00:00
t . Errorf ( "Manager should have invoked image gc" )
}
// verify no pod was killed because image gc was sufficient
if podKiller . pod != nil {
2016-10-27 20:56:55 +00:00
t . Errorf ( "Manager should not have killed a pod, but killed: %v" , podKiller . pod . Name )
2016-08-03 20:55:52 +00:00
}
// reset state
2017-05-22 18:00:22 +00:00
diskGC . imageGCInvoked = false
diskGC . containerGCInvoked = false
2016-08-03 20:55:52 +00:00
// remove disk pressure
fakeClock . Step ( 20 * time . Minute )
summaryProvider . result = summaryStatsMaker ( "16Gi" , "200Gi" , podStats )
2017-07-07 20:40:13 +00:00
manager . synchronize ( diskInfoProvider , activePodsFunc , capacityProvider )
2016-08-03 20:55:52 +00:00
// we should not have disk pressure
if manager . IsUnderDiskPressure ( ) {
t . Errorf ( "Manager should not report disk pressure" )
}
// induce disk pressure!
fakeClock . Step ( 1 * time . Minute )
summaryProvider . result = summaryStatsMaker ( "400Mi" , "200Gi" , podStats )
2017-07-07 20:40:13 +00:00
manager . synchronize ( diskInfoProvider , activePodsFunc , capacityProvider )
2016-08-03 20:55:52 +00:00
// we should have disk pressure
if ! manager . IsUnderDiskPressure ( ) {
t . Errorf ( "Manager should report disk pressure" )
}
2017-05-22 18:00:22 +00:00
// ensure disk gc was invoked
if ! diskGC . imageGCInvoked || ! diskGC . containerGCInvoked {
2016-08-03 20:55:52 +00:00
t . Errorf ( "Manager should have invoked image gc" )
}
// check the right pod was killed
2016-10-27 20:56:55 +00:00
if podKiller . pod != podToEvict {
t . Errorf ( "Manager chose to kill pod: %v, but should have chosen %v" , podKiller . pod . Name , podToEvict . Name )
2016-08-03 20:55:52 +00:00
}
observedGracePeriod := * podKiller . gracePeriodOverride
if observedGracePeriod != int64 ( 0 ) {
t . Errorf ( "Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d" , 0 , observedGracePeriod )
}
// reduce disk pressure
fakeClock . Step ( 1 * time . Minute )
summaryProvider . result = summaryStatsMaker ( "16Gi" , "200Gi" , podStats )
2017-05-22 18:00:22 +00:00
diskGC . imageGCInvoked = false // reset state
diskGC . containerGCInvoked = false // reset state
podKiller . pod = nil // reset state
2017-07-07 20:40:13 +00:00
manager . synchronize ( diskInfoProvider , activePodsFunc , capacityProvider )
2016-08-03 20:55:52 +00:00
// we should have disk pressure (because transition period not yet met)
if ! manager . IsUnderDiskPressure ( ) {
t . Errorf ( "Manager should report disk pressure" )
}
// no image gc should have occurred
2017-05-22 18:00:22 +00:00
if diskGC . imageGCInvoked || diskGC . containerGCInvoked {
2016-08-03 20:55:52 +00:00
t . Errorf ( "Manager chose to perform image gc when it was not neeed" )
}
// no pod should have been killed
if podKiller . pod != nil {
2016-10-27 20:56:55 +00:00
t . Errorf ( "Manager chose to kill pod: %v when no pod should have been killed" , podKiller . pod . Name )
2016-08-03 20:55:52 +00:00
}
// move the clock past transition period to ensure that we stop reporting pressure
fakeClock . Step ( 5 * time . Minute )
summaryProvider . result = summaryStatsMaker ( "16Gi" , "200Gi" , podStats )
2017-05-22 18:00:22 +00:00
diskGC . imageGCInvoked = false // reset state
diskGC . containerGCInvoked = false // reset state
podKiller . pod = nil // reset state
2017-07-07 20:40:13 +00:00
manager . synchronize ( diskInfoProvider , activePodsFunc , capacityProvider )
2016-08-03 20:55:52 +00:00
// we should not have disk pressure (because transition period met)
if manager . IsUnderDiskPressure ( ) {
t . Errorf ( "Manager should not report disk pressure" )
}
// no image gc should have occurred
2017-05-22 18:00:22 +00:00
if diskGC . imageGCInvoked || diskGC . containerGCInvoked {
2016-08-03 20:55:52 +00:00
t . Errorf ( "Manager chose to perform image gc when it was not neeed" )
}
// no pod should have been killed
if podKiller . pod != nil {
2016-10-27 20:56:55 +00:00
t . Errorf ( "Manager chose to kill pod: %v when no pod should have been killed" , podKiller . pod . Name )
2016-08-03 20:55:52 +00:00
}
}
2016-08-09 21:34:11 +00:00
2016-10-27 20:56:55 +00:00
func TestInodePressureNodeFsInodes ( t * testing . T ) {
2017-10-10 00:34:54 +00:00
enablePodPriority ( true )
podMaker := func ( name string , priority int32 , requests v1 . ResourceList , limits v1 . ResourceList , rootInodes , logInodes , volumeInodes string ) ( * v1 . Pod , statsapi . PodStats ) {
pod := newPod ( name , priority , [ ] v1 . Container {
2016-08-09 21:34:11 +00:00
newContainer ( name , requests , limits ) ,
} , nil )
2016-10-27 20:56:55 +00:00
podStats := newPodInodeStats ( pod , parseQuantity ( rootInodes ) , parseQuantity ( logInodes ) , parseQuantity ( volumeInodes ) )
2016-08-09 21:34:11 +00:00
return pod , podStats
}
2016-11-18 20:50:58 +00:00
summaryStatsMaker := func ( rootFsInodesFree , rootFsInodes string , podStats map [ * v1 . Pod ] statsapi . PodStats ) * statsapi . Summary {
2016-08-09 21:34:11 +00:00
rootFsInodesFreeVal := resource . MustParse ( rootFsInodesFree )
internalRootFsInodesFree := uint64 ( rootFsInodesFreeVal . Value ( ) )
rootFsInodesVal := resource . MustParse ( rootFsInodes )
internalRootFsInodes := uint64 ( rootFsInodesVal . Value ( ) )
result := & statsapi . Summary {
Node : statsapi . NodeStats {
Fs : & statsapi . FsStats {
InodesFree : & internalRootFsInodesFree ,
Inodes : & internalRootFsInodes ,
} ,
} ,
Pods : [ ] statsapi . PodStats { } ,
}
for _ , podStat := range podStats {
result . Pods = append ( result . Pods , podStat )
}
return result
}
2016-10-14 13:02:55 +00:00
podsToMake := [ ] podToMake {
2017-10-10 00:34:54 +00:00
{ name : "low-priority-high-usage" , priority : lowPriority , requests : newResourceList ( "100m" , "1Gi" ) , limits : newResourceList ( "100m" , "1Gi" ) , rootFsInodesUsed : "900Mi" } ,
{ name : "below-requests" , priority : defaultPriority , requests : newResourceList ( "100m" , "100Mi" ) , limits : newResourceList ( "200m" , "1Gi" ) , rootFsInodesUsed : "50Mi" } ,
{ name : "above-requests" , priority : defaultPriority , requests : newResourceList ( "100m" , "100Mi" ) , limits : newResourceList ( "200m" , "1Gi" ) , rootFsInodesUsed : "400Mi" } ,
{ name : "high-priority-high-usage" , priority : highPriority , requests : newResourceList ( "" , "" ) , limits : newResourceList ( "" , "" ) , rootFsInodesUsed : "400Mi" } ,
{ name : "low-priority-low-usage" , priority : lowPriority , requests : newResourceList ( "" , "" ) , limits : newResourceList ( "" , "" ) , rootFsInodesUsed : "100Mi" } ,
2016-08-09 21:34:11 +00:00
}
2016-11-18 20:50:58 +00:00
pods := [ ] * v1 . Pod { }
podStats := map [ * v1 . Pod ] statsapi . PodStats { }
2016-08-09 21:34:11 +00:00
for _ , podToMake := range podsToMake {
2017-10-10 00:34:54 +00:00
pod , podStat := podMaker ( podToMake . name , podToMake . priority , podToMake . requests , podToMake . limits , podToMake . rootFsInodesUsed , podToMake . logsFsInodesUsed , podToMake . perLocalVolumeInodesUsed )
2016-08-09 21:34:11 +00:00
pods = append ( pods , pod )
podStats [ pod ] = podStat
}
2017-10-10 00:34:54 +00:00
podToEvict := pods [ 0 ]
2016-11-18 20:50:58 +00:00
activePodsFunc := func ( ) [ ] * v1 . Pod {
2016-08-09 21:34:11 +00:00
return pods
}
fakeClock := clock . NewFakeClock ( time . Now ( ) )
podKiller := & mockPodKiller { }
diskInfoProvider := & mockDiskInfoProvider { dedicatedImageFs : false }
2017-07-07 20:40:13 +00:00
capacityProvider := newMockCapacityProvider ( v1 . ResourceList { v1 . ResourceMemory : * quantityMustParse ( "3Gi" ) } , v1 . ResourceList { v1 . ResourceMemory : * quantityMustParse ( "1Gi" ) } )
2017-05-22 18:00:22 +00:00
diskGC := & mockDiskGC { imageBytesFreed : int64 ( 0 ) , err : nil }
2017-07-15 05:25:54 +00:00
nodeRef := & v1 . ObjectReference { Kind : "Node" , Name : "test" , UID : types . UID ( "test" ) , Namespace : "" }
2016-08-09 21:34:11 +00:00
config := Config {
MaxPodGracePeriodSeconds : 5 ,
PressureTransitionPeriod : time . Minute * 5 ,
2017-02-10 05:14:10 +00:00
Thresholds : [ ] evictionapi . Threshold {
2016-08-09 21:34:11 +00:00
{
2017-02-10 05:14:10 +00:00
Signal : evictionapi . SignalNodeFsInodesFree ,
Operator : evictionapi . OpLessThan ,
Value : evictionapi . ThresholdValue {
2016-08-09 21:34:11 +00:00
Quantity : quantityMustParse ( "1Mi" ) ,
} ,
} ,
{
2017-02-10 05:14:10 +00:00
Signal : evictionapi . SignalNodeFsInodesFree ,
Operator : evictionapi . OpLessThan ,
Value : evictionapi . ThresholdValue {
2016-08-09 21:34:11 +00:00
Quantity : quantityMustParse ( "2Mi" ) ,
} ,
GracePeriod : time . Minute * 2 ,
} ,
} ,
}
summaryProvider := & fakeSummaryProvider { result : summaryStatsMaker ( "3Mi" , "4Mi" , podStats ) }
manager := & managerImpl {
clock : fakeClock ,
killPodFunc : podKiller . killPodNow ,
2017-05-22 18:00:22 +00:00
imageGC : diskGC ,
containerGC : diskGC ,
2016-08-09 21:34:11 +00:00
config : config ,
recorder : & record . FakeRecorder { } ,
summaryProvider : summaryProvider ,
nodeRef : nodeRef ,
nodeConditionsLastObservedAt : nodeConditionsObservedAt { } ,
thresholdsFirstObservedAt : thresholdsObservedAt { } ,
}
// create a best effort pod to test admission
2017-10-10 00:34:54 +00:00
podToAdmit , _ := podMaker ( "pod-to-admit" , defaultPriority , newResourceList ( "" , "" ) , newResourceList ( "" , "" ) , "0" , "0" , "0" )
2016-08-09 21:34:11 +00:00
// synchronize
2017-07-07 20:40:13 +00:00
manager . synchronize ( diskInfoProvider , activePodsFunc , capacityProvider )
2016-08-09 21:34:11 +00:00
2016-11-04 15:29:27 +00:00
// we should not have disk pressure
if manager . IsUnderDiskPressure ( ) {
2016-09-22 17:51:19 +00:00
t . Errorf ( "Manager should not report inode pressure" )
2016-08-09 21:34:11 +00:00
}
// try to admit our pod (should succeed)
if result := manager . Admit ( & lifecycle . PodAdmitAttributes { Pod : podToAdmit } ) ; ! result . Admit {
t . Errorf ( "Admit pod: %v, expected: %v, actual: %v" , podToAdmit , true , result . Admit )
}
// induce soft threshold
fakeClock . Step ( 1 * time . Minute )
summaryProvider . result = summaryStatsMaker ( "1.5Mi" , "4Mi" , podStats )
2017-07-07 20:40:13 +00:00
manager . synchronize ( diskInfoProvider , activePodsFunc , capacityProvider )
2016-08-09 21:34:11 +00:00
2016-11-04 15:29:27 +00:00
// we should have disk pressure
if ! manager . IsUnderDiskPressure ( ) {
2016-09-22 17:51:19 +00:00
t . Errorf ( "Manager should report inode pressure since soft threshold was met" )
2016-08-09 21:34:11 +00:00
}
// verify no pod was yet killed because there has not yet been enough time passed.
if podKiller . pod != nil {
2016-10-27 20:56:55 +00:00
t . Errorf ( "Manager should not have killed a pod yet, but killed: %v" , podKiller . pod . Name )
2016-08-09 21:34:11 +00:00
}
// step forward in time pass the grace period
fakeClock . Step ( 3 * time . Minute )
summaryProvider . result = summaryStatsMaker ( "1.5Mi" , "4Mi" , podStats )
2017-07-07 20:40:13 +00:00
manager . synchronize ( diskInfoProvider , activePodsFunc , capacityProvider )
2016-08-09 21:34:11 +00:00
2016-11-04 15:29:27 +00:00
// we should have disk pressure
if ! manager . IsUnderDiskPressure ( ) {
2016-09-22 17:51:19 +00:00
t . Errorf ( "Manager should report inode pressure since soft threshold was met" )
2016-08-09 21:34:11 +00:00
}
// verify the right pod was killed with the right grace period.
2016-10-27 20:56:55 +00:00
if podKiller . pod != podToEvict {
t . Errorf ( "Manager chose to kill pod: %v, but should have chosen %v" , podKiller . pod . Name , podToEvict . Name )
2016-08-09 21:34:11 +00:00
}
if podKiller . gracePeriodOverride == nil {
t . Errorf ( "Manager chose to kill pod but should have had a grace period override." )
}
observedGracePeriod := * podKiller . gracePeriodOverride
if observedGracePeriod != manager . config . MaxPodGracePeriodSeconds {
t . Errorf ( "Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d" , manager . config . MaxPodGracePeriodSeconds , observedGracePeriod )
}
// reset state
podKiller . pod = nil
podKiller . gracePeriodOverride = nil
2016-09-22 17:51:19 +00:00
// remove inode pressure
2016-08-09 21:34:11 +00:00
fakeClock . Step ( 20 * time . Minute )
summaryProvider . result = summaryStatsMaker ( "3Mi" , "4Mi" , podStats )
2017-07-07 20:40:13 +00:00
manager . synchronize ( diskInfoProvider , activePodsFunc , capacityProvider )
2016-08-09 21:34:11 +00:00
2016-11-04 15:29:27 +00:00
// we should not have disk pressure
if manager . IsUnderDiskPressure ( ) {
2016-09-22 17:51:19 +00:00
t . Errorf ( "Manager should not report inode pressure" )
2016-08-09 21:34:11 +00:00
}
2016-09-22 17:51:19 +00:00
// induce inode pressure!
2016-08-09 21:34:11 +00:00
fakeClock . Step ( 1 * time . Minute )
summaryProvider . result = summaryStatsMaker ( "0.5Mi" , "4Mi" , podStats )
2017-07-07 20:40:13 +00:00
manager . synchronize ( diskInfoProvider , activePodsFunc , capacityProvider )
2016-08-09 21:34:11 +00:00
2016-11-04 15:29:27 +00:00
// we should have disk pressure
if ! manager . IsUnderDiskPressure ( ) {
2016-09-22 17:51:19 +00:00
t . Errorf ( "Manager should report inode pressure" )
2016-08-09 21:34:11 +00:00
}
// check the right pod was killed
2016-10-27 20:56:55 +00:00
if podKiller . pod != podToEvict {
t . Errorf ( "Manager chose to kill pod: %v, but should have chosen %v" , podKiller . pod . Name , podToEvict . Name )
2016-08-09 21:34:11 +00:00
}
observedGracePeriod = * podKiller . gracePeriodOverride
if observedGracePeriod != int64 ( 0 ) {
t . Errorf ( "Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d" , 0 , observedGracePeriod )
}
// try to admit our pod (should fail)
if result := manager . Admit ( & lifecycle . PodAdmitAttributes { Pod : podToAdmit } ) ; result . Admit {
t . Errorf ( "Admit pod: %v, expected: %v, actual: %v" , podToAdmit , false , result . Admit )
}
2016-09-22 17:51:19 +00:00
// reduce inode pressure
2016-08-09 21:34:11 +00:00
fakeClock . Step ( 1 * time . Minute )
summaryProvider . result = summaryStatsMaker ( "3Mi" , "4Mi" , podStats )
podKiller . pod = nil // reset state
2017-07-07 20:40:13 +00:00
manager . synchronize ( diskInfoProvider , activePodsFunc , capacityProvider )
2016-08-09 21:34:11 +00:00
2016-11-04 15:29:27 +00:00
// we should have disk pressure (because transition period not yet met)
if ! manager . IsUnderDiskPressure ( ) {
2016-09-22 17:51:19 +00:00
t . Errorf ( "Manager should report inode pressure" )
2016-08-09 21:34:11 +00:00
}
// no pod should have been killed
if podKiller . pod != nil {
2016-10-27 20:56:55 +00:00
t . Errorf ( "Manager chose to kill pod: %v when no pod should have been killed" , podKiller . pod . Name )
2016-08-09 21:34:11 +00:00
}
// try to admit our pod (should fail)
if result := manager . Admit ( & lifecycle . PodAdmitAttributes { Pod : podToAdmit } ) ; result . Admit {
t . Errorf ( "Admit pod: %v, expected: %v, actual: %v" , podToAdmit , false , result . Admit )
}
// move the clock past transition period to ensure that we stop reporting pressure
fakeClock . Step ( 5 * time . Minute )
summaryProvider . result = summaryStatsMaker ( "3Mi" , "4Mi" , podStats )
podKiller . pod = nil // reset state
2017-07-07 20:40:13 +00:00
manager . synchronize ( diskInfoProvider , activePodsFunc , capacityProvider )
2016-08-09 21:34:11 +00:00
2016-11-04 15:29:27 +00:00
// we should not have disk pressure (because transition period met)
if manager . IsUnderDiskPressure ( ) {
2016-09-22 17:51:19 +00:00
t . Errorf ( "Manager should not report inode pressure" )
2016-08-09 21:34:11 +00:00
}
// no pod should have been killed
if podKiller . pod != nil {
2016-10-27 20:56:55 +00:00
t . Errorf ( "Manager chose to kill pod: %v when no pod should have been killed" , podKiller . pod . Name )
2016-08-09 21:34:11 +00:00
}
// try to admit our pod (should succeed)
if result := manager . Admit ( & lifecycle . PodAdmitAttributes { Pod : podToAdmit } ) ; ! result . Admit {
t . Errorf ( "Admit pod: %v, expected: %v, actual: %v" , podToAdmit , true , result . Admit )
}
}
2017-01-28 22:48:35 +00:00
// TestCriticalPodsAreNotEvicted
func TestCriticalPodsAreNotEvicted ( t * testing . T ) {
2017-10-10 00:34:54 +00:00
enablePodPriority ( true )
2017-01-28 22:48:35 +00:00
podMaker := makePodWithMemoryStats
summaryStatsMaker := makeMemoryStats
podsToMake := [ ] podToMake {
2017-10-10 00:34:54 +00:00
{ name : "critical" , priority : defaultPriority , requests : newResourceList ( "100m" , "1Gi" ) , limits : newResourceList ( "100m" , "1Gi" ) , memoryWorkingSet : "800Mi" } ,
2017-01-28 22:48:35 +00:00
}
pods := [ ] * v1 . Pod { }
podStats := map [ * v1 . Pod ] statsapi . PodStats { }
for _ , podToMake := range podsToMake {
2017-10-10 00:34:54 +00:00
pod , podStat := podMaker ( podToMake . name , podToMake . priority , podToMake . requests , podToMake . limits , podToMake . memoryWorkingSet )
2017-01-28 22:48:35 +00:00
pods = append ( pods , pod )
podStats [ pod ] = podStat
}
// Mark the pod as critical
pods [ 0 ] . Annotations = map [ string ] string {
2017-02-19 03:16:10 +00:00
kubelettypes . CriticalPodAnnotationKey : "" ,
kubelettypes . ConfigSourceAnnotationKey : kubelettypes . FileSource ,
2017-01-28 22:48:35 +00:00
}
pods [ 0 ] . Namespace = kubeapi . NamespaceSystem
podToEvict := pods [ 0 ]
activePodsFunc := func ( ) [ ] * v1 . Pod {
return pods
}
fakeClock := clock . NewFakeClock ( time . Now ( ) )
podKiller := & mockPodKiller { }
diskInfoProvider := & mockDiskInfoProvider { dedicatedImageFs : false }
2017-07-07 20:40:13 +00:00
capacityProvider := newMockCapacityProvider ( v1 . ResourceList { v1 . ResourceMemory : * quantityMustParse ( "3Gi" ) } , v1 . ResourceList { v1 . ResourceMemory : * quantityMustParse ( "1Gi" ) } )
2017-05-22 18:00:22 +00:00
diskGC := & mockDiskGC { imageBytesFreed : int64 ( 0 ) , err : nil }
2017-07-15 05:25:54 +00:00
nodeRef := & v1 . ObjectReference {
2017-01-28 22:48:35 +00:00
Kind : "Node" , Name : "test" , UID : types . UID ( "test" ) , Namespace : "" ,
}
config := Config {
MaxPodGracePeriodSeconds : 5 ,
PressureTransitionPeriod : time . Minute * 5 ,
2017-02-10 05:14:10 +00:00
Thresholds : [ ] evictionapi . Threshold {
2017-01-28 22:48:35 +00:00
{
2017-02-10 05:14:10 +00:00
Signal : evictionapi . SignalMemoryAvailable ,
Operator : evictionapi . OpLessThan ,
Value : evictionapi . ThresholdValue {
2017-01-28 22:48:35 +00:00
Quantity : quantityMustParse ( "1Gi" ) ,
} ,
} ,
{
2017-02-10 05:14:10 +00:00
Signal : evictionapi . SignalMemoryAvailable ,
Operator : evictionapi . OpLessThan ,
Value : evictionapi . ThresholdValue {
2017-01-28 22:48:35 +00:00
Quantity : quantityMustParse ( "2Gi" ) ,
} ,
GracePeriod : time . Minute * 2 ,
} ,
} ,
}
summaryProvider := & fakeSummaryProvider { result : summaryStatsMaker ( "2Gi" , podStats ) }
manager := & managerImpl {
clock : fakeClock ,
killPodFunc : podKiller . killPodNow ,
2017-05-22 18:00:22 +00:00
imageGC : diskGC ,
containerGC : diskGC ,
2017-01-28 22:48:35 +00:00
config : config ,
recorder : & record . FakeRecorder { } ,
summaryProvider : summaryProvider ,
nodeRef : nodeRef ,
nodeConditionsLastObservedAt : nodeConditionsObservedAt { } ,
thresholdsFirstObservedAt : thresholdsObservedAt { } ,
}
// Enable critical pod annotation feature gate
utilfeature . DefaultFeatureGate . Set ( "ExperimentalCriticalPodAnnotation=True" )
// induce soft threshold
fakeClock . Step ( 1 * time . Minute )
summaryProvider . result = summaryStatsMaker ( "1500Mi" , podStats )
2017-07-07 20:40:13 +00:00
manager . synchronize ( diskInfoProvider , activePodsFunc , capacityProvider )
2017-01-28 22:48:35 +00:00
// we should have memory pressure
if ! manager . IsUnderMemoryPressure ( ) {
t . Errorf ( "Manager should report memory pressure since soft threshold was met" )
}
// verify no pod was yet killed because there has not yet been enough time passed.
if podKiller . pod != nil {
t . Errorf ( "Manager should not have killed a pod yet, but killed: %v" , podKiller . pod . Name )
}
// step forward in time pass the grace period
fakeClock . Step ( 3 * time . Minute )
summaryProvider . result = summaryStatsMaker ( "1500Mi" , podStats )
2017-07-07 20:40:13 +00:00
manager . synchronize ( diskInfoProvider , activePodsFunc , capacityProvider )
2017-01-28 22:48:35 +00:00
// we should have memory pressure
if ! manager . IsUnderMemoryPressure ( ) {
t . Errorf ( "Manager should report memory pressure since soft threshold was met" )
}
// verify the right pod was killed with the right grace period.
if podKiller . pod == podToEvict {
t . Errorf ( "Manager chose to kill critical pod: %v, but should have ignored it" , podKiller . pod . Name )
}
// reset state
podKiller . pod = nil
podKiller . gracePeriodOverride = nil
// remove memory pressure
fakeClock . Step ( 20 * time . Minute )
summaryProvider . result = summaryStatsMaker ( "3Gi" , podStats )
2017-07-07 20:40:13 +00:00
manager . synchronize ( diskInfoProvider , activePodsFunc , capacityProvider )
2017-01-28 22:48:35 +00:00
// we should not have memory pressure
if manager . IsUnderMemoryPressure ( ) {
t . Errorf ( "Manager should not report memory pressure" )
}
// Disable critical pod annotation feature gate
utilfeature . DefaultFeatureGate . Set ( "ExperimentalCriticalPodAnnotation=False" )
// induce memory pressure!
fakeClock . Step ( 1 * time . Minute )
summaryProvider . result = summaryStatsMaker ( "500Mi" , podStats )
2017-07-07 20:40:13 +00:00
manager . synchronize ( diskInfoProvider , activePodsFunc , capacityProvider )
2017-03-02 01:56:24 +00:00
// we should have memory pressure
if ! manager . IsUnderMemoryPressure ( ) {
t . Errorf ( "Manager should report memory pressure" )
}
// check the right pod was killed
if podKiller . pod != podToEvict {
t . Errorf ( "Manager chose to kill pod: %v, but should have chosen %v" , podKiller . pod . Name , podToEvict . Name )
}
}
// TestAllocatableMemoryPressure verifies pod admission and eviction behavior
// when the allocatable-memory-available threshold is crossed.
func TestAllocatableMemoryPressure ( t * testing . T ) {
2017-10-10 00:34:54 +00:00
enablePodPriority ( true )
2017-03-02 01:56:24 +00:00
podMaker := makePodWithMemoryStats
summaryStatsMaker := makeMemoryStats
constantCapacity := "4Gi"
podsToMake := [ ] podToMake {
2017-10-10 00:34:54 +00:00
{ name : "guaranteed-low-priority-high-usage" , priority : lowPriority , requests : newResourceList ( "100m" , "1Gi" ) , limits : newResourceList ( "100m" , "1Gi" ) , memoryWorkingSet : "900Mi" } ,
{ name : "burstable-below-requests" , priority : defaultPriority , requests : newResourceList ( "100m" , "100Mi" ) , limits : newResourceList ( "200m" , "1Gi" ) , memoryWorkingSet : "50Mi" } ,
{ name : "burstable-above-requests" , priority : defaultPriority , requests : newResourceList ( "100m" , "100Mi" ) , limits : newResourceList ( "200m" , "1Gi" ) , memoryWorkingSet : "400Mi" } ,
{ name : "best-effort-high-priority-high-usage" , priority : highPriority , requests : newResourceList ( "" , "" ) , limits : newResourceList ( "" , "" ) , memoryWorkingSet : "400Mi" } ,
{ name : "best-effort-low-priority-low-usage" , priority : lowPriority , requests : newResourceList ( "" , "" ) , limits : newResourceList ( "" , "" ) , memoryWorkingSet : "100Mi" } ,
2017-03-02 01:56:24 +00:00
}
pods := [ ] * v1 . Pod { }
podStats := map [ * v1 . Pod ] statsapi . PodStats { }
for _ , podToMake := range podsToMake {
2017-10-10 00:34:54 +00:00
pod , podStat := podMaker ( podToMake . name , podToMake . priority , podToMake . requests , podToMake . limits , podToMake . memoryWorkingSet )
2017-03-02 01:56:24 +00:00
pods = append ( pods , pod )
podStats [ pod ] = podStat
}
2017-10-10 00:34:54 +00:00
podToEvict := pods [ 4 ]
2017-03-02 01:56:24 +00:00
activePodsFunc := func ( ) [ ] * v1 . Pod {
return pods
}
fakeClock := clock . NewFakeClock ( time . Now ( ) )
podKiller := & mockPodKiller { }
diskInfoProvider := & mockDiskInfoProvider { dedicatedImageFs : false }
2017-07-07 20:40:13 +00:00
capacityProvider := newMockCapacityProvider ( v1 . ResourceList { v1 . ResourceMemory : * quantityMustParse ( "3Gi" ) } , v1 . ResourceList { v1 . ResourceMemory : * quantityMustParse ( "1Gi" ) } )
2017-05-22 18:00:22 +00:00
diskGC := & mockDiskGC { imageBytesFreed : int64 ( 0 ) , err : nil }
2017-07-15 05:25:54 +00:00
nodeRef := & v1 . ObjectReference { Kind : "Node" , Name : "test" , UID : types . UID ( "test" ) , Namespace : "" }
2017-03-02 01:56:24 +00:00
config := Config {
MaxPodGracePeriodSeconds : 5 ,
PressureTransitionPeriod : time . Minute * 5 ,
Thresholds : [ ] evictionapi . Threshold {
{
Signal : evictionapi . SignalAllocatableMemoryAvailable ,
Operator : evictionapi . OpLessThan ,
Value : evictionapi . ThresholdValue {
Quantity : quantityMustParse ( "1Ki" ) ,
} ,
} ,
} ,
}
summaryProvider := & fakeSummaryProvider { result : summaryStatsMaker ( constantCapacity , podStats ) }
manager := & managerImpl {
clock : fakeClock ,
killPodFunc : podKiller . killPodNow ,
2017-05-22 18:00:22 +00:00
imageGC : diskGC ,
containerGC : diskGC ,
2017-03-02 01:56:24 +00:00
config : config ,
recorder : & record . FakeRecorder { } ,
summaryProvider : summaryProvider ,
nodeRef : nodeRef ,
nodeConditionsLastObservedAt : nodeConditionsObservedAt { } ,
thresholdsFirstObservedAt : thresholdsObservedAt { } ,
}
// create a best effort pod to test admission
2017-10-10 00:34:54 +00:00
bestEffortPodToAdmit , _ := podMaker ( "best-admit" , defaultPriority , newResourceList ( "" , "" ) , newResourceList ( "" , "" ) , "0Gi" )
burstablePodToAdmit , _ := podMaker ( "burst-admit" , defaultPriority , newResourceList ( "100m" , "100Mi" ) , newResourceList ( "200m" , "200Mi" ) , "0Gi" )
2017-03-02 01:56:24 +00:00
// synchronize
2017-07-07 20:40:13 +00:00
manager . synchronize ( diskInfoProvider , activePodsFunc , capacityProvider )
2017-03-02 01:56:24 +00:00
// we should not have memory pressure
if manager . IsUnderMemoryPressure ( ) {
t . Errorf ( "Manager should not report memory pressure" )
}
// try to admit our pods (they should succeed)
expected := [ ] bool { true , true }
for i , pod := range [ ] * v1 . Pod { bestEffortPodToAdmit , burstablePodToAdmit } {
if result := manager . Admit ( & lifecycle . PodAdmitAttributes { Pod : pod } ) ; expected [ i ] != result . Admit {
t . Errorf ( "Admit pod: %v, expected: %v, actual: %v" , pod , expected [ i ] , result . Admit )
}
}
// induce memory pressure!
fakeClock . Step ( 1 * time . Minute )
2017-10-10 00:34:54 +00:00
pod , podStat := podMaker ( "guaranteed-high-2" , defaultPriority , newResourceList ( "100m" , "1Gi" ) , newResourceList ( "100m" , "1Gi" ) , "1Gi" )
2017-03-02 01:56:24 +00:00
podStats [ pod ] = podStat
summaryProvider . result = summaryStatsMaker ( constantCapacity , podStats )
2017-07-07 20:40:13 +00:00
manager . synchronize ( diskInfoProvider , activePodsFunc , capacityProvider )
2017-01-28 22:48:35 +00:00
// we should have memory pressure
if ! manager . IsUnderMemoryPressure ( ) {
t . Errorf ( "Manager should report memory pressure" )
}
// check the right pod was killed
if podKiller . pod != podToEvict {
t . Errorf ( "Manager chose to kill pod: %v, but should have chosen %v" , podKiller . pod . Name , podToEvict . Name )
}
2017-03-02 01:56:24 +00:00
observedGracePeriod := * podKiller . gracePeriodOverride
if observedGracePeriod != int64 ( 0 ) {
t . Errorf ( "Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d" , 0 , observedGracePeriod )
}
// reset state
podKiller . pod = nil
podKiller . gracePeriodOverride = nil
// the best-effort pod should not admit, burstable should
expected = [ ] bool { false , true }
for i , pod := range [ ] * v1 . Pod { bestEffortPodToAdmit , burstablePodToAdmit } {
if result := manager . Admit ( & lifecycle . PodAdmitAttributes { Pod : pod } ) ; expected [ i ] != result . Admit {
t . Errorf ( "Admit pod: %v, expected: %v, actual: %v" , pod , expected [ i ] , result . Admit )
}
}
// reduce memory pressure
fakeClock . Step ( 1 * time . Minute )
for pod := range podStats {
if pod . Name == "guaranteed-high-2" {
delete ( podStats , pod )
}
}
summaryProvider . result = summaryStatsMaker ( constantCapacity , podStats )
podKiller . pod = nil // reset state
2017-07-07 20:40:13 +00:00
manager . synchronize ( diskInfoProvider , activePodsFunc , capacityProvider )
2017-03-02 01:56:24 +00:00
// we should have memory pressure (because transition period not yet met)
if ! manager . IsUnderMemoryPressure ( ) {
t . Errorf ( "Manager should report memory pressure" )
}
// no pod should have been killed
if podKiller . pod != nil {
t . Errorf ( "Manager chose to kill pod: %v when no pod should have been killed" , podKiller . pod . Name )
}
// the best-effort pod should not admit, burstable should
expected = [ ] bool { false , true }
for i , pod := range [ ] * v1 . Pod { bestEffortPodToAdmit , burstablePodToAdmit } {
if result := manager . Admit ( & lifecycle . PodAdmitAttributes { Pod : pod } ) ; expected [ i ] != result . Admit {
t . Errorf ( "Admit pod: %v, expected: %v, actual: %v" , pod , expected [ i ] , result . Admit )
}
}
// move the clock past transition period to ensure that we stop reporting pressure
fakeClock . Step ( 5 * time . Minute )
summaryProvider . result = summaryStatsMaker ( constantCapacity , podStats )
podKiller . pod = nil // reset state
2017-07-07 20:40:13 +00:00
manager . synchronize ( diskInfoProvider , activePodsFunc , capacityProvider )
2017-03-02 01:56:24 +00:00
// we should not have memory pressure (because transition period met)
if manager . IsUnderMemoryPressure ( ) {
t . Errorf ( "Manager should not report memory pressure" )
}
// no pod should have been killed
if podKiller . pod != nil {
t . Errorf ( "Manager chose to kill pod: %v when no pod should have been killed" , podKiller . pod . Name )
}
// all pods should admit now
expected = [ ] bool { true , true }
for i , pod := range [ ] * v1 . Pod { bestEffortPodToAdmit , burstablePodToAdmit } {
if result := manager . Admit ( & lifecycle . PodAdmitAttributes { Pod : pod } ) ; expected [ i ] != result . Admit {
t . Errorf ( "Admit pod: %v, expected: %v, actual: %v" , pod , expected [ i ] , result . Admit )
}
}
2017-01-28 22:48:35 +00:00
}