Merge pull request #31401 from yujuhong/resource_name

Automatic merge from submit-queue

Print out resource name when evicting pods

This fixes #31397


/cc @derekwaynecarr
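
In practice, this swaps the fixed eviction message "The node was low on compute resources." for one that names the resource under pressure, e.g. "The node was low on memory." for memory-pressure evictions and admission rejections (see the new `getMessage` helper in the second file below).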
Kubernetes Submit Queue 2016-08-26 18:55:21 -07:00 committed by GitHub
commit 087d431409
2 changed files with 14 additions and 5 deletions


@@ -98,8 +98,12 @@ func (m *managerImpl) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAd
         return lifecycle.PodAdmitResult{Admit: true}
     }
 
-    // the node has memory pressure, admit if not best-effort
+    // Check the node conditions to identify the resource under pressure.
+    // The resource can only be either disk or memory; set the default to disk.
+    resource := api.ResourceStorage
     if hasNodeCondition(m.nodeConditions, api.NodeMemoryPressure) {
+        resource = api.ResourceMemory
+        // the node has memory pressure, admit if not best-effort
         notBestEffort := qos.BestEffort != qos.GetPodQOS(attrs.Pod)
         if notBestEffort {
             return lifecycle.PodAdmitResult{Admit: true}
@@ -107,11 +111,11 @@ func (m *managerImpl) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAd
     }
 
     // reject pods when under memory pressure (if pod is best effort), or if under disk pressure.
-    glog.Warningf("Failed to admit pod %v - %s", format.Pod(attrs.Pod), "node has conditions: %v", m.nodeConditions)
+    glog.Warningf("Failed to admit pod %q - node has conditions: %v", format.Pod(attrs.Pod), m.nodeConditions)
     return lifecycle.PodAdmitResult{
         Admit:   false,
         Reason:  reason,
-        Message: message,
+        Message: getMessage(resource),
     }
 }
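
For review convenience, here is a hedged, self-contained sketch of the decision flow Admit implements after these two hunks. Everything below is a stand-in written for this comment: `admitDecision`, `ResourceName`, and the local constants are illustrative rather than the real kubelet/api types; only the control flow, the `messageFmt` string, and the standard "memory" resource name are taken from (or implied by) the diff.

```go
package main

import "fmt"

// Stand-ins for the identifiers touched above; the real code uses
// api.ResourceName, api.ResourceStorage, and api.ResourceMemory.
type ResourceName string

const (
    resourceStorage ResourceName = "storage" // default: disk pressure
    resourceMemory  ResourceName = "memory"
)

// messageFmt mirrors the constant introduced in the second file of this PR.
const messageFmt = "The node was low on %s."

func getMessage(resource ResourceName) string {
    return fmt.Sprintf(messageFmt, resource)
}

// admitDecision mirrors the shape of the new Admit logic: default the pressured
// resource to disk, switch to memory when the node reports memory pressure,
// keep admitting non-best-effort pods under memory pressure, and reject
// everything else with a message that names the resource.
func admitDecision(memoryPressure, bestEffort bool) (admit bool, message string) {
    resource := resourceStorage
    if memoryPressure {
        resource = resourceMemory
        if !bestEffort {
            return true, ""
        }
    }
    return false, getMessage(resource)
}

func main() {
    _, msg := admitDecision(true, true) // best-effort pod, node under memory pressure
    fmt.Println(msg)                    // The node was low on memory.
}
```

The asymmetry matches the existing comment: memory pressure only blocks best-effort pods, while disk pressure rejects every incoming pod, and either way the rejection message now says which resource was low.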
@@ -244,6 +248,7 @@ func (m *managerImpl) synchronize(diskInfoProvider DiskInfoProvider, podFunc Act
     glog.Infof("eviction manager: pods ranked for eviction: %s", format.Pods(activePods))
 
     // we kill at most a single pod during each eviction interval
+    message := getMessage(resourceToReclaim)
     for i := range activePods {
         pod := activePods[i]
         status := api.PodStatus{
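
In the synchronize path, the message is now computed once per eviction pass from `resourceToReclaim`, so every pod killed in that pass carries a status message naming the resource the kubelet is reclaiming; the `api.PodStatus` literal that the loop starts to build is cut off by the hunk, but that is presumably where the message and the "Evicted" reason end up on the evicted pod.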


@@ -37,8 +37,8 @@ const (
     unsupportedEvictionSignal = "unsupported eviction signal %v"
     // the reason reported back in status.
     reason = "Evicted"
-    // the message associated with the reason.
-    message = "The node was low on compute resources."
+    // the message format associated with the reason.
+    messageFmt = "The node was low on %s."
     // disk, in bytes. internal to this module, used to account for local disk usage.
     resourceDisk api.ResourceName = "disk"
     // inodes, number. internal to this module, used to account for local disk inode consumption.
@@ -888,3 +888,7 @@ func deleteImages(imageGC ImageGC, reportBytesFreed bool) nodeReclaimFunc {
         return resource.NewQuantity(reclaimed, resource.BinarySI), nil
     }
 }
+
+func getMessage(resource api.ResourceName) string {
+    return fmt.Sprintf(messageFmt, resource)
+}
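
A quick sanity check of the new helper, written as a hypothetical test for this package (not part of this PR; it assumes only the `messageFmt` and `resourceDisk` constants visible above, the standard "memory" value of `api.ResourceMemory`, and the usual `testing` import):

```go
func TestGetMessage(t *testing.T) {
    // messageFmt is "The node was low on %s.", so the resource name is spliced in verbatim.
    if got, want := getMessage(api.ResourceMemory), "The node was low on memory."; got != want {
        t.Errorf("getMessage(memory) = %q, want %q", got, want)
    }
    if got, want := getMessage(resourceDisk), "The node was low on disk."; got != want {
        t.Errorf("getMessage(disk) = %q, want %q", got, want)
    }
}
```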