fix pod eviction for gracefully terminating pods

pull/6/head
Mike Danese 2016-02-26 16:59:16 -08:00
parent 420d98fd8b
commit c1a7e280a3
2 changed files with 11 additions and 17 deletions
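The core of the change: forcefullyDeletePod now returns the client error instead of swallowing it, and each call site decides what to do with that error (here, routing it through utilruntime.HandleError). Below is a minimal, self-contained sketch of that pattern; handleError and deletePod are local stand-ins for utilruntime.HandleError and the zero-grace-period client delete, not the real Kubernetes APIs.

package main

import (
    "errors"
    "log"
)

// handleError stands in for utilruntime.HandleError: call sites pass the
// returned error through a central handler instead of the helper logging
// (or dropping) it internally.
func handleError(err error) {
    if err != nil {
        log.Printf("eviction error: %v", err)
    }
}

// deletePod stands in for the zero-grace-period delete; it now reports
// failure to the caller rather than handling it itself.
func deletePod(name string) error {
    if name == "" {
        return errors.New("pod has no name")
    }
    log.Printf("force-deleting pod %q with grace period 0", name)
    return nil
}

func main() {
    // Mirrors utilruntime.HandleError(nc.forcefullyDeletePod(pod)) at the call sites.
    handleError(deletePod("terminating-pod"))
    handleError(deletePod(""))
}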


@@ -120,7 +120,7 @@ type NodeController struct {
     daemonSetController *framework.Controller
     daemonSetStore cache.StoreToDaemonSetLister
-    forcefullyDeletePod func(*api.Pod)
+    forcefullyDeletePod func(*api.Pod) error
 }

 // NewNodeController returns a new node controller to sync instances from cloudprovider.
@@ -167,7 +167,7 @@ func NewNodeController(
         now:               unversioned.Now,
         clusterCIDR:       clusterCIDR,
         allocateNodeCIDRs: allocateNodeCIDRs,
-        forcefullyDeletePod: func(p *api.Pod) { forcefullyDeletePod(kubeClient, p) },
+        forcefullyDeletePod: func(p *api.Pod) error { return forcefullyDeletePod(kubeClient, p) },
     }

     nc.podStore.Store, nc.podController = framework.NewInformer(
@@ -329,7 +329,7 @@ func (nc *NodeController) maybeDeleteTerminatingPod(obj interface{}) {
     // delete terminating pods that have not yet been scheduled
     if len(pod.Spec.NodeName) == 0 {
-        nc.forcefullyDeletePod(pod)
+        utilruntime.HandleError(nc.forcefullyDeletePod(pod))
         return
     }
@@ -345,7 +345,7 @@ func (nc *NodeController) maybeDeleteTerminatingPod(obj interface{}) {
     // delete terminating pods that have been scheduled on
     // nonexistent nodes
     if !found {
-        nc.forcefullyDeletePod(pod)
+        utilruntime.HandleError(nc.forcefullyDeletePod(pod))
         return
     }
@@ -358,21 +358,18 @@ func (nc *NodeController) maybeDeleteTerminatingPod(obj interface{}) {
     v, err := version.Parse(node.Status.NodeInfo.KubeletVersion)
     if err != nil {
         glog.Infof("couldn't parse verions %q of minion: %v", node.Status.NodeInfo.KubeletVersion, err)
-        nc.forcefullyDeletePod(pod)
+        utilruntime.HandleError(nc.forcefullyDeletePod(pod))
         return
     }
     if gracefulDeletionVersion.GT(v) {
-        nc.forcefullyDeletePod(pod)
+        utilruntime.HandleError(nc.forcefullyDeletePod(pod))
         return
     }
 }

-func forcefullyDeletePod(c clientset.Interface, pod *api.Pod) {
+func forcefullyDeletePod(c clientset.Interface, pod *api.Pod) error {
     var zero int64
-    err := c.Core().Pods(pod.Namespace).Delete(pod.Name, &api.DeleteOptions{GracePeriodSeconds: &zero})
-    if err != nil {
-        utilruntime.HandleError(err)
-    }
+    return c.Core().Pods(pod.Namespace).Delete(pod.Name, &api.DeleteOptions{GracePeriodSeconds: &zero})
 }

 // monitorNodeStatus verifies node status are constantly updated by kubelet, and if not,
@@ -781,10 +778,6 @@ func (nc *NodeController) deletePods(nodeName string) (bool, error) {
         if pod.Spec.NodeName != nodeName {
             continue
         }
-        // if the pod has already been deleted, ignore it
-        if pod.DeletionGracePeriodSeconds != nil {
-            continue
-        }
         // if the pod is managed by a daemonset, ignore it
         _, err := nc.daemonSetStore.GetPodDaemonSets(&pod)
         if err == nil { // No error means at least one daemonset was found
@@ -793,7 +786,7 @@ func (nc *NodeController) deletePods(nodeName string) (bool, error) {
         glog.V(2).Infof("Starting deletion of pod %v", pod.Name)
         nc.recorder.Eventf(&pod, api.EventTypeNormal, "NodeControllerEviction", "Marking for deletion Pod %s from Node %s", pod.Name, nodeName)
-        if err := nc.kubeClient.Core().Pods(pod.Namespace).Delete(pod.Name, nil); err != nil {
+        if err := nc.forcefullyDeletePod(&pod); err != nil {
             return false, err
         }
         remaining = true
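The deletePods hunks above carry the actual bug fix: the controller used to skip any pod that already had DeletionGracePeriodSeconds set, so a pod stuck in graceful termination on a dead node was never cleaned up; now such pods go through forcefullyDeletePod (grace period 0) like the rest. A rough before/after of that filter, with hypothetical names, a trimmed-down pod type, and the daemonset check omitted:

package main

import "fmt"

// podMeta is a stand-in for api.Pod with only the fields the filter looks at.
type podMeta struct {
    name                       string
    nodeName                   string
    deletionGracePeriodSeconds *int64
}

// shouldEvictOld reproduces the removed behaviour: pods already marked for
// graceful deletion were skipped, so they could linger forever on a dead node.
func shouldEvictOld(p podMeta, nodeName string) bool {
    return p.nodeName == nodeName && p.deletionGracePeriodSeconds == nil
}

// shouldEvictNew drops that check: terminating pods on the dead node are now
// force-deleted along with everything else.
func shouldEvictNew(p podMeta, nodeName string) bool {
    return p.nodeName == nodeName
}

func main() {
    grace := int64(30)
    stuck := podMeta{name: "web-1", nodeName: "node-a", deletionGracePeriodSeconds: &grace}
    fmt.Println(shouldEvictOld(stuck, "node-a")) // false: old code skipped it
    fmt.Println(shouldEvictNew(stuck, "node-a")) // true: new code evicts it
}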


@@ -1040,8 +1040,9 @@ func TestCheckPod(t *testing.T) {
     for i, tc := range tcs {
         var deleteCalls int
-        nc.forcefullyDeletePod = func(_ *api.Pod) {
+        nc.forcefullyDeletePod = func(_ *api.Pod) error {
             deleteCalls++
+            return nil
         }
         nc.maybeDeleteTerminatingPod(&tc.pod)
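The test change works because forcefullyDeletePod is an injectable function field on the controller, so TestCheckPod can swap in a counting stub, which now also has to satisfy the error-returning signature. A toy version of that injection pattern, with made-up names (controller, checkPod, pod) standing in for the real types:

package main

import "fmt"

type pod struct {
    name     string
    nodeName string
}

// controller mimics NodeController's injectable function field.
type controller struct {
    forcefullyDeletePod func(*pod) error
}

// checkPod loosely mimics maybeDeleteTerminatingPod: a terminating pod that
// was never scheduled gets force-deleted.
func (c *controller) checkPod(p *pod) {
    if p.nodeName == "" {
        if err := c.forcefullyDeletePod(p); err != nil {
            fmt.Println("delete failed:", err)
        }
    }
}

func main() {
    var deleteCalls int
    c := &controller{
        // The test stub: count calls and satisfy the new error-returning signature.
        forcefullyDeletePod: func(_ *pod) error {
            deleteCalls++
            return nil
        },
    }
    c.checkPod(&pod{name: "unscheduled"})                   // counted
    c.checkPod(&pod{name: "scheduled", nodeName: "node-1"}) // skipped
    fmt.Println("deleteCalls =", deleteCalls)               // 1
}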