/*
Copyright 2014 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package nodecontroller

import (
	"errors"
	"fmt"
	"net"
	"sync"
	"time"

	"github.com/golang/glog"
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/unversioned"
	"k8s.io/kubernetes/pkg/client/record"
	client "k8s.io/kubernetes/pkg/client/unversioned"
	"k8s.io/kubernetes/pkg/cloudprovider"
	"k8s.io/kubernetes/pkg/fields"
	"k8s.io/kubernetes/pkg/labels"
	"k8s.io/kubernetes/pkg/types"
	"k8s.io/kubernetes/pkg/util"
	"k8s.io/kubernetes/pkg/util/sets"
)

var (
	ErrCloudInstance = errors.New("cloud provider doesn't support instances.")
)

const (
	// nodeStatusUpdateRetry controls the number of retries of writing NodeStatus update.
	nodeStatusUpdateRetry = 5
	// nodeEvictionPeriod controls how often NodeController will try to evict Pods from non-responsive Nodes.
	nodeEvictionPeriod = 100 * time.Millisecond
)

type nodeStatusData struct {
	probeTimestamp           unversioned.Time
	readyTransitionTimestamp unversioned.Time
	status                   api.NodeStatus
}

type NodeController struct {
	allocateNodeCIDRs       bool
	cloud                   cloudprovider.Interface
	clusterCIDR             *net.IPNet
	deletingPodsRateLimiter util.RateLimiter
	knownNodeSet            sets.String
	kubeClient              client.Interface
	// Method for easy mocking in unittest.
	lookupIP func(host string) ([]net.IP, error)
	// Value used if sync_nodes_status=False. NodeController will not proactively
	// sync node status in this case, but will monitor node status posted by kubelet. If
	// it doesn't receive an update for this amount of time, it will start posting
	// "NodeReady==ConditionUnknown". The amount of time before which NodeController starts
	// evicting pods is controlled via the flag 'pod-eviction-timeout'.
	// Note: be cautious when changing the constant, it must work with nodeStatusUpdateFrequency
	// in kubelet. There are several constraints:
	// 1. nodeMonitorGracePeriod must be N times greater than nodeStatusUpdateFrequency, where
	//    N is the number of retries allowed for kubelet to post node status. It is pointless
	//    to make nodeMonitorGracePeriod less than nodeStatusUpdateFrequency, since there
	//    will only be fresh values from Kubelet at an interval of nodeStatusUpdateFrequency.
	//    The constant must be less than podEvictionTimeout.
	// 2. nodeMonitorGracePeriod can't be too large for user experience - a larger value takes
	//    longer for the user to see up-to-date node status.
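	// Illustrative example (the values are hypothetical, not defaults defined in this file):
	// with nodeStatusUpdateFrequency = 10s and N = 4 allowed retries, nodeMonitorGracePeriod
	// should be at least 4 * 10s = 40s, and podEvictionTimeout must be larger than that
	// (e.g. 5m), otherwise pods could be evicted before the node is even marked Unknown.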
	nodeMonitorGracePeriod time.Duration
	// Value controlling NodeController monitoring period, i.e. how often does NodeController
	// check node status posted from kubelet. This value should be lower than nodeMonitorGracePeriod.
	// TODO: Change node status monitor to watch based.
	nodeMonitorPeriod time.Duration
	// Value used if sync_nodes_status=False, only for node startup. When a node
	// has just been created, e.g. at cluster bootstrap or node creation, we give it a longer grace period.
	nodeStartupGracePeriod time.Duration
	// Per-Node map storing the last observed Status together with a local time when it was observed.
	// This timestamp is to be used instead of the LastProbeTime stored in Condition. We do this
	// to avoid problems with time skew across the cluster.
	nodeStatusMap map[string]nodeStatusData
	now           func() unversioned.Time
	// Lock to access evictor workers.
	evictorLock *sync.Mutex
	// Workers that evict pods from unresponsive nodes.
	podEvictor         *RateLimitedTimedQueue
	terminationEvictor *RateLimitedTimedQueue
	podEvictionTimeout time.Duration
	// The maximum duration before a pod evicted from a node can be forcefully terminated.
	maximumGracePeriod time.Duration
	recorder           record.EventRecorder
}

// NewNodeController returns a new node controller to sync instances from cloudprovider.
func NewNodeController(
	cloud cloudprovider.Interface,
	kubeClient client.Interface,
	podEvictionTimeout time.Duration,
	deletionEvictionLimiter util.RateLimiter,
	terminationEvictionLimiter util.RateLimiter,
	nodeMonitorGracePeriod time.Duration,
	nodeStartupGracePeriod time.Duration,
	nodeMonitorPeriod time.Duration,
	clusterCIDR *net.IPNet,
	allocateNodeCIDRs bool) *NodeController {
	eventBroadcaster := record.NewBroadcaster()
	recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "controllermanager"})
	eventBroadcaster.StartLogging(glog.Infof)
	if kubeClient != nil {
		glog.Infof("Sending events to api server.")
		eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))
	} else {
		glog.Infof("No api server defined - no events will be sent to API server.")
	}
	if allocateNodeCIDRs && clusterCIDR == nil {
		glog.Fatal("NodeController: Must specify clusterCIDR if allocateNodeCIDRs == true.")
	}
	evictorLock := sync.Mutex{}
	return &NodeController{
		cloud:                  cloud,
		knownNodeSet:           make(sets.String),
		kubeClient:             kubeClient,
		recorder:               recorder,
		podEvictionTimeout:     podEvictionTimeout,
		maximumGracePeriod:     5 * time.Minute,
		evictorLock:            &evictorLock,
		podEvictor:             NewRateLimitedTimedQueue(deletionEvictionLimiter),
		terminationEvictor:     NewRateLimitedTimedQueue(terminationEvictionLimiter),
		nodeStatusMap:          make(map[string]nodeStatusData),
		nodeMonitorGracePeriod: nodeMonitorGracePeriod,
		nodeMonitorPeriod:      nodeMonitorPeriod,
		nodeStartupGracePeriod: nodeStartupGracePeriod,
		lookupIP:               net.LookupIP,
		now:                    unversioned.Now,
		clusterCIDR:            clusterCIDR,
		allocateNodeCIDRs:      allocateNodeCIDRs,
	}
}
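
// A minimal wiring sketch (illustrative only; the rate limiter constructor and
// the timing values below are assumptions, not defaults defined in this file):
//
//	nc := NewNodeController(
//		nil,        // no cloud provider
//		kubeClient, // client.Interface
//		5*time.Minute, // podEvictionTimeout
//		util.NewTokenBucketRateLimiter(0.1, 10),
//		util.NewTokenBucketRateLimiter(0.1, 10),
//		40*time.Second, // nodeMonitorGracePeriod
//		60*time.Second, // nodeStartupGracePeriod
//		5*time.Second,  // nodeMonitorPeriod
//		clusterCIDR,
//		true, // allocateNodeCIDRs
//	)
//	nc.Run(5 * time.Second)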

// Run starts an asynchronous loop that monitors the status of cluster nodes.
func (nc *NodeController) Run(period time.Duration) {
	// Incorporate the results of node status pushed from kubelet to master.
	go util.Until(func() {
		if err := nc.monitorNodeStatus(); err != nil {
			glog.Errorf("Error monitoring node status: %v", err)
		}
	}, nc.nodeMonitorPeriod, util.NeverStop)

	// Managing eviction of nodes:
	// 1. when we delete pods off a node, if the node was not empty at the time we then
	//    queue a termination watcher
	//    a. If we hit an error, retry deletion
	// 2. The terminator loop ensures that pods are eventually cleaned and we never
	//    terminate a pod in a time period less than nc.maximumGracePeriod. AddedAt
	//    is the time from which we measure "has this pod been terminating too long",
	//    after which we will delete the pod with grace period 0 (force delete).
	//    a. If we hit errors, retry instantly
	//    b. If there are no pods left terminating, exit
	//    c. If there are pods still terminating, wait for their estimated completion
	//       before retrying
	go util.Until(func() {
		nc.evictorLock.Lock()
		defer nc.evictorLock.Unlock()
		nc.podEvictor.Try(func(value TimedValue) (bool, time.Duration) {
			remaining, err := nc.deletePods(value.Value)
			if err != nil {
				util.HandleError(fmt.Errorf("unable to evict node %q: %v", value.Value, err))
				return false, 0
			}
			if remaining {
				nc.terminationEvictor.Add(value.Value)
			}
			return true, 0
		})
	}, nodeEvictionPeriod, util.NeverStop)

	// TODO: replace with a controller that ensures pods that are terminating complete
	// in a particular time period
	go util.Until(func() {
		nc.evictorLock.Lock()
		defer nc.evictorLock.Unlock()
		nc.terminationEvictor.Try(func(value TimedValue) (bool, time.Duration) {
			completed, remaining, err := nc.terminatePods(value.Value, value.AddedAt)
			if err != nil {
				util.HandleError(fmt.Errorf("unable to terminate pods on node %q: %v", value.Value, err))
				return false, 0
			}

			if completed {
				glog.Infof("All pods terminated on %s", value.Value)
				nc.recordNodeEvent(value.Value, "TerminatedAllPods", fmt.Sprintf("Terminated all Pods on Node %s.", value.Value))
				return true, 0
			}

			glog.V(2).Infof("Pods terminating since %s on %q, estimated completion %s", value.AddedAt, value.Value, remaining)
			// clamp very short intervals
			if remaining < nodeEvictionPeriod {
				remaining = nodeEvictionPeriod
			}
			return false, remaining
		})
	}, nodeEvictionPeriod, util.NeverStop)
}

// generateCIDRs generates num pod CIDRs that could be assigned to nodes.
func generateCIDRs(clusterCIDR *net.IPNet, num int) sets.String {
	res := sets.NewString()
	cidrIP := clusterCIDR.IP.To4()
	for i := 0; i < num; i++ {
		// TODO: Make the CIDRs configurable.
		b1 := byte(i >> 8)
		b2 := byte(i % 256)
		res.Insert(fmt.Sprintf("%d.%d.%d.0/24", cidrIP[0], cidrIP[1]+b1, cidrIP[2]+b2))
	}
	return res
}
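
// For illustration (the cluster CIDR below is hypothetical): with clusterCIDR
// 10.244.0.0/16, successive values of i map to
//	i=0   -> 10.244.0.0/24
//	i=1   -> 10.244.1.0/24
//	i=255 -> 10.244.255.0/24
//	i=256 -> 10.245.0.0/24 (the second octet is bumped once i exceeds 255)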

// getCondition returns a condition object for the specific condition
// type, or nil if the condition is not set.
func (nc *NodeController) getCondition(status *api.NodeStatus, conditionType api.NodeConditionType) *api.NodeCondition {
	if status == nil {
		return nil
	}
	for i := range status.Conditions {
		if status.Conditions[i].Type == conditionType {
			return &status.Conditions[i]
		}
	}
	return nil
}

// monitorNodeStatus verifies that node status is constantly updated by kubelet, and if not,
// posts "NodeReady==ConditionUnknown". It also evicts all pods if the node is not ready or
// not reachable for a long period of time.
func (nc *NodeController) monitorNodeStatus() error {
	nodes, err := nc.kubeClient.Nodes().List(labels.Everything(), fields.Everything())
	if err != nil {
		return err
	}
	for _, node := range nodes.Items {
		if !nc.knownNodeSet.Has(node.Name) {
			glog.V(1).Infof("NodeController observed a new Node: %#v", node)
			nc.recordNodeEvent(node.Name, "RegisteredNode", fmt.Sprintf("Registered Node %v in NodeController", node.Name))
			nc.cancelPodEviction(node.Name)
			nc.knownNodeSet.Insert(node.Name)
		}
	}
	// If there's a difference between the lengths of known and observed Nodes,
	// some Node must have been removed.
	if len(nc.knownNodeSet) != len(nodes.Items) {
		observedSet := make(sets.String)
		for _, node := range nodes.Items {
			observedSet.Insert(node.Name)
		}
		deleted := nc.knownNodeSet.Difference(observedSet)
		for nodeName := range deleted {
			glog.V(1).Infof("NodeController observed a Node deletion: %v", nodeName)
			nc.recordNodeEvent(nodeName, "RemovingNode", fmt.Sprintf("Removing Node %v from NodeController", nodeName))
			nc.evictPods(nodeName)
			nc.knownNodeSet.Delete(nodeName)
		}
	}

	if nc.allocateNodeCIDRs {
		// TODO (cjcullen): Use pkg/controller/framework to watch nodes and
		// reduce lists/decouple this from monitoring status.
		nc.reconcileNodeCIDRs(nodes)
	}
	for i := range nodes.Items {
		var gracePeriod time.Duration
		var lastReadyCondition api.NodeCondition
		var readyCondition *api.NodeCondition
		node := &nodes.Items[i]
		for rep := 0; rep < nodeStatusUpdateRetry; rep++ {
			gracePeriod, lastReadyCondition, readyCondition, err = nc.tryUpdateNodeStatus(node)
			if err == nil {
				break
			}
			name := node.Name
			node, err = nc.kubeClient.Nodes().Get(name)
			if err != nil {
				glog.Errorf("Failed while getting a Node to retry updating NodeStatus. Probably Node %s was deleted.", name)
				break
			}
		}
		if err != nil {
			glog.Errorf("Update status of Node %v from NodeController exceeds retry count. "+
				"Skipping - no pods will be evicted.", node.Name)
			continue
		}
		decisionTimestamp := nc.now()
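		// Timing example (the values are hypothetical): with podEvictionTimeout = 5m and
		// gracePeriod = 40s, a node whose Ready condition turned False at 10:00 has its
		// pods queued for eviction at the first check after 10:05; for a node that went
		// Unknown, the clock starts at the last successful probe and is shortened by the
		// grace period already granted (10:00 probe -> eviction after 10:04:20).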
		if readyCondition != nil {
			// Check eviction timeout against decisionTimestamp
			if lastReadyCondition.Status == api.ConditionFalse &&
				decisionTimestamp.After(nc.nodeStatusMap[node.Name].readyTransitionTimestamp.Add(nc.podEvictionTimeout)) {
				if nc.evictPods(node.Name) {
					glog.Infof("Evicting pods on node %s: %v is later than %v + %v", node.Name, decisionTimestamp, nc.nodeStatusMap[node.Name].readyTransitionTimestamp, nc.podEvictionTimeout)
				}
			}
			if lastReadyCondition.Status == api.ConditionUnknown &&
				decisionTimestamp.After(nc.nodeStatusMap[node.Name].probeTimestamp.Add(nc.podEvictionTimeout-gracePeriod)) {
				if nc.evictPods(node.Name) {
					glog.Infof("Evicting pods on node %s: %v is later than %v + %v", node.Name, decisionTimestamp, nc.nodeStatusMap[node.Name].probeTimestamp, nc.podEvictionTimeout-gracePeriod)
				}
			}
			if lastReadyCondition.Status == api.ConditionTrue {
				if nc.cancelPodEviction(node.Name) {
					glog.Infof("Node %s is ready again, cancelled pod eviction", node.Name)
				}
			}
			// Report node event.
			if readyCondition.Status != api.ConditionTrue && lastReadyCondition.Status == api.ConditionTrue {
				nc.recordNodeStatusChange(node, "NodeNotReady")
			}
			// Check with the cloud provider to see if the node still exists. If it
			// doesn't, delete the node and all pods scheduled on the node.
			if readyCondition.Status != api.ConditionTrue && nc.cloud != nil {
				instances, ok := nc.cloud.Instances()
				if !ok {
					glog.Errorf("%v", ErrCloudInstance)
					continue
				}
				if _, err := instances.ExternalID(node.Name); err != nil && err == cloudprovider.InstanceNotFound {
					glog.Infof("Deleting node (no longer present in cloud provider): %s", node.Name)
					nc.recordNodeEvent(node.Name, "DeletingNode", fmt.Sprintf("Deleting Node %v because it's not present according to cloud provider", node.Name))
					remaining, err := nc.hasPods(node.Name)
					if err != nil {
						glog.Errorf("Unable to determine whether node %s has pods, will retry: %v", node.Name, err)
						continue
					}
					if remaining {
						// Queue eviction of the pods on the node.
						glog.V(2).Infof("Deletion of node %s is delayed while pods are evicted", node.Name)
						nc.evictPods(node.Name)
						continue
					}

					if err := nc.kubeClient.Nodes().Delete(node.Name); err != nil {
						glog.Errorf("Unable to delete node %s: %v", node.Name, err)
						continue
					}
				}
			}
		}
	}
	return nil
}

// reconcileNodeCIDRs looks at each node and assigns it a valid CIDR
// if it doesn't currently have one.
func (nc *NodeController) reconcileNodeCIDRs(nodes *api.NodeList) {
	glog.V(4).Infof("Reconciling cidrs for %d nodes", len(nodes.Items))
	// TODO(roberthbailey): This seems inefficient. Why re-calculate CIDRs
	// on each sync period?
	availableCIDRs := generateCIDRs(nc.clusterCIDR, len(nodes.Items))
	for _, node := range nodes.Items {
		if node.Spec.PodCIDR != "" {
			glog.V(4).Infof("CIDR %s is already being used by node %s", node.Spec.PodCIDR, node.Name)
			availableCIDRs.Delete(node.Spec.PodCIDR)
		}
	}
	for _, node := range nodes.Items {
		if node.Spec.PodCIDR == "" {
			podCIDR, found := availableCIDRs.PopAny()
			if !found {
				nc.recordNodeStatusChange(&node, "CIDRNotAvailable")
				continue
			}
			glog.V(4).Infof("Assigning node %s CIDR %s", node.Name, podCIDR)
			node.Spec.PodCIDR = podCIDR
			if _, err := nc.kubeClient.Nodes().Update(&node); err != nil {
				nc.recordNodeStatusChange(&node, "CIDRAssignmentFailed")
			}
		}
	}
}

func (nc *NodeController) recordNodeEvent(nodeName string, reason string, event string) {
	ref := &api.ObjectReference{
		Kind:      "Node",
		Name:      nodeName,
		UID:       types.UID(nodeName),
		Namespace: "",
	}
	glog.V(2).Infof("Recording %s event message for node %s", event, nodeName)
	nc.recorder.Eventf(ref, reason, "Node %s event: %s", nodeName, event)
}

func (nc *NodeController) recordNodeStatusChange(node *api.Node, newStatus string) {
	ref := &api.ObjectReference{
		Kind:      "Node",
		Name:      node.Name,
		UID:       types.UID(node.Name),
		Namespace: "",
	}
	glog.V(2).Infof("Recording status change %s event message for node %s", newStatus, node.Name)
	// TODO: This requires a transaction, either both node status is updated
	// and event is recorded or neither should happen, see issue #6055.
	nc.recorder.Eventf(ref, newStatus, "Node %s status is now: %s", node.Name, newStatus)
}

// tryUpdateNodeStatus checks the conditions of the given node and tries to update it. It returns
// the grace period to which the node is entitled, the last observed and the current Ready condition,
// and an error if one occurred.
func (nc *NodeController) tryUpdateNodeStatus(node *api.Node) (time.Duration, api.NodeCondition, *api.NodeCondition, error) {
	var err error
	var gracePeriod time.Duration
	var lastReadyCondition api.NodeCondition
	readyCondition := nc.getCondition(&node.Status, api.NodeReady)
	if readyCondition == nil {
		// If ready condition is nil, then kubelet (or nodecontroller) never posted node status.
		// A fake ready condition is created, where LastHeartbeatTime and LastTransitionTime are set
		// to node.CreationTimestamp, to avoid handling this corner case elsewhere.
		lastReadyCondition = api.NodeCondition{
			Type:               api.NodeReady,
			Status:             api.ConditionUnknown,
			LastHeartbeatTime:  node.CreationTimestamp,
			LastTransitionTime: node.CreationTimestamp,
		}
		gracePeriod = nc.nodeStartupGracePeriod
		nc.nodeStatusMap[node.Name] = nodeStatusData{
			status:                   node.Status,
			probeTimestamp:           node.CreationTimestamp,
			readyTransitionTimestamp: node.CreationTimestamp,
		}
	} else {
		// If ready condition is not nil, make a copy of it, since we may modify it in place later.
		lastReadyCondition = *readyCondition
		gracePeriod = nc.nodeMonitorGracePeriod
	}

	savedNodeStatus, found := nc.nodeStatusMap[node.Name]
	// There are the following cases to check:
	// - both saved and new status have no Ready Condition set - we leave everything as it is,
	// - saved status has no Ready Condition, but current one does - NodeController was restarted with Node data already present in etcd,
	// - saved status has some Ready Condition, but current one does not - it's an error, but we fill it up because that's probably a good thing to do,
	// - both saved and current statuses have Ready Conditions and they have the same LastProbeTime - nothing happened on that Node, it may be
	//   unresponsive, so we leave it as it is,
	// - both saved and current statuses have Ready Conditions, they have different LastProbeTimes, but the same Ready Condition State -
	//   everything's in order, no transition occurred, we update only probeTimestamp,
	// - both saved and current statuses have Ready Conditions, different LastProbeTimes and different Ready Condition States -
	//   the Ready Condition changed its state since we last saw it, so we update both probeTimestamp and readyTransitionTimestamp.
	// TODO: things to consider:
	//   - if 'LastProbeTime' has gone back in time it's probably an error, currently we ignore it,
	//   - currently the only correct Ready State transition outside of NodeController is Kubelet marking the node ready; we don't check
	//     if that's the case, but it does not seem necessary.
	savedCondition := nc.getCondition(&savedNodeStatus.status, api.NodeReady)
	observedCondition := nc.getCondition(&node.Status, api.NodeReady)
	if !found {
		glog.Warningf("Missing timestamp for Node %s. Assuming now as a timestamp.", node.Name)
		savedNodeStatus = nodeStatusData{
			status:                   node.Status,
			probeTimestamp:           nc.now(),
			readyTransitionTimestamp: nc.now(),
		}
		nc.nodeStatusMap[node.Name] = savedNodeStatus
	} else if savedCondition == nil && observedCondition != nil {
		glog.V(1).Infof("Creating timestamp entry for newly observed Node %s", node.Name)
		savedNodeStatus = nodeStatusData{
			status:                   node.Status,
			probeTimestamp:           nc.now(),
			readyTransitionTimestamp: nc.now(),
		}
		nc.nodeStatusMap[node.Name] = savedNodeStatus
	} else if savedCondition != nil && observedCondition == nil {
		glog.Errorf("ReadyCondition was removed from Status of Node %s", node.Name)
		// TODO: figure out what to do in this case. For now we do the same thing as above.
		savedNodeStatus = nodeStatusData{
			status:                   node.Status,
			probeTimestamp:           nc.now(),
			readyTransitionTimestamp: nc.now(),
		}
		nc.nodeStatusMap[node.Name] = savedNodeStatus
	} else if savedCondition != nil && observedCondition != nil && savedCondition.LastHeartbeatTime != observedCondition.LastHeartbeatTime {
		var transitionTime unversioned.Time
		// If ReadyCondition changed since the last time we checked, we update the transition timestamp to "now",
		// otherwise we leave it as it is.
		if savedCondition.LastTransitionTime != observedCondition.LastTransitionTime {
			glog.V(3).Infof("ReadyCondition for Node %s transitioned from %v to %v", node.Name, savedCondition.Status, observedCondition.Status)
			transitionTime = nc.now()
		} else {
			transitionTime = savedNodeStatus.readyTransitionTimestamp
		}
		glog.V(3).Infof("Node %s ReadyCondition updated. Updating timestamp: %+v\n vs %+v.", node.Name, savedNodeStatus.status, node.Status)
		savedNodeStatus = nodeStatusData{
			status:                   node.Status,
			probeTimestamp:           nc.now(),
			readyTransitionTimestamp: transitionTime,
		}
		nc.nodeStatusMap[node.Name] = savedNodeStatus
	}
	if nc.now().After(savedNodeStatus.probeTimestamp.Add(gracePeriod)) {
		// NodeReady condition was last set longer ago than gracePeriod, so update it to Unknown
		// (regardless of its current value) in the master, without contacting kubelet.
		if readyCondition == nil {
			glog.V(2).Infof("node %v has never been updated by kubelet", node.Name)
			node.Status.Conditions = append(node.Status.Conditions, api.NodeCondition{
				Type:               api.NodeReady,
				Status:             api.ConditionUnknown,
				Reason:             "NodeStatusNeverUpdated",
				Message:            "Kubelet never posted node status.",
				LastHeartbeatTime:  node.CreationTimestamp,
				LastTransitionTime: nc.now(),
			})
		} else {
			glog.V(2).Infof("node %v hasn't been updated for %+v. Last ready condition is: %+v",
				node.Name, nc.now().Time.Sub(savedNodeStatus.probeTimestamp.Time), lastReadyCondition)
			if lastReadyCondition.Status != api.ConditionUnknown {
				readyCondition.Status = api.ConditionUnknown
				readyCondition.Reason = "NodeStatusUnknown"
				readyCondition.Message = "Kubelet stopped posting node status."
				// LastHeartbeatTime is the last time we heard from kubelet.
				readyCondition.LastHeartbeatTime = lastReadyCondition.LastHeartbeatTime
				readyCondition.LastTransitionTime = nc.now()
			}
		}
		if !api.Semantic.DeepEqual(nc.getCondition(&node.Status, api.NodeReady), &lastReadyCondition) {
			if _, err = nc.kubeClient.Nodes().UpdateStatus(node); err != nil {
				glog.Errorf("Error updating node %s: %v", node.Name, err)
				return gracePeriod, lastReadyCondition, readyCondition, err
			} else {
				nc.nodeStatusMap[node.Name] = nodeStatusData{
					status:                   node.Status,
					probeTimestamp:           nc.nodeStatusMap[node.Name].probeTimestamp,
					readyTransitionTimestamp: nc.now(),
				}
				return gracePeriod, lastReadyCondition, readyCondition, nil
			}
		}
	}

	return gracePeriod, lastReadyCondition, readyCondition, err
}

// hasPods returns true if the provided node still has pods scheduled to it, or an error if
// the server could not be contacted.
func (nc *NodeController) hasPods(nodeName string) (bool, error) {
	pods, err := nc.kubeClient.Pods(api.NamespaceAll).List(labels.Everything(), fields.OneTermEqualSelector(client.PodHost, nodeName))
	if err != nil {
		return false, err
	}
	return len(pods.Items) > 0, nil
}

// evictPods queues an eviction for the provided node name, and returns false if the node is already
// queued for eviction.
func (nc *NodeController) evictPods(nodeName string) bool {
	nc.evictorLock.Lock()
	defer nc.evictorLock.Unlock()
	return nc.podEvictor.Add(nodeName)
}

// cancelPodEviction removes any queued evictions, typically because the node is available again. It
// returns true if an eviction was queued.
func (nc *NodeController) cancelPodEviction(nodeName string) bool {
	nc.evictorLock.Lock()
	defer nc.evictorLock.Unlock()
	wasDeleting := nc.podEvictor.Remove(nodeName)
	wasTerminating := nc.terminationEvictor.Remove(nodeName)
	if wasDeleting || wasTerminating {
		glog.V(2).Infof("Cancelling pod Eviction on Node: %v", nodeName)
		return true
	}
	return false
}

// deletePods will delete all pods from master running on the given node, and return true
// if the deletion of any pods was initiated (those pods may still be terminating).
func (nc *NodeController) deletePods(nodeName string) (bool, error) {
	remaining := false
	pods, err := nc.kubeClient.Pods(api.NamespaceAll).List(labels.Everything(), fields.OneTermEqualSelector(client.PodHost, nodeName))
	if err != nil {
		return remaining, err
	}
	if len(pods.Items) > 0 {
		nc.recordNodeEvent(nodeName, "DeletingAllPods", fmt.Sprintf("Deleting all Pods from Node %v.", nodeName))
	}
	for _, pod := range pods.Items {
		// Defensive check, also needed for tests.
		if pod.Spec.NodeName != nodeName {
			continue
		}
		// If the pod is already marked for deletion, ignore it.
		if pod.DeletionGracePeriodSeconds != nil {
			continue
		}
		glog.V(2).Infof("Starting deletion of pod %v", pod.Name)
		nc.recorder.Eventf(&pod, "NodeControllerEviction", "Marking for deletion Pod %s from Node %s", pod.Name, nodeName)
		if err := nc.kubeClient.Pods(pod.Namespace).Delete(pod.Name, nil); err != nil {
			return false, err
		}
		remaining = true
	}
	return remaining, nil
}

// terminatePods will ensure that all pods on the given node that are in terminating state are eventually
// cleaned up. It returns true if the node has no pods in terminating state, a duration that indicates how
// long before we should check again (the next deadline for a pod to complete), and an error, if any.
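//
// Timing example (the grace periods below are hypothetical): a pod evicted at AddedAt=10:00 with a
// requested 30s grace period is force-deleted (grace period 0) at the first check after 10:00:30;
// a pod requesting 10m is clamped to maximumGracePeriod (5m) and force-deleted after 10:05.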
func (nc *NodeController) terminatePods(nodeName string, since time.Time) (bool, time.Duration, error) {
	// The time before we should try again.
	nextAttempt := time.Duration(0)
	// Have we deleted all pods?
	complete := true
	pods, err := nc.kubeClient.Pods(api.NamespaceAll).List(labels.Everything(),
		fields.OneTermEqualSelector(client.PodHost, nodeName))
	if err != nil {
		return false, nextAttempt, err
	}
	now := time.Now()
	elapsed := now.Sub(since)
	for _, pod := range pods.Items {
		// Defensive check, also needed for tests.
		if pod.Spec.NodeName != nodeName {
			continue
		}
		// Only clean up pods that are already terminating.
		if pod.DeletionGracePeriodSeconds == nil {
			continue
		}
		// The user's requested grace period, clamped to the controller's maximum.
		grace := time.Duration(*pod.DeletionGracePeriodSeconds) * time.Second
		if grace > nc.maximumGracePeriod {
			grace = nc.maximumGracePeriod
		}
		// The time remaining before the pod should have been deleted.
		remaining := grace - elapsed
		if remaining < 0 {
			remaining = 0
			glog.V(2).Infof("Removing pod %v after %s grace period", pod.Name, grace)
			nc.recordNodeEvent(nodeName, "TerminatingEvictedPod", fmt.Sprintf("Pod %s has exceeded the grace period for deletion after being evicted from Node %q and is being force killed", pod.Name, nodeName))
			if err := nc.kubeClient.Pods(pod.Namespace).Delete(pod.Name, api.NewDeleteOptions(0)); err != nil {
				glog.Errorf("Error completing deletion of pod %s: %v", pod.Name, err)
				complete = false
			}
		} else {
			glog.V(2).Infof("Pod %v still terminating, requested grace period %s, %s remaining", pod.Name, grace, remaining)
			complete = false
		}

		if nextAttempt < remaining {
			nextAttempt = remaining
		}
	}
	return complete, nextAttempt, nil
}