/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kubelet

import (
	"context"
	"fmt"
	"net"
	goruntime "runtime"
	"sort"
	"strings"
	"time"

	v1 "k8s.io/api/core/v1"
	apiequality "k8s.io/apimachinery/pkg/api/equality"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/sets"
	cloudprovider "k8s.io/cloud-provider"
	cloudproviderapi "k8s.io/cloud-provider/api"
	"k8s.io/klog/v2"
	k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1"
	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
	"k8s.io/kubernetes/pkg/kubelet/events"
	"k8s.io/kubernetes/pkg/kubelet/nodestatus"
	"k8s.io/kubernetes/pkg/kubelet/util"
	nodeutil "k8s.io/kubernetes/pkg/util/node"
	taintutil "k8s.io/kubernetes/pkg/util/taints"
	volutil "k8s.io/kubernetes/pkg/volume/util"
)

// registerWithAPIServer registers the node with the cluster master. It is safe
// to call multiple times, but not concurrently (kl.registrationCompleted is
// not locked).
func (kl *Kubelet) registerWithAPIServer() {
	if kl.registrationCompleted {
		return
	}
	step := 100 * time.Millisecond
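
	// Retry forever with exponential backoff: the sleep between attempts
	// doubles each iteration and is capped at 7 seconds.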
	for {
		time.Sleep(step)
		step = step * 2
		if step >= 7*time.Second {
			step = 7 * time.Second
		}

		node, err := kl.initialNode(context.TODO())
		if err != nil {
			klog.Errorf("Unable to construct v1.Node object for kubelet: %v", err)
			continue
		}

		klog.Infof("Attempting to register node %s", node.Name)
		registered := kl.tryRegisterWithAPIServer(node)
		if registered {
			klog.Infof("Successfully registered node %s", node.Name)
			kl.registrationCompleted = true
			return
		}
	}
}

// tryRegisterWithAPIServer makes an attempt to register the given node with
// the API server, returning a boolean indicating whether the attempt was
// successful. If a node with the same name already exists, it reconciles the
// value of the annotation for controller-managed attach-detach of attachable
// persistent volumes for the node.
func (kl *Kubelet) tryRegisterWithAPIServer(node *v1.Node) bool {
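	// Try to create the Node object first; if it already exists, fall through
	// and reconcile the existing object instead.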
	_, err := kl.kubeClient.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{})
	if err == nil {
		return true
	}

	if !apierrors.IsAlreadyExists(err) {
		klog.Errorf("Unable to register node %q with API server: %v", kl.nodeName, err)
		return false
	}

	existingNode, err := kl.kubeClient.CoreV1().Nodes().Get(context.TODO(), string(kl.nodeName), metav1.GetOptions{})
	if err != nil {
		klog.Errorf("Unable to register node %q with API server: error getting existing node: %v", kl.nodeName, err)
		return false
	}
	if existingNode == nil {
		klog.Errorf("Unable to register node %q with API server: no node instance returned", kl.nodeName)
		return false
	}

	originalNode := existingNode.DeepCopy()

	klog.Infof("Node %s was previously registered", kl.nodeName)

	// Edge case: the node was previously registered; reconcile
	// the value of the controller-managed attach-detach
	// annotation.
	requiresUpdate := kl.reconcileCMADAnnotationWithExistingNode(node, existingNode)
	requiresUpdate = kl.updateDefaultLabels(node, existingNode) || requiresUpdate
	requiresUpdate = kl.reconcileExtendedResource(node, existingNode) || requiresUpdate
	requiresUpdate = kl.reconcileHugePageResource(node, existingNode) || requiresUpdate
	if requiresUpdate {
		if _, _, err := nodeutil.PatchNodeStatus(kl.kubeClient.CoreV1(), types.NodeName(kl.nodeName), originalNode, existingNode); err != nil {
			klog.Errorf("Unable to reconcile node %q with API server: error updating node: %v", kl.nodeName, err)
			return false
		}
	}

	return true
}

// reconcileHugePageResource updates huge page capacity for each supported page
// size and removes huge page sizes that are no longer supported.
func (kl *Kubelet) reconcileHugePageResource(initialNode, existingNode *v1.Node) bool {
	requiresUpdate := false
	supportedHugePageResources := sets.String{}

	for resourceName := range initialNode.Status.Capacity {
		if !v1helper.IsHugePageResourceName(resourceName) {
			continue
		}
		supportedHugePageResources.Insert(string(resourceName))

		initialCapacity := initialNode.Status.Capacity[resourceName]
		initialAllocatable := initialNode.Status.Allocatable[resourceName]

		capacity, resourceIsSupported := existingNode.Status.Capacity[resourceName]
		allocatable := existingNode.Status.Allocatable[resourceName]

		// Add or update capacity if the size was previously unsupported or has changed
		if !resourceIsSupported || capacity.Cmp(initialCapacity) != 0 {
			existingNode.Status.Capacity[resourceName] = initialCapacity.DeepCopy()
			requiresUpdate = true
		}

		// Add or update allocatable if the size was previously unsupported or has changed
		if !resourceIsSupported || allocatable.Cmp(initialAllocatable) != 0 {
			existingNode.Status.Allocatable[resourceName] = initialAllocatable.DeepCopy()
			requiresUpdate = true
		}
	}

	for resourceName := range existingNode.Status.Capacity {
		if !v1helper.IsHugePageResourceName(resourceName) {
			continue
		}

		// If a huge page size is no longer supported, remove it from the node
		if !supportedHugePageResources.Has(string(resourceName)) {
			delete(existingNode.Status.Capacity, resourceName)
			delete(existingNode.Status.Allocatable, resourceName)
			klog.Infof("Removing now unsupported huge page resource named: %s", resourceName)
			requiresUpdate = true
		}
	}
	return requiresUpdate
}

// reconcileExtendedResource zeroes out extended resource capacity during reconciliation.
func (kl *Kubelet) reconcileExtendedResource(initialNode, node *v1.Node) bool {
	requiresUpdate := false
	// Check with the device manager to see if the node has been recreated, in which case
	// extended resources should be zeroed until they are available.
	if kl.containerManager.ShouldResetExtendedResourceCapacity() {
		for k := range node.Status.Capacity {
			if v1helper.IsExtendedResourceName(k) {
				klog.Infof("Zero out resource %s capacity in existing node.", k)
				node.Status.Capacity[k] = *resource.NewQuantity(int64(0), resource.DecimalSI)
				node.Status.Allocatable[k] = *resource.NewQuantity(int64(0), resource.DecimalSI)
				requiresUpdate = true
			}
		}
	}
	return requiresUpdate
}

// updateDefaultLabels will set the default labels on the node
func (kl *Kubelet) updateDefaultLabels(initialNode, existingNode *v1.Node) bool {
	defaultLabels := []string{
		v1.LabelHostname,
		v1.LabelZoneFailureDomainStable,
		v1.LabelZoneRegionStable,
		v1.LabelZoneFailureDomain,
		v1.LabelZoneRegion,
		v1.LabelInstanceTypeStable,
		v1.LabelInstanceType,
		v1.LabelOSStable,
		v1.LabelArchStable,
		v1.LabelWindowsBuild,
	}

	needsUpdate := false
	if existingNode.Labels == nil {
		existingNode.Labels = make(map[string]string)
	}
	// Set default labels, but make sure not to set labels with empty values.
	for _, label := range defaultLabels {
		if _, hasInitialValue := initialNode.Labels[label]; !hasInitialValue {
			continue
		}

		if existingNode.Labels[label] != initialNode.Labels[label] {
			existingNode.Labels[label] = initialNode.Labels[label]
			needsUpdate = true
		}

		if existingNode.Labels[label] == "" {
			delete(existingNode.Labels, label)
		}
	}

	return needsUpdate
}

// reconcileCMADAnnotationWithExistingNode reconciles the controller-managed
// attach-detach annotation on a new node and the existing node, returning
// whether the existing node must be updated.
func (kl *Kubelet) reconcileCMADAnnotationWithExistingNode(node, existingNode *v1.Node) bool {
	var (
		existingCMAAnnotation    = existingNode.Annotations[volutil.ControllerManagedAttachAnnotation]
		newCMAAnnotation, newSet = node.Annotations[volutil.ControllerManagedAttachAnnotation]
	)

	if newCMAAnnotation == existingCMAAnnotation {
		return false
	}

	// If the just-constructed node and the existing node do
	// not have the same value, update the existing node with
	// the correct value of the annotation.
	if !newSet {
		klog.Info("Controller attach-detach setting changed to false; updating existing Node")
		delete(existingNode.Annotations, volutil.ControllerManagedAttachAnnotation)
	} else {
		klog.Info("Controller attach-detach setting changed to true; updating existing Node")
		if existingNode.Annotations == nil {
			existingNode.Annotations = make(map[string]string)
		}
		existingNode.Annotations[volutil.ControllerManagedAttachAnnotation] = newCMAAnnotation
	}

	return true
}

// initialNode constructs the initial v1.Node for this Kubelet, incorporating node
// labels, information from the cloud provider, and Kubelet configuration.
func (kl *Kubelet) initialNode(ctx context.Context) (*v1.Node, error) {
	node := &v1.Node{
		ObjectMeta: metav1.ObjectMeta{
			Name: string(kl.nodeName),
			Labels: map[string]string{
				v1.LabelHostname:   kl.hostname,
				v1.LabelOSStable:   goruntime.GOOS,
				v1.LabelArchStable: goruntime.GOARCH,
			},
		},
		Spec: v1.NodeSpec{
			Unschedulable: !kl.registerSchedulable,
		},
	}
	osLabels, err := getOSSpecificLabels()
	if err != nil {
		return nil, err
	}
	for label, value := range osLabels {
		node.Labels[label] = value
	}

	nodeTaints := make([]v1.Taint, 0)
	if len(kl.registerWithTaints) > 0 {
		taints := make([]v1.Taint, len(kl.registerWithTaints))
		for i := range kl.registerWithTaints {
			if err := k8s_api_v1.Convert_core_Taint_To_v1_Taint(&kl.registerWithTaints[i], &taints[i], nil); err != nil {
				return nil, err
			}
		}
		nodeTaints = append(nodeTaints, taints...)
	}

	unschedulableTaint := v1.Taint{
		Key:    v1.TaintNodeUnschedulable,
		Effect: v1.TaintEffectNoSchedule,
	}

	// Taint the node with TaintNodeUnschedulable when initializing it to avoid a
	// race condition; refer to #63897 for more detail.
	if node.Spec.Unschedulable &&
		!taintutil.TaintExists(nodeTaints, &unschedulableTaint) {
		nodeTaints = append(nodeTaints, unschedulableTaint)
	}
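
	// When an external cloud provider is in use, taint the node so that most
	// workloads are held off until the cloud controller manager has initialized
	// the node and removed the taint.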
	if kl.externalCloudProvider {
		taint := v1.Taint{
			Key:    cloudproviderapi.TaintExternalCloudProvider,
			Value:  "true",
			Effect: v1.TaintEffectNoSchedule,
		}

		nodeTaints = append(nodeTaints, taint)
	}
	if len(nodeTaints) > 0 {
		node.Spec.Taints = nodeTaints
	}
	// Initially, set NodeNetworkUnavailable to true.
	if kl.providerRequiresNetworkingConfiguration() {
		node.Status.Conditions = append(node.Status.Conditions, v1.NodeCondition{
			Type:               v1.NodeNetworkUnavailable,
			Status:             v1.ConditionTrue,
			Reason:             "NoRouteCreated",
			Message:            "Node created without a route",
			LastTransitionTime: metav1.NewTime(kl.clock.Now()),
		})
	}

	if kl.enableControllerAttachDetach {
		if node.Annotations == nil {
			node.Annotations = make(map[string]string)
		}

		klog.V(2).Infof("Setting node annotation to enable volume controller attach/detach")
		node.Annotations[volutil.ControllerManagedAttachAnnotation] = "true"
	} else {
		klog.V(2).Infof("Controller attach/detach is disabled for this node; Kubelet will attach and detach volumes")
	}

	if kl.keepTerminatedPodVolumes {
		if node.Annotations == nil {
			node.Annotations = make(map[string]string)
		}
		klog.V(2).Infof("Setting node annotation to keep pod volumes of terminated pods attached to the node")
		node.Annotations[volutil.KeepTerminatedPodVolumesAnnotation] = "true"
	}

	// @question: should this be placed after the call to the cloud provider, which also applies labels?
	for k, v := range kl.nodeLabels {
		if cv, found := node.ObjectMeta.Labels[k]; found {
			klog.Warningf("the node label %s=%s will overwrite default setting %s", k, v, cv)
		}
		node.ObjectMeta.Labels[k] = v
	}

	if kl.providerID != "" {
		node.Spec.ProviderID = kl.providerID
	}

	if kl.cloud != nil {
		instances, ok := kl.cloud.Instances()
		if !ok {
			return nil, fmt.Errorf("failed to get instances from cloud provider")
		}

		// TODO: We can't assume that the node has credentials to talk to the
		// cloudprovider from arbitrary nodes. At most, we should talk to a
		// local metadata server here.
		var err error
		if node.Spec.ProviderID == "" {
			node.Spec.ProviderID, err = cloudprovider.GetInstanceProviderID(ctx, kl.cloud, kl.nodeName)
			if err != nil {
				return nil, err
			}
		}

		instanceType, err := instances.InstanceType(ctx, kl.nodeName)
		if err != nil {
			return nil, err
		}
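		// Cloud-provider values are written under both the legacy label keys and
		// the newer "stable" keys so that consumers of either form see the same data.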
		if instanceType != "" {
			klog.Infof("Adding node label from cloud provider: %s=%s", v1.LabelInstanceType, instanceType)
			node.ObjectMeta.Labels[v1.LabelInstanceType] = instanceType
			klog.Infof("Adding node label from cloud provider: %s=%s", v1.LabelInstanceTypeStable, instanceType)
			node.ObjectMeta.Labels[v1.LabelInstanceTypeStable] = instanceType
		}
		// If the cloud has zone information, label the node with the zone information
		zones, ok := kl.cloud.Zones()
		if ok {
			zone, err := zones.GetZone(ctx)
			if err != nil {
				return nil, fmt.Errorf("failed to get zone from cloud provider: %v", err)
			}
			if zone.FailureDomain != "" {
				klog.Infof("Adding node label from cloud provider: %s=%s", v1.LabelZoneFailureDomain, zone.FailureDomain)
				node.ObjectMeta.Labels[v1.LabelZoneFailureDomain] = zone.FailureDomain
				klog.Infof("Adding node label from cloud provider: %s=%s", v1.LabelZoneFailureDomainStable, zone.FailureDomain)
				node.ObjectMeta.Labels[v1.LabelZoneFailureDomainStable] = zone.FailureDomain
			}
			if zone.Region != "" {
				klog.Infof("Adding node label from cloud provider: %s=%s", v1.LabelZoneRegion, zone.Region)
				node.ObjectMeta.Labels[v1.LabelZoneRegion] = zone.Region
				klog.Infof("Adding node label from cloud provider: %s=%s", v1.LabelZoneRegionStable, zone.Region)
				node.ObjectMeta.Labels[v1.LabelZoneRegionStable] = zone.Region
			}
		}
	}

	kl.setNodeStatus(node)

	return node, nil
}

// syncNodeStatus should be called periodically from a goroutine.
// It synchronizes node status to the master if there is any change or enough
// time has passed since the last sync, registering the kubelet first if necessary.
func (kl *Kubelet) syncNodeStatus() {
	kl.syncNodeStatusMux.Lock()
	defer kl.syncNodeStatusMux.Unlock()

	if kl.kubeClient == nil || kl.heartbeatClient == nil {
		return
	}
	if kl.registerNode {
		// This will exit immediately if it doesn't need to do anything.
		kl.registerWithAPIServer()
	}
	if err := kl.updateNodeStatus(); err != nil {
		klog.Errorf("Unable to update node status: %v", err)
	}
}

// updateNodeStatus updates node status to the master with retries if there is
// any change or enough time has passed since the last sync.
func (kl *Kubelet) updateNodeStatus() error {
	klog.V(5).Infof("Updating node status")
	for i := 0; i < nodeStatusUpdateRetry; i++ {
		if err := kl.tryUpdateNodeStatus(i); err != nil {
			if i > 0 && kl.onRepeatedHeartbeatFailure != nil {
				kl.onRepeatedHeartbeatFailure()
			}
			klog.Errorf("Error updating node status, will retry: %v", err)
		} else {
			return nil
		}
	}
	return fmt.Errorf("update node status exceeds retry count")
}

// tryUpdateNodeStatus tries to update node status to the master if there is
// any change or enough time has passed since the last sync.
func (kl *Kubelet) tryUpdateNodeStatus(tryNumber int) error {
	// In large clusters, GET and PUT operations on Node objects coming
	// from here are the majority of load on apiserver and etcd.
	// To reduce the load on etcd, we are serving GET operations from the
	// apiserver cache (the data might be slightly delayed but it doesn't
	// seem to cause more conflicts - the delays are pretty small).
	// If it results in a conflict, all retries are served directly from etcd.
	opts := metav1.GetOptions{}
	if tryNumber == 0 {
		util.FromApiserverCache(&opts)
	}
	node, err := kl.heartbeatClient.CoreV1().Nodes().Get(context.TODO(), string(kl.nodeName), opts)
	if err != nil {
		return fmt.Errorf("error getting node %q: %v", kl.nodeName, err)
	}

	originalNode := node.DeepCopy()
	if originalNode == nil {
		return fmt.Errorf("nil %q node object", kl.nodeName)
	}

	podCIDRChanged := false
	if len(node.Spec.PodCIDRs) != 0 {
		// Pod CIDR could have been updated before, so we cannot rely on
		// node.Spec.PodCIDR being non-empty. We also need to know if pod CIDR is
		// actually changed.
		podCIDRs := strings.Join(node.Spec.PodCIDRs, ",")
		if podCIDRChanged, err = kl.updatePodCIDR(podCIDRs); err != nil {
			klog.Errorf(err.Error())
		}
	}

	kl.setNodeStatus(node)

	now := kl.clock.Now()
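	// If nothing has changed and the report interval has not yet elapsed, skip
	// the status PATCH; only mark the in-use volumes as reported.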
	if now.Before(kl.lastStatusReportTime.Add(kl.nodeStatusReportFrequency)) {
		if !podCIDRChanged && !nodeStatusHasChanged(&originalNode.Status, &node.Status) {
			// We must mark the volumes as ReportedInUse in volume manager's dsw even
			// if no changes were made to the node status (no volumes were added or removed
			// from the VolumesInUse list).
			//
			// The reason is that on a kubelet restart, the volume manager's dsw is
			// repopulated and the volume ReportedInUse is initialized to false, while the
			// VolumesInUse list from the Node object still contains the state from the
			// previous kubelet instantiation.
			//
			// Once the volumes are added to the dsw, the ReportedInUse field needs to be
			// synced from the VolumesInUse list in the Node.Status.
			//
			// The MarkVolumesAsReportedInUse() call cannot be performed in dsw directly
			// because it does not have access to the Node object.
			// This also cannot be populated on node status manager init because the volume
			// may not have been added to dsw at that time.
			kl.volumeManager.MarkVolumesAsReportedInUse(node.Status.VolumesInUse)
			return nil
		}
	}

	// Patch the current status on the API server
	updatedNode, _, err := nodeutil.PatchNodeStatus(kl.heartbeatClient.CoreV1(), types.NodeName(kl.nodeName), originalNode, node)
	if err != nil {
		return err
	}
	kl.lastStatusReportTime = now
	kl.setLastObservedNodeAddresses(updatedNode.Status.Addresses)
	// If the update finishes successfully, mark the volumes as ReportedInUse to
	// indicate that those volumes are already updated in the node's status.
	kl.volumeManager.MarkVolumesAsReportedInUse(updatedNode.Status.VolumesInUse)
	return nil
}

// recordNodeStatusEvent records an event of the given type with the given
// message for the node.
func (kl *Kubelet) recordNodeStatusEvent(eventType, event string) {
	klog.V(2).Infof("Recording %s event message for node %s", event, kl.nodeName)
	// TODO: This requires a transaction, either both node status is updated
	// and event is recorded or neither should happen, see issue #6055.
	kl.recorder.Eventf(kl.nodeRef, eventType, event, "Node %s status is now: %s", kl.nodeName, event)
}

// recordEvent records an event for this node; the Kubelet's nodeRef is passed to the recorder.
func (kl *Kubelet) recordEvent(eventType, event, message string) {
	kl.recorder.Eventf(kl.nodeRef, eventType, event, message)
}

// recordNodeSchedulableEvent records an event whenever the node's schedulable state changes.
func (kl *Kubelet) recordNodeSchedulableEvent(node *v1.Node) error {
	kl.lastNodeUnschedulableLock.Lock()
	defer kl.lastNodeUnschedulableLock.Unlock()
	if kl.lastNodeUnschedulable != node.Spec.Unschedulable {
		if node.Spec.Unschedulable {
			kl.recordNodeStatusEvent(v1.EventTypeNormal, events.NodeNotSchedulable)
		} else {
			kl.recordNodeStatusEvent(v1.EventTypeNormal, events.NodeSchedulable)
		}
		kl.lastNodeUnschedulable = node.Spec.Unschedulable
	}
	return nil
}

// setNodeStatus fills in the Status fields of the given Node, overwriting
// any fields that are currently set.
// TODO(madhusudancs): Simplify the logic for setting node conditions and
// refactor the node status condition code out to a different file.
func (kl *Kubelet) setNodeStatus(node *v1.Node) {
	for i, f := range kl.setNodeStatusFuncs {
		klog.V(5).Infof("Setting node status at position %v", i)
		if err := f(node); err != nil {
			klog.Errorf("Failed to set some node status fields: %s", err)
		}
	}
}

func (kl *Kubelet) setLastObservedNodeAddresses(addresses []v1.NodeAddress) {
	kl.lastObservedNodeAddressesMux.Lock()
	defer kl.lastObservedNodeAddressesMux.Unlock()
	kl.lastObservedNodeAddresses = addresses
}

func (kl *Kubelet) getLastObservedNodeAddresses() []v1.NodeAddress {
	kl.lastObservedNodeAddressesMux.RLock()
	defer kl.lastObservedNodeAddressesMux.RUnlock()
	return kl.lastObservedNodeAddresses
}

// defaultNodeStatusFuncs is a factory that generates the default set of
// setNodeStatus funcs
func (kl *Kubelet) defaultNodeStatusFuncs() []func(*v1.Node) error {
	// if cloud is not nil, we expect the cloud resource sync manager to exist
	var nodeAddressesFunc func() ([]v1.NodeAddress, error)
	if kl.cloud != nil {
		nodeAddressesFunc = kl.cloudResourceSyncManager.NodeAddresses
	}
	var validateHostFunc func() error
	if kl.appArmorValidator != nil {
		validateHostFunc = kl.appArmorValidator.ValidateHost
	}
	var setters []func(n *v1.Node) error
	setters = append(setters,
		nodestatus.NodeAddress(kl.nodeIP, kl.nodeIPValidator, kl.hostname, kl.hostnameOverridden, kl.externalCloudProvider, kl.cloud, nodeAddressesFunc),
		nodestatus.MachineInfo(string(kl.nodeName), kl.maxPods, kl.podsPerCore, kl.GetCachedMachineInfo, kl.containerManager.GetCapacity,
			kl.containerManager.GetDevicePluginResourceCapacity, kl.containerManager.GetNodeAllocatableReservation, kl.recordEvent),
		nodestatus.VersionInfo(kl.cadvisor.VersionInfo, kl.containerRuntime.Type, kl.containerRuntime.Version),
		nodestatus.DaemonEndpoints(kl.daemonEndpoints),
		nodestatus.Images(kl.nodeStatusMaxImages, kl.imageManager.GetImageList),
		nodestatus.GoRuntime(),
	)
	// Volume limits
	setters = append(setters, nodestatus.VolumeLimits(kl.volumePluginMgr.ListVolumePluginWithLimits))

	setters = append(setters,
		nodestatus.MemoryPressureCondition(kl.clock.Now, kl.evictionManager.IsUnderMemoryPressure, kl.recordNodeStatusEvent),
		nodestatus.DiskPressureCondition(kl.clock.Now, kl.evictionManager.IsUnderDiskPressure, kl.recordNodeStatusEvent),
		nodestatus.PIDPressureCondition(kl.clock.Now, kl.evictionManager.IsUnderPIDPressure, kl.recordNodeStatusEvent),
		nodestatus.ReadyCondition(kl.clock.Now, kl.runtimeState.runtimeErrors, kl.runtimeState.networkErrors, kl.runtimeState.storageErrors, validateHostFunc, kl.containerManager.Status, kl.recordNodeStatusEvent),
		nodestatus.VolumesInUse(kl.volumeManager.ReconcilerStatesHasBeenSynced, kl.volumeManager.GetVolumesInUse),
		// TODO(mtaufen): I decided not to move this setter for now, since all it does is send an event
		// and record state back to the Kubelet runtime object. In the future, I'd like to isolate
		// these side-effects by decoupling the decisions to send events and partial status recording
		// from the Node setters.
		kl.recordNodeSchedulableEvent,
	)
	return setters
}

// validateNodeIP validates that the given node IP belongs to the current host.
func validateNodeIP(nodeIP net.IP) error {
	// Honor IP limitations set in setNodeStatus()
	if nodeIP.To4() == nil && nodeIP.To16() == nil {
		return fmt.Errorf("nodeIP must be a valid IP address")
	}
	if nodeIP.IsLoopback() {
		return fmt.Errorf("nodeIP can't be loopback address")
	}
	if nodeIP.IsMulticast() {
		return fmt.Errorf("nodeIP can't be a multicast address")
	}
	if nodeIP.IsLinkLocalUnicast() {
		return fmt.Errorf("nodeIP can't be a link-local unicast address")
	}
	if nodeIP.IsUnspecified() {
		return fmt.Errorf("nodeIP can't be an all zeros address")
	}
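
	// The IP must also be assigned to one of the host's network interfaces.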
	addrs, err := net.InterfaceAddrs()
	if err != nil {
		return err
	}
	for _, addr := range addrs {
		var ip net.IP
		switch v := addr.(type) {
		case *net.IPNet:
			ip = v.IP
		case *net.IPAddr:
			ip = v.IP
		}
		if ip != nil && ip.Equal(nodeIP) {
			return nil
		}
	}
	return fmt.Errorf("node IP: %q not found in the host's network interfaces", nodeIP.String())
}

// nodeStatusHasChanged compares the original node and current node's status and
// returns true if any change happens. The heartbeat timestamp is ignored.
func nodeStatusHasChanged(originalStatus *v1.NodeStatus, status *v1.NodeStatus) bool {
	if originalStatus == nil && status == nil {
		return false
	}
	if originalStatus == nil || status == nil {
		return true
	}

	// Compare node conditions here because we need to ignore the heartbeat timestamp.
	if nodeConditionsHaveChanged(originalStatus.Conditions, status.Conditions) {
		return true
	}

	// Compare other fields of NodeStatus.
	originalStatusCopy := originalStatus.DeepCopy()
	statusCopy := status.DeepCopy()
	originalStatusCopy.Conditions = nil
	statusCopy.Conditions = nil
	return !apiequality.Semantic.DeepEqual(originalStatusCopy, statusCopy)
}

// nodeConditionsHaveChanged compares the original node and current node's
// conditions and returns true if any change happens. The heartbeat timestamp is
// ignored.
func nodeConditionsHaveChanged(originalConditions []v1.NodeCondition, conditions []v1.NodeCondition) bool {
	if len(originalConditions) != len(conditions) {
		return true
	}

	originalConditionsCopy := make([]v1.NodeCondition, 0, len(originalConditions))
	originalConditionsCopy = append(originalConditionsCopy, originalConditions...)
	conditionsCopy := make([]v1.NodeCondition, 0, len(conditions))
	conditionsCopy = append(conditionsCopy, conditions...)
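
	// Sort both copies by condition type so the element-wise comparison below is
	// order independent, and blank out LastHeartbeatTime so a heartbeat alone does
	// not register as a change.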
	sort.SliceStable(originalConditionsCopy, func(i, j int) bool { return originalConditionsCopy[i].Type < originalConditionsCopy[j].Type })
	sort.SliceStable(conditionsCopy, func(i, j int) bool { return conditionsCopy[i].Type < conditionsCopy[j].Type })

	replacedheartbeatTime := metav1.Time{}
	for i := range conditionsCopy {
		originalConditionsCopy[i].LastHeartbeatTime = replacedheartbeatTime
		conditionsCopy[i].LastHeartbeatTime = replacedheartbeatTime
		if !apiequality.Semantic.DeepEqual(&originalConditionsCopy[i], &conditionsCopy[i]) {
			return true
		}
	}
	return false
}