Move to a structured status for dynamic Kubelet config

Updates dynamic Kubelet config to use a structured status, rather than a
node condition. This makes the status machine-readable, and thus more
useful for config orchestration.
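
For example, an orchestrator can poll the new status to decide when a config rollout has converged. The sketch below is illustrative only (not part of this PR); it assumes a client-go clientset of roughly this era's API, and the helper name and node name are invented:

```go
package orchestrator

import (
	"fmt"

	apiequality "k8s.io/apimachinery/pkg/api/equality"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// configConverged reports whether the node is actively running the config it
// was assigned, with no sync error recorded in the structured status.
func configConverged(client kubernetes.Interface, nodeName string) (bool, error) {
	node, err := client.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
	if err != nil {
		return false, err
	}
	cfg := node.Status.Config
	if cfg == nil {
		// No structured status: feature disabled, or the node is not reporting yet.
		return false, fmt.Errorf("node %s reports no config status", nodeName)
	}
	return cfg.Error == "" && apiequality.Semantic.DeepEqual(cfg.Assigned, cfg.Active), nil
}
```

With the old node-condition approach, the same check required parsing free-form condition messages; here each field is a typed NodeConfigSource that can be compared directly.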

Fixes: #56896
Michael Taufen 2018-04-27 12:38:27 -07:00
parent 7e75a09db6
commit fcc1f8e7b6
29 changed files with 2655 additions and 1913 deletions

View File

@@ -77278,6 +77278,27 @@
}
}
},
"io.k8s.api.core.v1.NodeConfigStatus": {
"description": "NodeConfigStatus describes the status of the config assigned by Node.Spec.ConfigSource.",
"properties": {
"active": {
"description": "Active reports the checkpointed config the node is actively using. Active will represent either the current version of the Assigned config, or the current LastKnownGood config, depending on whether attempting to use the Assigned config results in an error.",
"$ref": "#/definitions/io.k8s.api.core.v1.NodeConfigSource"
},
"assigned": {
"description": "Assigned reports the checkpointed config the node will try to use. When Node.Spec.ConfigSource is updated, the node checkpoints the associated config payload to local disk, along with a record indicating intended config. The node refers to this record to choose its config checkpoint, and reports this record in Assigned. Assigned only updates in the status after the record has been checkpointed to disk. When the Kubelet is restarted, it tries to make the Assigned config the Active config by loading and validating the checkpointed payload identified by Assigned.",
"$ref": "#/definitions/io.k8s.api.core.v1.NodeConfigSource"
},
"error": {
"description": "Error describes any problems reconciling the Spec.ConfigSource to the Active config. Errors may occur, for example, attempting to checkpoint Spec.ConfigSource to the local Assigned record, attempting to checkpoint the payload associated with Spec.ConfigSource, attempting to load or validate the Assigned config, etc. Errors may occur at different points while syncing config. Earlier errors (e.g. download or checkpointing errors) will not result in a rollback to LastKnownGood, and may resolve across Kubelet retries. Later errors (e.g. loading or validating a checkpointed config) will result in a rollback to LastKnownGood. In the latter case, it is usually possible to resolve the error by fixing the config assigned in Spec.ConfigSource. You can find additional information for debugging by searching the error message in the Kubelet log. Error is a human-readable description of the error state; machines can check whether or not Error is empty, but should not rely on the stability of the Error text across Kubelet versions.",
"type": "string"
},
"lastKnownGood": {
"description": "LastKnownGood reports the checkpointed config the node will fall back to when it encounters an error attempting to use the Assigned config. The Assigned config becomes the LastKnownGood config when the node determines that the Assigned config is stable and correct. This is currently implemented as a 10-minute soak period starting when the local record of Assigned config is updated. If the Assigned config is Active at the end of this period, it becomes the LastKnownGood. Note that if Spec.ConfigSource is reset to nil (use local defaults), the LastKnownGood is also immediately reset to nil, because the local default config is always assumed good. You should not make assumptions about the node's method of determining config stability and correctness, as this may change or become configurable in the future.",
"$ref": "#/definitions/io.k8s.api.core.v1.NodeConfigSource"
}
}
},
"io.k8s.api.core.v1.NodeDaemonEndpoints": { "io.k8s.api.core.v1.NodeDaemonEndpoints": {
"description": "NodeDaemonEndpoints lists ports opened by daemons running on the Node.", "description": "NodeDaemonEndpoints lists ports opened by daemons running on the Node.",
"properties": { "properties": {
@@ -77446,6 +77467,10 @@
"x-kubernetes-patch-merge-key": "type",
"x-kubernetes-patch-strategy": "merge"
},
"config": {
"description": "Status of the config assigned to the node via the dynamic Kubelet config feature.",
"$ref": "#/definitions/io.k8s.api.core.v1.NodeConfigStatus"
},
"daemonEndpoints": { "daemonEndpoints": {
"description": "Endpoints of daemons running on the Node.", "description": "Endpoints of daemons running on the Node.",
"$ref": "#/definitions/io.k8s.api.core.v1.NodeDaemonEndpoints" "$ref": "#/definitions/io.k8s.api.core.v1.NodeDaemonEndpoints"

View File

@@ -18813,6 +18813,10 @@
"$ref": "v1.AttachedVolume"
},
"description": "List of volumes that are attached to the node."
},
"config": {
"$ref": "v1.NodeConfigStatus",
"description": "Status of the config assigned to the node via the dynamic Kubelet config feature."
}
}
},
@@ -18993,6 +18997,28 @@
}
}
},
"v1.NodeConfigStatus": {
"id": "v1.NodeConfigStatus",
"description": "NodeConfigStatus describes the status of the config assigned by Node.Spec.ConfigSource.",
"properties": {
"assigned": {
"$ref": "v1.NodeConfigSource",
"description": "Assigned reports the checkpointed config the node will try to use. When Node.Spec.ConfigSource is updated, the node checkpoints the associated config payload to local disk, along with a record indicating intended config. The node refers to this record to choose its config checkpoint, and reports this record in Assigned. Assigned only updates in the status after the record has been checkpointed to disk. When the Kubelet is restarted, it tries to make the Assigned config the Active config by loading and validating the checkpointed payload identified by Assigned."
},
"active": {
"$ref": "v1.NodeConfigSource",
"description": "Active reports the checkpointed config the node is actively using. Active will represent either the current version of the Assigned config, or the current LastKnownGood config, depending on whether attempting to use the Assigned config results in an error."
},
"lastKnownGood": {
"$ref": "v1.NodeConfigSource",
"description": "LastKnownGood reports the checkpointed config the node will fall back to when it encounters an error attempting to use the Assigned config. The Assigned config becomes the LastKnownGood config when the node determines that the Assigned config is stable and correct. This is currently implemented as a 10-minute soak period starting when the local record of Assigned config is updated. If the Assigned config is Active at the end of this period, it becomes the LastKnownGood. Note that if Spec.ConfigSource is reset to nil (use local defaults), the LastKnownGood is also immediately reset to nil, because the local default config is always assumed good. You should not make assumptions about the node's method of determining config stability and correctness, as this may change or become configurable in the future."
},
"error": {
"type": "string",
"description": "Error describes any problems reconciling the Spec.ConfigSource to the Active config. Errors may occur, for example, attempting to checkpoint Spec.ConfigSource to the local Assigned record, attempting to checkpoint the payload associated with Spec.ConfigSource, attempting to load or validate the Assigned config, etc. Errors may occur at different points while syncing config. Earlier errors (e.g. download or checkpointing errors) will not result in a rollback to LastKnownGood, and may resolve across Kubelet retries. Later errors (e.g. loading or validating a checkpointed config) will result in a rollback to LastKnownGood. In the latter case, it is usually possible to resolve the error by fixing the config assigned in Spec.ConfigSource. You can find additional information for debugging by searching the error message in the Kubelet log. Error is a human-readable description of the error state; machines can check whether or not Error is empty, but should not rely on the stability of the Error text across Kubelet versions."
}
}
},
"v1.PersistentVolumeClaimList": { "v1.PersistentVolumeClaimList": {
"id": "v1.PersistentVolumeClaimList", "id": "v1.PersistentVolumeClaimList",
"description": "PersistentVolumeClaimList is a list of PersistentVolumeClaim items.", "description": "PersistentVolumeClaimList is a list of PersistentVolumeClaim items.",

View File

@@ -198,25 +198,43 @@ HTTP server: The kubelet can also listen for HTTP and respond to a simple API
}
}
+// TODO(#63305): always validate the combination of the local config file and flags, this is the fallback
+// when the dynamic config controller tells us to use local config (this can be fixed alongside other validation fixes).
// use dynamic kubelet config, if enabled
var kubeletConfigController *dynamickubeletconfig.Controller
if dynamicConfigDir := kubeletFlags.DynamicConfigDir.Value(); len(dynamicConfigDir) > 0 {
-kubeletConfig, kubeletConfigController, err = BootstrapKubeletConfigController(kubeletConfig, dynamicConfigDir)
+var dynamicKubeletConfig *kubeletconfiginternal.KubeletConfiguration
+dynamicKubeletConfig, kubeletConfigController, err = BootstrapKubeletConfigController(dynamicConfigDir)
if err != nil {
glog.Fatal(err)
}
-// We must enforce flag precedence by re-parsing the command line into the new object.
-// This is necessary to preserve backwards-compatibility across binary upgrades.
-// See issue #56171 for more details.
-if err := kubeletConfigFlagPrecedence(kubeletConfig, args); err != nil {
-glog.Fatal(err)
-}
-// update feature gates based on new config
-if err := utilfeature.DefaultFeatureGate.SetFromMap(kubeletConfig.FeatureGates); err != nil {
-glog.Fatal(err)
-}
+// If we should just use our existing, local config, the controller will return a nil config
+if dynamicKubeletConfig != nil {
+kubeletConfig = dynamicKubeletConfig
+// We must enforce flag precedence by re-parsing the command line into the new object.
+// This is necessary to preserve backwards-compatibility across binary upgrades.
+// See issue #56171 for more details.
+if err := kubeletConfigFlagPrecedence(kubeletConfig, args); err != nil {
+glog.Fatal(err)
+}
+// update feature gates based on new config
+if err := utilfeature.DefaultFeatureGate.SetFromMap(kubeletConfig.FeatureGates); err != nil {
+glog.Fatal(err)
+}
+}
}
+// TODO(#63305): need to reconcile that validation performed inside the dynamic config controller
+// will happen against currently set feature gates, rather than future adjustments from combination of files
+// and flags. There's a potential scenario where a valid config (because it sets new gates) is considered
+// invalid against current gates (at least until --feature-gates flag is removed).
+// We should validate against the combination of current feature gates, overrides from feature gates in the file,
+// and overrides from feature gates set via flags, rather than currently set feature gates.
+// Once the --feature-gates flag is removed, we should strictly validate against the combination of current
+// feature gates and feature gates in the file (always need to validate against the combo, because feature-gates
+// can layer between the file and dynamic config right now - though maybe we should change this).
// construct a KubeletServer from kubeletFlags and kubeletConfig
kubeletServer := &options.KubeletServer{
KubeletFlags: *kubeletFlags,
@@ -1089,8 +1107,7 @@ func parseResourceList(m map[string]string) (v1.ResourceList, error) {
}
// BootstrapKubeletConfigController constructs and bootstrap a configuration controller
-func BootstrapKubeletConfigController(defaultConfig *kubeletconfiginternal.KubeletConfiguration,
-dynamicConfigDir string) (*kubeletconfiginternal.KubeletConfiguration, *dynamickubeletconfig.Controller, error) {
+func BootstrapKubeletConfigController(dynamicConfigDir string) (*kubeletconfiginternal.KubeletConfiguration, *dynamickubeletconfig.Controller, error) {
if !utilfeature.DefaultFeatureGate.Enabled(features.DynamicKubeletConfig) {
return nil, nil, fmt.Errorf("failed to bootstrap Kubelet config controller, you must enable the DynamicKubeletConfig feature gate")
}
@@ -1104,7 +1121,7 @@ func BootstrapKubeletConfigController(defaultConfig *kubeletconfiginternal.Kubel
return nil, nil, fmt.Errorf("failed to get absolute path for --dynamic-config-dir=%s", dynamicConfigDir)
}
// get the latest KubeletConfiguration checkpoint from disk, or return the default config if no valid checkpoints exist
-c := dynamickubeletconfig.NewController(defaultConfig, dir)
+c := dynamickubeletconfig.NewController(dir)
kc, err := c.Bootstrap()
if err != nil {
return nil, nil, fmt.Errorf("failed to determine a valid configuration, error: %v", err)

View File

@@ -2996,6 +2996,13 @@ The resulting set of endpoints can be viewed as:<br>
<td class="tableblock halign-left valign-top"><p class="tableblock"><a href="#_v1_attachedvolume">v1.AttachedVolume</a> array</p></td>
<td class="tableblock halign-left valign-top"></td>
</tr>
<tr>
<td class="tableblock halign-left valign-top"><p class="tableblock">config</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">Status of the config assigned to the node via the dynamic Kubelet config feature.</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">false</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock"><a href="#_v1_nodeconfigstatus">v1.NodeConfigStatus</a></p></td>
<td class="tableblock halign-left valign-top"></td>
</tr>
</tbody>
</table>
@@ -6498,6 +6505,40 @@ Examples:<br>
</tbody>
</table>
</div>
<div class="sect2">
<h3 id="_v1_localvolumesource">v1.LocalVolumeSource</h3>
<div class="paragraph">
<p>Local represents directly-attached storage with node affinity (Beta feature)</p>
</div>
<table class="tableblock frame-all grid-all" style="width:100%; ">
<colgroup>
<col style="width:20%;">
<col style="width:20%;">
<col style="width:20%;">
<col style="width:20%;">
<col style="width:20%;">
</colgroup>
<thead>
<tr>
<th class="tableblock halign-left valign-top">Name</th>
<th class="tableblock halign-left valign-top">Description</th>
<th class="tableblock halign-left valign-top">Required</th>
<th class="tableblock halign-left valign-top">Schema</th>
<th class="tableblock halign-left valign-top">Default</th>
</tr>
</thead>
<tbody>
<tr>
<td class="tableblock halign-left valign-top"><p class="tableblock">path</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">The full path to the volume on the node. It can be either a directory or block device (disk, partition, &#8230;). Directories can be represented only by PersistentVolume with VolumeMode=Filesystem. Block devices can be represented only by VolumeMode=Block, which also requires the BlockVolume alpha feature gate to be enabled.</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">true</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">string</p></td>
<td class="tableblock halign-left valign-top"></td>
</tr>
</tbody>
</table>
</div>
<div class="sect2">
<h3 id="_v1_nodeselectorterm">v1.NodeSelectorTerm</h3>
@@ -6539,40 +6580,6 @@ Examples:<br>
</tbody>
</table>
</div>
<div class="sect2">
<h3 id="_v1_localvolumesource">v1.LocalVolumeSource</h3>
<div class="paragraph">
<p>Local represents directly-attached storage with node affinity (Beta feature)</p>
</div>
<table class="tableblock frame-all grid-all" style="width:100%; ">
<colgroup>
<col style="width:20%;">
<col style="width:20%;">
<col style="width:20%;">
<col style="width:20%;">
<col style="width:20%;">
</colgroup>
<thead>
<tr>
<th class="tableblock halign-left valign-top">Name</th>
<th class="tableblock halign-left valign-top">Description</th>
<th class="tableblock halign-left valign-top">Required</th>
<th class="tableblock halign-left valign-top">Schema</th>
<th class="tableblock halign-left valign-top">Default</th>
</tr>
</thead>
<tbody>
<tr>
<td class="tableblock halign-left valign-top"><p class="tableblock">path</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">The full path to the volume on the node. It can be either a directory or block device (disk, partition, &#8230;). Directories can be represented only by PersistentVolume with VolumeMode=Filesystem. Block devices can be represented only by VolumeMode=Block, which also requires the BlockVolume alpha feature gate to be enabled.</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">true</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">string</p></td>
<td class="tableblock halign-left valign-top"></td>
</tr>
</tbody>
</table>
</div>
<div class="sect2">
<h3 id="_v1_selinuxoptions">v1.SELinuxOptions</h3>
@@ -7892,6 +7899,61 @@ Examples:<br>
<div class="sect2">
<h3 id="_v1_uniquevolumename">v1.UniqueVolumeName</h3>
</div>
<div class="sect2">
<h3 id="_v1_nodeconfigstatus">v1.NodeConfigStatus</h3>
<div class="paragraph">
<p>NodeConfigStatus describes the status of the config assigned by Node.Spec.ConfigSource.</p>
</div>
<table class="tableblock frame-all grid-all" style="width:100%; ">
<colgroup>
<col style="width:20%;">
<col style="width:20%;">
<col style="width:20%;">
<col style="width:20%;">
<col style="width:20%;">
</colgroup>
<thead>
<tr>
<th class="tableblock halign-left valign-top">Name</th>
<th class="tableblock halign-left valign-top">Description</th>
<th class="tableblock halign-left valign-top">Required</th>
<th class="tableblock halign-left valign-top">Schema</th>
<th class="tableblock halign-left valign-top">Default</th>
</tr>
</thead>
<tbody>
<tr>
<td class="tableblock halign-left valign-top"><p class="tableblock">assigned</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">Assigned reports the checkpointed config the node will try to use. When Node.Spec.ConfigSource is updated, the node checkpoints the associated config payload to local disk, along with a record indicating intended config. The node refers to this record to choose its config checkpoint, and reports this record in Assigned. Assigned only updates in the status after the record has been checkpointed to disk. When the Kubelet is restarted, it tries to make the Assigned config the Active config by loading and validating the checkpointed payload identified by Assigned.</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">false</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock"><a href="#_v1_nodeconfigsource">v1.NodeConfigSource</a></p></td>
<td class="tableblock halign-left valign-top"></td>
</tr>
<tr>
<td class="tableblock halign-left valign-top"><p class="tableblock">active</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">Active reports the checkpointed config the node is actively using. Active will represent either the current version of the Assigned config, or the current LastKnownGood config, depending on whether attempting to use the Assigned config results in an error.</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">false</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock"><a href="#_v1_nodeconfigsource">v1.NodeConfigSource</a></p></td>
<td class="tableblock halign-left valign-top"></td>
</tr>
<tr>
<td class="tableblock halign-left valign-top"><p class="tableblock">lastKnownGood</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">LastKnownGood reports the checkpointed config the node will fall back to when it encounters an error attempting to use the Assigned config. The Assigned config becomes the LastKnownGood config when the node determines that the Assigned config is stable and correct. This is currently implemented as a 10-minute soak period starting when the local record of Assigned config is updated. If the Assigned config is Active at the end of this period, it becomes the LastKnownGood. Note that if Spec.ConfigSource is reset to nil (use local defaults), the LastKnownGood is also immediately reset to nil, because the local default config is always assumed good. You should not make assumptions about the node&#8217;s method of determining config stability and correctness, as this may change or become configurable in the future.</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">false</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock"><a href="#_v1_nodeconfigsource">v1.NodeConfigSource</a></p></td>
<td class="tableblock halign-left valign-top"></td>
</tr>
<tr>
<td class="tableblock halign-left valign-top"><p class="tableblock">error</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">Error describes any problems reconciling the Spec.ConfigSource to the Active config. Errors may occur, for example, attempting to checkpoint Spec.ConfigSource to the local Assigned record, attempting to checkpoint the payload associated with Spec.ConfigSource, attempting to load or validate the Assigned config, etc. Errors may occur at different points while syncing config. Earlier errors (e.g. download or checkpointing errors) will not result in a rollback to LastKnownGood, and may resolve across Kubelet retries. Later errors (e.g. loading or validating a checkpointed config) will result in a rollback to LastKnownGood. In the latter case, it is usually possible to resolve the error by fixing the config assigned in Spec.ConfigSource. You can find additional information for debugging by searching the error message in the Kubelet log. Error is a human-readable description of the error state; machines can check whether or not Error is empty, but should not rely on the stability of the Error text across Kubelet versions.</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">false</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">string</p></td>
<td class="tableblock halign-left valign-top"></td>
</tr>
</tbody>
</table>
</div>
<div class="sect2">
<h3 id="_v1_csipersistentvolumesource">v1.CSIPersistentVolumeSource</h3>

View File

@@ -3348,6 +3348,53 @@ type NodeSystemInfo struct {
Architecture string
}
// NodeConfigStatus describes the status of the config assigned by Node.Spec.ConfigSource.
type NodeConfigStatus struct {
// Assigned reports the checkpointed config the node will try to use.
// When Node.Spec.ConfigSource is updated, the node checkpoints the associated
// config payload to local disk, along with a record indicating intended
// config. The node refers to this record to choose its config checkpoint, and
// reports this record in Assigned. Assigned only updates in the status after
// the record has been checkpointed to disk. When the Kubelet is restarted,
// it tries to make the Assigned config the Active config by loading and
// validating the checkpointed payload identified by Assigned.
// +optional
Assigned *NodeConfigSource
// Active reports the checkpointed config the node is actively using.
// Active will represent either the current version of the Assigned config,
// or the current LastKnownGood config, depending on whether attempting to use the
// Assigned config results in an error.
// +optional
Active *NodeConfigSource
// LastKnownGood reports the checkpointed config the node will fall back to
// when it encounters an error attempting to use the Assigned config.
// The Assigned config becomes the LastKnownGood config when the node determines
// that the Assigned config is stable and correct.
// This is currently implemented as a 10-minute soak period starting when the local
// record of Assigned config is updated. If the Assigned config is Active at the end
// of this period, it becomes the LastKnownGood. Note that if Spec.ConfigSource is
// reset to nil (use local defaults), the LastKnownGood is also immediately reset to nil,
// because the local default config is always assumed good.
// You should not make assumptions about the node's method of determining config stability
// and correctness, as this may change or become configurable in the future.
// +optional
LastKnownGood *NodeConfigSource
// Error describes any problems reconciling the Spec.ConfigSource to the Active config.
// Errors may occur, for example, attempting to checkpoint Spec.ConfigSource to the local Assigned
// record, attempting to checkpoint the payload associated with Spec.ConfigSource, attempting
// to load or validate the Assigned config, etc.
// Errors may occur at different points while syncing config. Earlier errors (e.g. download or
// checkpointing errors) will not result in a rollback to LastKnownGood, and may resolve across
// Kubelet retries. Later errors (e.g. loading or validating a checkpointed config) will result in
// a rollback to LastKnownGood. In the latter case, it is usually possible to resolve the error
// by fixing the config assigned in Spec.ConfigSource.
// You can find additional information for debugging by searching the error message in the Kubelet log.
// Error is a human-readable description of the error state; machines can check whether or not Error
// is empty, but should not rely on the stability of the Error text across Kubelet versions.
// +optional
Error string
}
// NodeStatus is information about the current status of a node.
type NodeStatus struct {
// Capacity represents the total resources of a node.
@@ -3380,6 +3427,9 @@ type NodeStatus struct {
// List of volumes that are attached to the node.
// +optional
VolumesAttached []AttachedVolume
// Status of the config assigned to the node via the dynamic Kubelet config feature.
// +optional
Config *NodeConfigStatus
}
type UniqueVolumeName string
@@ -3464,8 +3514,6 @@ const (
NodeDiskPressure NodeConditionType = "DiskPressure"
// NodeNetworkUnavailable means that network for the node is not correctly configured.
NodeNetworkUnavailable NodeConditionType = "NetworkUnavailable"
-// NodeKubeletConfigOk indicates whether the kubelet is correctly configured
-NodeKubeletConfigOk NodeConditionType = "KubeletConfigOk"
)
type NodeCondition struct {
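
As a reading aid, the semantics of the four NodeConfigStatus fields collapse into a small set of states. The classifier below is hypothetical (not code from this PR) and assumes the internal core types plus the apiequality helper used elsewhere in this repo:

```go
package example

import (
	apiequality "k8s.io/apimachinery/pkg/api/equality"
	core "k8s.io/kubernetes/pkg/apis/core"
)

// classifyConfigState summarizes a NodeConfigStatus; the states mirror the
// field documentation above.
func classifyConfigState(s *core.NodeConfigStatus) string {
	switch {
	case s == nil:
		return "no status reported (e.g. using local defaults)"
	case s.Error != "" && apiequality.Semantic.DeepEqual(s.Active, s.LastKnownGood):
		return "load/validate error; rolled back to LastKnownGood"
	case s.Error != "":
		return "download/checkpoint error; no rollback, may resolve on retry"
	case apiequality.Semantic.DeepEqual(s.Active, s.Assigned):
		return "Assigned config is Active"
	default:
		return "Assigned config checkpointed but not yet Active"
	}
}
```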

View File

@@ -210,6 +210,8 @@ func RegisterConversions(scheme *runtime.Scheme) error {
Convert_core_NodeCondition_To_v1_NodeCondition,
Convert_v1_NodeConfigSource_To_core_NodeConfigSource,
Convert_core_NodeConfigSource_To_v1_NodeConfigSource,
Convert_v1_NodeConfigStatus_To_core_NodeConfigStatus,
Convert_core_NodeConfigStatus_To_v1_NodeConfigStatus,
Convert_v1_NodeDaemonEndpoints_To_core_NodeDaemonEndpoints,
Convert_core_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints,
Convert_v1_NodeList_To_core_NodeList,
@@ -2635,6 +2637,32 @@ func Convert_core_NodeConfigSource_To_v1_NodeConfigSource(in *core.NodeConfigSou
return autoConvert_core_NodeConfigSource_To_v1_NodeConfigSource(in, out, s)
}
func autoConvert_v1_NodeConfigStatus_To_core_NodeConfigStatus(in *v1.NodeConfigStatus, out *core.NodeConfigStatus, s conversion.Scope) error {
out.Assigned = (*core.NodeConfigSource)(unsafe.Pointer(in.Assigned))
out.Active = (*core.NodeConfigSource)(unsafe.Pointer(in.Active))
out.LastKnownGood = (*core.NodeConfigSource)(unsafe.Pointer(in.LastKnownGood))
out.Error = in.Error
return nil
}
// Convert_v1_NodeConfigStatus_To_core_NodeConfigStatus is an autogenerated conversion function.
func Convert_v1_NodeConfigStatus_To_core_NodeConfigStatus(in *v1.NodeConfigStatus, out *core.NodeConfigStatus, s conversion.Scope) error {
return autoConvert_v1_NodeConfigStatus_To_core_NodeConfigStatus(in, out, s)
}
func autoConvert_core_NodeConfigStatus_To_v1_NodeConfigStatus(in *core.NodeConfigStatus, out *v1.NodeConfigStatus, s conversion.Scope) error {
out.Assigned = (*v1.NodeConfigSource)(unsafe.Pointer(in.Assigned))
out.Active = (*v1.NodeConfigSource)(unsafe.Pointer(in.Active))
out.LastKnownGood = (*v1.NodeConfigSource)(unsafe.Pointer(in.LastKnownGood))
out.Error = in.Error
return nil
}
// Convert_core_NodeConfigStatus_To_v1_NodeConfigStatus is an autogenerated conversion function.
func Convert_core_NodeConfigStatus_To_v1_NodeConfigStatus(in *core.NodeConfigStatus, out *v1.NodeConfigStatus, s conversion.Scope) error {
return autoConvert_core_NodeConfigStatus_To_v1_NodeConfigStatus(in, out, s)
}
func autoConvert_v1_NodeDaemonEndpoints_To_core_NodeDaemonEndpoints(in *v1.NodeDaemonEndpoints, out *core.NodeDaemonEndpoints, s conversion.Scope) error {
if err := Convert_v1_DaemonEndpoint_To_core_DaemonEndpoint(&in.KubeletEndpoint, &out.KubeletEndpoint, s); err != nil {
return err
@@ -2832,6 +2860,7 @@ func autoConvert_v1_NodeStatus_To_core_NodeStatus(in *v1.NodeStatus, out *core.N
out.Images = *(*[]core.ContainerImage)(unsafe.Pointer(&in.Images))
out.VolumesInUse = *(*[]core.UniqueVolumeName)(unsafe.Pointer(&in.VolumesInUse))
out.VolumesAttached = *(*[]core.AttachedVolume)(unsafe.Pointer(&in.VolumesAttached))
out.Config = (*core.NodeConfigStatus)(unsafe.Pointer(in.Config))
return nil
}
@@ -2855,6 +2884,7 @@ func autoConvert_core_NodeStatus_To_v1_NodeStatus(in *core.NodeStatus, out *v1.N
out.Images = *(*[]v1.ContainerImage)(unsafe.Pointer(&in.Images))
out.VolumesInUse = *(*[]v1.UniqueVolumeName)(unsafe.Pointer(&in.VolumesInUse))
out.VolumesAttached = *(*[]v1.AttachedVolume)(unsafe.Pointer(&in.VolumesAttached))
out.Config = (*v1.NodeConfigStatus)(unsafe.Pointer(in.Config))
return nil
}

View File

@@ -4010,9 +4010,14 @@ func ValidateNode(node *core.Node) field.ErrorList {
// That said, if specified, we need to ensure they are valid.
allErrs = append(allErrs, ValidateNodeResources(node)...)
-// Only allow Node.Spec.ConfigSource to be set if the DynamicKubeletConfig feature gate is enabled
-if node.Spec.ConfigSource != nil && !utilfeature.DefaultFeatureGate.Enabled(features.DynamicKubeletConfig) {
-allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "configSource"), "configSource may only be set if the DynamicKubeletConfig feature gate is enabled)"))
+// Only allow Spec.ConfigSource and Status.Config to be set if the DynamicKubeletConfig feature gate is enabled
+if !utilfeature.DefaultFeatureGate.Enabled(features.DynamicKubeletConfig) {
+if node.Spec.ConfigSource != nil {
+allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "configSource"), "configSource may only be set if the DynamicKubeletConfig feature gate is enabled)"))
+}
+if node.Status.Config != nil {
+allErrs = append(allErrs, field.Forbidden(field.NewPath("status", "config"), "config may only be set if the DynamicKubeletConfig feature gate is enabled)"))
+}
}
if len(node.Spec.PodCIDR) != 0 {
@@ -4097,6 +4102,18 @@ func ValidateNodeUpdate(node, oldNode *core.Node) field.ErrorList {
}
}
+// Allow and validate updates to Node.Spec.ConfigSource and Node.Status.Config if DynamicKubeletConfig feature gate is enabled
+if utilfeature.DefaultFeatureGate.Enabled(features.DynamicKubeletConfig) {
+if node.Spec.ConfigSource != nil {
+allErrs = append(allErrs, validateNodeConfigSourceSpec(node.Spec.ConfigSource, field.NewPath("spec", "configSource"))...)
+}
+oldNode.Spec.ConfigSource = node.Spec.ConfigSource
+if node.Status.Config != nil {
+allErrs = append(allErrs, validateNodeConfigStatus(node.Status.Config, field.NewPath("status", "config"))...)
+}
+oldNode.Status.Config = node.Status.Config
+}
// TODO: move reset function to its own location
// Ignore metadata changes now that they have been tested
oldNode.ObjectMeta = node.ObjectMeta
@@ -4113,14 +4130,6 @@ func ValidateNodeUpdate(node, oldNode *core.Node) field.ErrorList {
}
oldNode.Spec.Taints = node.Spec.Taints
-// Allow updates to Node.Spec.ConfigSource if DynamicKubeletConfig feature gate is enabled
-if utilfeature.DefaultFeatureGate.Enabled(features.DynamicKubeletConfig) {
-if node.Spec.ConfigSource != nil {
-allErrs = append(allErrs, validateNodeConfigSourceSpec(node.Spec.ConfigSource, field.NewPath("spec", "configSource"))...)
-}
-oldNode.Spec.ConfigSource = node.Spec.ConfigSource
-}
// We made allowed changes to oldNode, and now we compare oldNode to node. Any remaining differences indicate changes to protected fields.
// TODO: Add a 'real' error type for this error and provide print actual diffs.
if !apiequality.Semantic.DeepEqual(oldNode, node) {
@@ -4162,12 +4171,56 @@ func validateConfigMapNodeConfigSourceSpec(source *core.ConfigMapNodeConfigSourc
return append(allErrs, validateConfigMapNodeConfigSource(source, fldPath)...)
}
// validation specific to Node.Status.Config
func validateNodeConfigStatus(status *core.NodeConfigStatus, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if status.Assigned != nil {
allErrs = append(allErrs, validateNodeConfigSourceStatus(status.Assigned, fldPath.Child("assigned"))...)
}
if status.Active != nil {
allErrs = append(allErrs, validateNodeConfigSourceStatus(status.Active, fldPath.Child("active"))...)
}
if status.LastKnownGood != nil {
allErrs = append(allErrs, validateNodeConfigSourceStatus(status.LastKnownGood, fldPath.Child("lastKnownGood"))...)
}
return allErrs
}
// validation specific to Node.Status.Config.(Active|Assigned|LastKnownGood)
func validateNodeConfigSourceStatus(source *core.NodeConfigSource, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
count := int(0)
if source.ConfigMap != nil {
count++
allErrs = append(allErrs, validateConfigMapNodeConfigSourceStatus(source.ConfigMap, fldPath.Child("configMap"))...)
}
// add more subfields here in the future as they are added to NodeConfigSource
// exactly one reference subfield must be non-nil
if count != 1 {
allErrs = append(allErrs, field.Invalid(fldPath, source, "exactly one reference subfield must be non-nil"))
}
return allErrs
}
// validation specific to Node.Status.Config.(Active|Assigned|LastKnownGood).ConfigMap
func validateConfigMapNodeConfigSourceStatus(source *core.ConfigMapNodeConfigSource, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if string(source.UID) == "" {
allErrs = append(allErrs, field.Required(fldPath.Child("uid"), "uid must be set in status"))
}
// TODO(#63221): require ResourceVersion in status when we start respecting ConfigMap mutations (the Kubelet isn't tracking it internally until
// that PR, which makes it difficult to report for now).
return append(allErrs, validateConfigMapNodeConfigSource(source, fldPath)...)
}
// common validation // common validation
func validateConfigMapNodeConfigSource(source *core.ConfigMapNodeConfigSource, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
// validate target configmap namespace
if source.Namespace == "" {
-allErrs = append(allErrs, field.Required(fldPath.Child("namespace"), "namespace must be set in spec"))
+allErrs = append(allErrs, field.Required(fldPath.Child("namespace"), "namespace must be set"))
} else {
for _, msg := range ValidateNameFunc(ValidateNamespaceName)(source.Namespace, false) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("namespace"), source.Namespace, msg))
@@ -4175,7 +4228,7 @@ func validateConfigMapNodeConfigSource(source *core.ConfigMapNodeConfigSource, f
}
// validate target configmap name
if source.Name == "" {
-allErrs = append(allErrs, field.Required(fldPath.Child("name"), "name must be set in spec"))
+allErrs = append(allErrs, field.Required(fldPath.Child("name"), "name must be set"))
} else {
for _, msg := range ValidateNameFunc(ValidateConfigMapName)(source.Name, false) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), source.Name, msg))
@@ -4183,7 +4236,7 @@ func validateConfigMapNodeConfigSource(source *core.ConfigMapNodeConfigSource, f
}
// validate kubeletConfigKey against rules for configMap key names
if source.KubeletConfigKey == "" {
-allErrs = append(allErrs, field.Required(fldPath.Child("kubeletConfigKey"), "kubeletConfigKey must be set in spec"))
+allErrs = append(allErrs, field.Required(fldPath.Child("kubeletConfigKey"), "kubeletConfigKey must be set"))
} else {
for _, msg := range validation.IsConfigMapKey(source.KubeletConfigKey) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("kubeletConfigKey"), source.KubeletConfigKey, msg))

View File

@@ -2402,6 +2402,49 @@ func (in *NodeConfigSource) DeepCopy() *NodeConfigSource {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeConfigStatus) DeepCopyInto(out *NodeConfigStatus) {
*out = *in
if in.Assigned != nil {
in, out := &in.Assigned, &out.Assigned
if *in == nil {
*out = nil
} else {
*out = new(NodeConfigSource)
(*in).DeepCopyInto(*out)
}
}
if in.Active != nil {
in, out := &in.Active, &out.Active
if *in == nil {
*out = nil
} else {
*out = new(NodeConfigSource)
(*in).DeepCopyInto(*out)
}
}
if in.LastKnownGood != nil {
in, out := &in.LastKnownGood, &out.LastKnownGood
if *in == nil {
*out = nil
} else {
*out = new(NodeConfigSource)
(*in).DeepCopyInto(*out)
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeConfigStatus.
func (in *NodeConfigStatus) DeepCopy() *NodeConfigStatus {
if in == nil {
return nil
}
out := new(NodeConfigStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeDaemonEndpoints) DeepCopyInto(out *NodeDaemonEndpoints) {
*out = *in
@@ -2654,6 +2697,15 @@ func (in *NodeStatus) DeepCopyInto(out *NodeStatus) {
*out = make([]AttachedVolume, len(*in))
copy(*out, *in)
}
if in.Config != nil {
in, out := &in.Config, &out.Config
if *in == nil {
*out = nil
} else {
*out = new(NodeConfigStatus)
(*in).DeepCopyInto(*out)
}
}
return
}

View File

@@ -55,13 +55,14 @@ type RemoteConfigSource interface {
// Download downloads the remote config source object returns a Payload backed by the object,
// or a sanitized failure reason and error if the download fails
Download(client clientset.Interface) (Payload, string, error)
-// Encode returns a []byte representation of the object behind the RemoteConfigSource
+// Encode returns a []byte representation of the NodeConfigSource behind the RemoteConfigSource
Encode() ([]byte, error)
-// object returns the underlying source object.
-// If you want to compare sources for equality, use EqualRemoteConfigSources,
-// which compares the underlying source objects for semantic API equality.
-object() interface{}
+// NodeConfigSource returns a copy of the underlying apiv1.NodeConfigSource object.
+// All RemoteConfigSources are expected to be backed by a NodeConfigSource,
+// though the convenience methods on the interface will target the source
+// type that was detected in a call to NewRemoteConfigSource.
+NodeConfigSource() *apiv1.NodeConfigSource
}
// NewRemoteConfigSource constructs a RemoteConfigSource from a v1/NodeConfigSource object
@@ -72,9 +73,10 @@ func NewRemoteConfigSource(source *apiv1.NodeConfigSource) (RemoteConfigSource,
// nil here, so that if a new API server allows a new config source type, old clients can send
// an error message rather than crashing due to a nil pointer dereference.
-// exactly one reference subfield of the config source must be non-nil, today ConfigMap is the only reference subfield
+// Exactly one reference subfield of the config source must be non-nil.
+// Currently ConfigMap is the only reference subfield.
if source.ConfigMap == nil {
-return nil, status.FailSyncReasonAllNilSubfields, fmt.Errorf("%s, NodeConfigSource was: %#v", status.FailSyncReasonAllNilSubfields, source)
+return nil, status.AllNilSubfieldsError, fmt.Errorf("%s, NodeConfigSource was: %#v", status.AllNilSubfieldsError, source)
}
return &remoteConfigMap{source}, "", nil
}
@@ -109,7 +111,7 @@ func DecodeRemoteConfigSource(data []byte) (RemoteConfigSource, error) {
// comparing the underlying API objects for semantic equality.
func EqualRemoteConfigSources(a, b RemoteConfigSource) bool {
if a != nil && b != nil {
-return apiequality.Semantic.DeepEqual(a.object(), b.object())
+return apiequality.Semantic.DeepEqual(a.NodeConfigSource(), b.NodeConfigSource())
}
return a == b
}
@@ -145,13 +147,12 @@ func (r *remoteConfigMap) Download(client clientset.Interface) (Payload, string,
// get the ConfigMap via namespace/name, there doesn't seem to be a way to get it by UID
cm, err := client.CoreV1().ConfigMaps(r.source.ConfigMap.Namespace).Get(r.source.ConfigMap.Name, metav1.GetOptions{})
if err != nil {
-reason = fmt.Sprintf(status.FailSyncReasonDownloadFmt, r.APIPath())
-return nil, reason, fmt.Errorf("%s, error: %v", reason, err)
+return nil, status.DownloadError, fmt.Errorf("%s, error: %v", status.DownloadError, err)
}
// ensure that UID matches the UID on the source
if r.source.ConfigMap.UID != cm.UID {
-reason = fmt.Sprintf(status.FailSyncReasonUIDMismatchFmt, r.source.ConfigMap.UID, r.APIPath(), cm.UID)
+reason = fmt.Sprintf(status.UIDMismatchErrorFmt, r.source.ConfigMap.UID, r.APIPath(), cm.UID)
return nil, reason, fmt.Errorf(reason)
}
@@ -178,6 +179,6 @@ func (r *remoteConfigMap) Encode() ([]byte, error) {
return data, nil
}
-func (r *remoteConfigMap) object() interface{} {
-return r.source
+func (r *remoteConfigMap) NodeConfigSource() *apiv1.NodeConfigSource {
+return r.source.DeepCopy()
}
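
Illustrative usage of the renamed accessor (not from this PR): because NodeConfigSource() returns a deep copy, callers can inspect or compare sources without mutating checkpointed state. The field values below are placeholders:

```go
package example

import (
	"fmt"

	apiv1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint"
)

func compareSources() error {
	src := &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
		Namespace:        "kube-system",
		Name:             "node-config",
		KubeletConfigKey: "kubelet",
		UID:              "placeholder-uid",
	}}
	a, reason, err := checkpoint.NewRemoteConfigSource(src)
	if err != nil {
		return fmt.Errorf("invalid source (%s): %v", reason, err)
	}
	b, _, _ := checkpoint.NewRemoteConfigSource(src.DeepCopy())
	// Compares the underlying NodeConfigSource objects semantically.
	fmt.Println(checkpoint.EqualRemoteConfigSources(a, b)) // true
	return nil
}
```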

View File

@@ -70,7 +70,7 @@ func TestNewRemoteConfigSource(t *testing.T) {
return
}
// underlying object should match the object passed in
-if !apiequality.Semantic.DeepEqual(c.expect.object(), source.object()) {
+if !apiequality.Semantic.DeepEqual(c.expect.NodeConfigSource(), source.NodeConfigSource()) {
t.Errorf("case %q, expect RemoteConfigSource %s but got %s", c.desc, spew.Sdump(c.expect), spew.Sdump(source))
}
})

View File

@@ -24,9 +24,9 @@ import (
"k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint"
)
-// so far only implements Current(), LastKnownGood(), SetCurrent(), and SetLastKnownGood()
+// so far only implements Assigned(), LastKnownGood(), SetAssigned(), and SetLastKnownGood()
type fakeStore struct {
-current checkpoint.RemoteConfigSource
+assigned checkpoint.RemoteConfigSource
lastKnownGood checkpoint.RemoteConfigSource
}
@@ -48,20 +48,20 @@ func (s *fakeStore) Load(source checkpoint.RemoteConfigSource) (*kubeletconfig.K
return nil, fmt.Errorf("Load method not supported")
}
-func (s *fakeStore) CurrentModified() (time.Time, error) {
-return time.Time{}, fmt.Errorf("CurrentModified method not supported")
+func (s *fakeStore) AssignedModified() (time.Time, error) {
+return time.Time{}, fmt.Errorf("AssignedModified method not supported")
}
-func (s *fakeStore) Current() (checkpoint.RemoteConfigSource, error) {
-return s.current, nil
+func (s *fakeStore) Assigned() (checkpoint.RemoteConfigSource, error) {
+return s.assigned, nil
}
func (s *fakeStore) LastKnownGood() (checkpoint.RemoteConfigSource, error) {
return s.lastKnownGood, nil
}
-func (s *fakeStore) SetCurrent(source checkpoint.RemoteConfigSource) error {
-s.current = source
+func (s *fakeStore) SetAssigned(source checkpoint.RemoteConfigSource) error {
+s.assigned = source
return nil
}

View File

@@ -31,7 +31,7 @@ import (
const (
metaDir = "meta"
-currentFile = "current"
+assignedFile = "assigned"
lastKnownGoodFile = "last-known-good"
checkpointsDir = "checkpoints"
@@ -61,11 +61,11 @@ func (s *fsStore) Initialize() error {
if err := utilfiles.EnsureDir(s.fs, s.dir); err != nil {
return err
}
-// ensure metadata directory and reference files (tracks current and lkg configs)
+// ensure metadata directory and reference files (tracks assigned and lkg configs)
if err := utilfiles.EnsureDir(s.fs, filepath.Join(s.dir, metaDir)); err != nil {
return err
}
-if err := utilfiles.EnsureFile(s.fs, s.metaPath(currentFile)); err != nil {
+if err := utilfiles.EnsureFile(s.fs, s.metaPath(assignedFile)); err != nil {
return err
}
if err := utilfiles.EnsureFile(s.fs, s.metaPath(lastKnownGoodFile)); err != nil {
@@ -111,8 +111,8 @@ func (s *fsStore) Load(source checkpoint.RemoteConfigSource) (*kubeletconfig.Kub
return kc, nil
}
-func (s *fsStore) CurrentModified() (time.Time, error) {
-path := s.metaPath(currentFile)
+func (s *fsStore) AssignedModified() (time.Time, error) {
+path := s.metaPath(assignedFile)
info, err := s.fs.Stat(path)
if err != nil {
return time.Time{}, fmt.Errorf("failed to stat %q while checking modification time, error: %v", path, err)
@@ -120,16 +120,16 @@ func (s *fsStore) CurrentModified() (time.Time, error) {
return info.ModTime(), nil
}
-func (s *fsStore) Current() (checkpoint.RemoteConfigSource, error) {
-return readRemoteConfigSource(s.fs, s.metaPath(currentFile))
+func (s *fsStore) Assigned() (checkpoint.RemoteConfigSource, error) {
+return readRemoteConfigSource(s.fs, s.metaPath(assignedFile))
}
func (s *fsStore) LastKnownGood() (checkpoint.RemoteConfigSource, error) {
return readRemoteConfigSource(s.fs, s.metaPath(lastKnownGoodFile))
}
-func (s *fsStore) SetCurrent(source checkpoint.RemoteConfigSource) error {
-return writeRemoteConfigSource(s.fs, s.metaPath(currentFile), source)
+func (s *fsStore) SetAssigned(source checkpoint.RemoteConfigSource) error {
+return writeRemoteConfigSource(s.fs, s.metaPath(assignedFile), source)
}
func (s *fsStore) SetLastKnownGood(source checkpoint.RemoteConfigSource) error {

View File

@@ -87,9 +87,9 @@ func TestFsStoreInitialize(t *testing.T) {
t.Fatalf("expect %q to exist, but stat failed with error: %v", store.checkpointPath(""), err)
}
-// check that currentFile exists
-if _, err := store.fs.Stat(store.metaPath(currentFile)); err != nil {
-t.Fatalf("expect %q to exist, but stat failed with error: %v", store.metaPath(currentFile), err)
+// check that assignedFile exists
+if _, err := store.fs.Stat(store.metaPath(assignedFile)); err != nil {
+t.Fatalf("expect %q to exist, but stat failed with error: %v", store.metaPath(assignedFile), err)
}
// check that lastKnownGoodFile exists
@@ -270,34 +270,34 @@ func TestFsStoreLoad(t *testing.T) {
}
}
-func TestFsStoreCurrentModified(t *testing.T) {
+func TestFsStoreAssignedModified(t *testing.T) {
store, err := newInitializedFakeFsStore()
if err != nil {
t.Fatalf("error constructing store: %v", err)
}
-// create an empty current file, this is good enough for testing
-saveTestSourceFile(t, store, currentFile, nil)
-// set the timestamps to the current time, so we can compare to result of store.CurrentModified
+// create an empty assigned file, this is good enough for testing
+saveTestSourceFile(t, store, assignedFile, nil)
+// set the timestamps to the current time, so we can compare to result of store.AssignedModified
now := time.Now()
-err = store.fs.Chtimes(store.metaPath(currentFile), now, now)
+err = store.fs.Chtimes(store.metaPath(assignedFile), now, now)
if err != nil {
t.Fatalf("could not change timestamps, error: %v", err)
}
// for now we hope that the system won't truncate the time to a less precise unit,
// if this test fails on certain systems that may be the reason.
-modTime, err := store.CurrentModified()
+modTime, err := store.AssignedModified()
if err != nil {
-t.Fatalf("unable to determine modification time of current config source, error: %v", err)
+t.Fatalf("unable to determine modification time of assigned config source, error: %v", err)
}
if !now.Equal(modTime) {
t.Errorf("expect %q but got %q", now.Format(time.RFC3339), modTime.Format(time.RFC3339))
}
}
-func TestFsStoreCurrent(t *testing.T) {
+func TestFsStoreAssigned(t *testing.T) {
store, err := newInitializedFakeFsStore()
if err != nil {
t.Fatalf("error constructing store: %v", err)
@@ -325,10 +325,10 @@ func TestFsStoreCurrent(t *testing.T) {
for _, c := range cases {
t.Run(c.desc, func(t *testing.T) {
// save the last known good source
-saveTestSourceFile(t, store, currentFile, c.expect)
+saveTestSourceFile(t, store, assignedFile, c.expect)
// load last-known-good and compare to expected result
-source, err := store.Current()
+source, err := store.Assigned()
utiltest.ExpectError(t, err, c.err)
if err != nil {
return
@@ -383,7 +383,7 @@ func TestFsStoreLastKnownGood(t *testing.T) {
}
}
-func TestFsStoreSetCurrent(t *testing.T) {
+func TestFsStoreSetAssigned(t *testing.T) {
store, err := newInitializedFakeFsStore()
if err != nil {
t.Fatalf("error constructing store: %v", err)
@ -409,15 +409,15 @@ source:
t.Fatalf("unexpected error: %v", err) t.Fatalf("unexpected error: %v", err)
} }
// save the current source // save the assigned source
if err := store.SetCurrent(source); err != nil { if err := store.SetAssigned(source); err != nil {
t.Fatalf("unexpected error: %v", err) t.Fatalf("unexpected error: %v", err)
} }
// check that the source was saved as we expect // check that the source was saved as we expect
data := readTestSourceFile(t, store, currentFile) data := readTestSourceFile(t, store, assignedFile)
if expect != string(data) { if expect != string(data) {
t.Errorf("expect current source file to contain %q, but got %q", expect, string(data)) t.Errorf("expect assigned source file to contain %q, but got %q", expect, string(data))
} }
} }
@ -485,7 +485,7 @@ func TestFsStoreReset(t *testing.T) {
} }
cases := []struct { cases := []struct {
desc string desc string
current checkpoint.RemoteConfigSource assigned checkpoint.RemoteConfigSource
lastKnownGood checkpoint.RemoteConfigSource lastKnownGood checkpoint.RemoteConfigSource
updated bool updated bool
}{ }{
@ -499,7 +499,7 @@ func TestFsStoreReset(t *testing.T) {
for _, c := range cases { for _, c := range cases {
t.Run(c.desc, func(t *testing.T) { t.Run(c.desc, func(t *testing.T) {
// manually save the sources to their respective files // manually save the sources to their respective files
saveTestSourceFile(t, store, currentFile, c.current) saveTestSourceFile(t, store, assignedFile, c.assigned)
saveTestSourceFile(t, store, lastKnownGoodFile, c.lastKnownGood) saveTestSourceFile(t, store, lastKnownGoodFile, c.lastKnownGood)
// reset // reset
@ -509,15 +509,15 @@ func TestFsStoreReset(t *testing.T) {
} }
// make sure the files were emptied // make sure the files were emptied
if size := testSourceFileSize(t, store, currentFile); size > 0 { if size := testSourceFileSize(t, store, assignedFile); size > 0 {
t.Errorf("case %q, expect source file %q to be empty but got %d bytes", c.desc, currentFile, size) t.Errorf("case %q, expect source file %q to be empty but got %d bytes", c.desc, assignedFile, size)
} }
if size := testSourceFileSize(t, store, lastKnownGoodFile); size > 0 { if size := testSourceFileSize(t, store, lastKnownGoodFile); size > 0 {
t.Errorf("case %q, expect source file %q to be empty but got %d bytes", c.desc, lastKnownGoodFile, size) t.Errorf("case %q, expect source file %q to be empty but got %d bytes", c.desc, lastKnownGoodFile, size)
} }
// make sure Current() and LastKnownGood() both return nil // make sure Assigned() and LastKnownGood() both return nil
current, err := store.Current() assigned, err := store.Assigned()
if err != nil { if err != nil {
t.Fatalf("unexpected error: %v", err) t.Fatalf("unexpected error: %v", err)
} }
@ -525,9 +525,9 @@ func TestFsStoreReset(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("unexpected error: %v", err) t.Fatalf("unexpected error: %v", err)
} }
if current != nil || lastKnownGood != nil { if assigned != nil || lastKnownGood != nil {
t.Errorf("case %q, expect nil for current and last-known-good checkpoints, but still have %q and %q, respectively", t.Errorf("case %q, expect nil for assigned and last-known-good checkpoints, but still have %q and %q, respectively",
c.desc, current, lastKnownGood) c.desc, assigned, lastKnownGood)
} }
if c.updated != updated { if c.updated != updated {
t.Errorf("case %q, expect reset to return %t, but got %t", c.desc, c.updated, updated) t.Errorf("case %q, expect reset to return %t, but got %t", c.desc, c.updated, updated)
View File
@ -24,7 +24,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint" "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint"
) )
// Store saves checkpoints and information about which is the current and last-known-good checkpoint to a storage layer // Store saves checkpoints and information about which is the assigned and last-known-good checkpoint to a storage layer
type Store interface { type Store interface {
// Initialize sets up the storage layer // Initialize sets up the storage layer
Initialize() error Initialize() error
@ -38,32 +38,32 @@ type Store interface {
// Load loads the KubeletConfiguration from the checkpoint referenced by `source`. // Load loads the KubeletConfiguration from the checkpoint referenced by `source`.
Load(source checkpoint.RemoteConfigSource) (*kubeletconfig.KubeletConfiguration, error) Load(source checkpoint.RemoteConfigSource) (*kubeletconfig.KubeletConfiguration, error)
// CurrentModified returns the last time that the current UID was set // AssignedModified returns the last time that the assigned checkpoint was set
CurrentModified() (time.Time, error) AssignedModified() (time.Time, error)
// Current returns the source that points to the current checkpoint, or nil if no current checkpoint is set // Assigned returns the source that points to the checkpoint currently assigned to the Kubelet, or nil if no assigned checkpoint is set
Current() (checkpoint.RemoteConfigSource, error) Assigned() (checkpoint.RemoteConfigSource, error)
// LastKnownGood returns the source that points to the last-known-good checkpoint, or nil if no last-known-good checkpoint is set // LastKnownGood returns the source that points to the last-known-good checkpoint, or nil if no last-known-good checkpoint is set
LastKnownGood() (checkpoint.RemoteConfigSource, error) LastKnownGood() (checkpoint.RemoteConfigSource, error)
// SetCurrent saves the source that points to the current checkpoint, set to nil to unset // SetAssigned saves the source that points to the assigned checkpoint, set to nil to unset
SetCurrent(source checkpoint.RemoteConfigSource) error SetAssigned(source checkpoint.RemoteConfigSource) error
// SetLastKnownGood saves the source that points to the last-known-good checkpoint, set to nil to unset // SetLastKnownGood saves the source that points to the last-known-good checkpoint, set to nil to unset
SetLastKnownGood(source checkpoint.RemoteConfigSource) error SetLastKnownGood(source checkpoint.RemoteConfigSource) error
// Reset unsets the current and last-known-good UIDs and returns whether the current UID was unset as a result of the reset // Reset unsets the assigned and last-known-good checkpoints and returns whether the assigned checkpoint was unset as a result of the reset
Reset() (bool, error) Reset() (bool, error)
} }
// reset is a helper for implementing Reset, which can be implemented in terms of Store methods // reset is a helper for implementing Reset, which can be implemented in terms of Store methods
func reset(s Store) (bool, error) { func reset(s Store) (bool, error) {
current, err := s.Current() assigned, err := s.Assigned()
if err != nil { if err != nil {
return false, err return false, err
} }
if err := s.SetLastKnownGood(nil); err != nil { if err := s.SetLastKnownGood(nil); err != nil {
return false, fmt.Errorf("failed to reset last-known-good UID in checkpoint store, error: %v", err) return false, fmt.Errorf("failed to reset last-known-good UID in checkpoint store, error: %v", err)
} }
if err := s.SetCurrent(nil); err != nil { if err := s.SetAssigned(nil); err != nil {
return false, fmt.Errorf("failed to reset current UID in checkpoint store, error: %v", err) return false, fmt.Errorf("failed to reset assigned UID in checkpoint store, error: %v", err)
} }
return current != nil, nil return assigned != nil, nil
} }
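For illustration, a minimal self-contained sketch of the Store subset that the reset helper depends on; memStore and remoteConfigSource are hypothetical stand-ins for the real store and checkpoint.RemoteConfigSource, not types from this change:

```go
package main

import "fmt"

// remoteConfigSource is a hypothetical stand-in for checkpoint.RemoteConfigSource.
type remoteConfigSource struct{ uid string }

// memStore is a hypothetical in-memory store exposing only the methods reset needs.
type memStore struct {
	assigned, lastKnownGood *remoteConfigSource
}

func (s *memStore) Assigned() (*remoteConfigSource, error)         { return s.assigned, nil }
func (s *memStore) SetAssigned(src *remoteConfigSource) error      { s.assigned = src; return nil }
func (s *memStore) SetLastKnownGood(src *remoteConfigSource) error { s.lastKnownGood = src; return nil }

// reset mirrors the helper above: unset both records and report whether an
// assigned checkpoint was unset as a result.
func reset(s *memStore) (bool, error) {
	assigned, err := s.Assigned()
	if err != nil {
		return false, err
	}
	if err := s.SetLastKnownGood(nil); err != nil {
		return false, fmt.Errorf("failed to reset last-known-good: %v", err)
	}
	if err := s.SetAssigned(nil); err != nil {
		return false, fmt.Errorf("failed to reset assigned: %v", err)
	}
	return assigned != nil, nil
}

func main() {
	s := &memStore{assigned: &remoteConfigSource{uid: "abc"}}
	updated, _ := reset(s)
	fmt.Println(updated) // true: an assigned checkpoint was unset
}
```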
View File
@ -48,21 +48,21 @@ func TestReset(t *testing.T) {
s *fakeStore s *fakeStore
updated bool updated bool
}{ }{
{&fakeStore{current: nil, lastKnownGood: nil}, false}, {&fakeStore{assigned: nil, lastKnownGood: nil}, false},
{&fakeStore{current: source, lastKnownGood: nil}, true}, {&fakeStore{assigned: source, lastKnownGood: nil}, true},
{&fakeStore{current: nil, lastKnownGood: source}, false}, {&fakeStore{assigned: nil, lastKnownGood: source}, false},
{&fakeStore{current: source, lastKnownGood: source}, true}, {&fakeStore{assigned: source, lastKnownGood: source}, true},
{&fakeStore{current: source, lastKnownGood: otherSource}, true}, {&fakeStore{assigned: source, lastKnownGood: otherSource}, true},
{&fakeStore{current: otherSource, lastKnownGood: source}, true}, {&fakeStore{assigned: otherSource, lastKnownGood: source}, true},
} }
for _, c := range cases { for _, c := range cases {
updated, err := reset(c.s) updated, err := reset(c.s)
if err != nil { if err != nil {
t.Fatalf("unexpected error: %v", err) t.Fatalf("unexpected error: %v", err)
} }
if c.s.current != nil || c.s.lastKnownGood != nil { if c.s.assigned != nil || c.s.lastKnownGood != nil {
t.Errorf("case %q, expect nil for current and last-known-good checkpoints, but still have %q and %q, respectively", t.Errorf("case %q, expect nil for assigned and last-known-good checkpoints, but still have %q and %q, respectively",
spew.Sdump(c.s), c.s.current, c.s.lastKnownGood) spew.Sdump(c.s), c.s.assigned, c.s.lastKnownGood)
} }
if c.updated != updated { if c.updated != updated {
t.Errorf("case %q, expect reset to return %t, but got %t", spew.Sdump(c.s), c.updated, updated) t.Errorf("case %q, expect reset to return %t, but got %t", spew.Sdump(c.s), c.updated, updated)
View File
@ -71,14 +71,14 @@ func (cc *Controller) syncConfigSource(client clientset.Interface, eventClient v
node, err := latestNode(cc.informer.GetStore(), nodeName) node, err := latestNode(cc.informer.GetStore(), nodeName)
if err != nil { if err != nil {
cc.configOk.SetFailSyncCondition(status.FailSyncReasonInformer) cc.configStatus.SetErrorOverride(fmt.Sprintf(status.SyncErrorFmt, status.InternalError))
syncerr = fmt.Errorf("%s, error: %v", status.FailSyncReasonInformer, err) syncerr = fmt.Errorf("%s, error: %v", status.InternalError, err)
return return
} }
// check the Node and download any new config // check the Node and download any new config
if updated, cur, reason, err := cc.doSyncConfigSource(client, node.Spec.ConfigSource); err != nil { if updated, cur, reason, err := cc.doSyncConfigSource(client, node.Spec.ConfigSource); err != nil {
cc.configOk.SetFailSyncCondition(reason) cc.configStatus.SetErrorOverride(fmt.Sprintf(status.SyncErrorFmt, reason))
syncerr = fmt.Errorf("%s, error: %v", reason, err) syncerr = fmt.Errorf("%s, error: %v", reason, err)
return return
} else if updated { } else if updated {
@ -99,8 +99,8 @@ func (cc *Controller) syncConfigSource(client clientset.Interface, eventClient v
// If we get here: // If we get here:
// - there is no need to restart to update the current config // - there is no need to restart to update the current config
// - there was no error trying to sync configuration // - there was no error trying to sync configuration
// - if, previously, there was an error trying to sync configuration, we need to clear that error from the condition // - if, previously, there was an error trying to sync configuration, we need to clear that error from the status
cc.configOk.ClearFailSyncCondition() cc.configStatus.SetErrorOverride("")
} }
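The override semantics here are worth spelling out: a non-empty string passed to SetErrorOverride masks whatever base error Bootstrap recorded, and passing the empty string un-masks it. A small hypothetical sketch of just that rule:

```go
package main

import "fmt"

// statusError is a hypothetical stand-in for the error fields managed by the
// status object: a base error set at Bootstrap and an override set by the sync loop.
type statusError struct {
	base, override string
}

// reported returns the error the status would publish: the override wins
// whenever it is non-empty, mirroring SetErrorOverride above.
func (e *statusError) reported() string {
	if len(e.override) > 0 {
		return e.override
	}
	return e.base
}

func main() {
	e := &statusError{base: "failed to validate config, see Kubelet log for details"}
	fmt.Println(e.reported()) // base error from Bootstrap

	e.override = "failed to sync: internal failure, see Kubelet log for details"
	fmt.Println(e.reported()) // sync failure masks the base error

	e.override = "" // sync succeeded, clear the override
	fmt.Println(e.reported()) // base error is visible again
}
```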
// doSyncConfigSource checkpoints and sets the store's current config to the new config or resets config, // doSyncConfigSource checkpoints and sets the store's current config to the new config or resets config,
@ -125,7 +125,7 @@ func (cc *Controller) doSyncConfigSource(client clientset.Interface, source *api
if err != nil { if err != nil {
return false, nil, reason, err return false, nil, reason, err
} }
updated, reason, err := cc.setCurrentConfig(remote) updated, reason, err := cc.setAssignedConfig(remote)
if err != nil { if err != nil {
return false, nil, reason, err return false, nil, reason, err
} }
@ -137,9 +137,9 @@ func (cc *Controller) doSyncConfigSource(client clientset.Interface, source *api
func (cc *Controller) checkpointConfigSource(client clientset.Interface, source checkpoint.RemoteConfigSource) (string, error) { func (cc *Controller) checkpointConfigSource(client clientset.Interface, source checkpoint.RemoteConfigSource) (string, error) {
// if the checkpoint already exists, skip downloading // if the checkpoint already exists, skip downloading
if ok, err := cc.checkpointStore.Exists(source); err != nil { if ok, err := cc.checkpointStore.Exists(source); err != nil {
reason := fmt.Sprintf(status.FailSyncReasonCheckpointExistenceFmt, source.APIPath(), source.UID()) return status.InternalError, fmt.Errorf("%s, error: %v", status.InternalError, err)
return reason, fmt.Errorf("%s, error: %v", reason, err)
} else if ok { } else if ok {
// TODO(mtaufen): update this to include ResourceVersion in #63221
utillog.Infof("checkpoint already exists for object %s with UID %s, skipping download", source.APIPath(), source.UID()) utillog.Infof("checkpoint already exists for object %s with UID %s, skipping download", source.APIPath(), source.UID())
return "", nil return "", nil
} }
@ -153,28 +153,21 @@ func (cc *Controller) checkpointConfigSource(client clientset.Interface, source
// save // save
err = cc.checkpointStore.Save(payload) err = cc.checkpointStore.Save(payload)
if err != nil { if err != nil {
reason := fmt.Sprintf(status.FailSyncReasonSaveCheckpointFmt, source.APIPath(), payload.UID()) return status.InternalError, fmt.Errorf("%s, error: %v", status.InternalError, err)
return reason, fmt.Errorf("%s, error: %v", reason, err)
} }
return "", nil return "", nil
} }
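checkpointConfigSource is an idempotent download: skip if the checkpoint already exists, otherwise download and save. A hypothetical, self-contained condensation of that flow:

```go
package main

import "fmt"

// checkpoints is a hypothetical local store of downloaded payloads keyed by UID.
var checkpoints = map[string]string{}

// ensureCheckpoint mirrors the flow above: if a checkpoint for uid already
// exists, skip the download; otherwise download the payload and save it.
func ensureCheckpoint(uid string, download func() (string, error)) error {
	if _, ok := checkpoints[uid]; ok {
		fmt.Println("checkpoint already exists, skipping download")
		return nil
	}
	payload, err := download()
	if err != nil {
		return fmt.Errorf("failed to download config: %v", err)
	}
	checkpoints[uid] = payload // save
	return nil
}

func main() {
	dl := func() (string, error) { return "kind: KubeletConfiguration", nil }
	_ = ensureCheckpoint("uid-1", dl) // downloads and saves
	_ = ensureCheckpoint("uid-1", dl) // prints the skip message
}
```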
// setCurrentConfig the current checkpoint config in the store // setAssignedConfig updates the assigned checkpoint config in the store.
// returns whether the current config changed as a result, or a sanitized failure reason and an error. // Returns whether the current config changed as a result, or a sanitized failure reason and an error.
func (cc *Controller) setCurrentConfig(source checkpoint.RemoteConfigSource) (bool, string, error) { func (cc *Controller) setAssignedConfig(source checkpoint.RemoteConfigSource) (bool, string, error) {
failReason := func(s checkpoint.RemoteConfigSource) string { current, err := cc.checkpointStore.Assigned()
if source == nil {
return status.FailSyncReasonSetCurrentLocal
}
return fmt.Sprintf(status.FailSyncReasonSetCurrentUIDFmt, source.APIPath(), source.UID())
}
current, err := cc.checkpointStore.Current()
if err != nil { if err != nil {
return false, failReason(source), err return false, status.InternalError, err
} }
if err := cc.checkpointStore.SetCurrent(source); err != nil { if err := cc.checkpointStore.SetAssigned(source); err != nil {
return false, failReason(source), err return false, status.InternalError, err
} }
return !checkpoint.EqualRemoteConfigSources(current, source), "", nil return !checkpoint.EqualRemoteConfigSources(current, source), "", nil
} }
@ -184,7 +177,7 @@ func (cc *Controller) setCurrentConfig(source checkpoint.RemoteConfigSource) (bo
func (cc *Controller) resetConfig() (bool, string, error) { func (cc *Controller) resetConfig() (bool, string, error) {
updated, err := cc.checkpointStore.Reset() updated, err := cc.checkpointStore.Reset()
if err != nil { if err != nil {
return false, status.FailSyncReasonReset, err return false, status.InternalError, err
} }
return updated, "", nil return updated, "", nil
} }
View File
@ -21,7 +21,6 @@ import (
"path/filepath" "path/filepath"
"time" "time"
apiv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
v1core "k8s.io/client-go/kubernetes/typed/core/v1" v1core "k8s.io/client-go/kubernetes/typed/core/v1"
@ -47,14 +46,11 @@ const (
// Controller manages syncing dynamic Kubelet configurations // Controller manages syncing dynamic Kubelet configurations
// For more information, see the proposal: https://github.com/kubernetes/community/blob/master/contributors/design-proposals/node/dynamic-kubelet-configuration.md // For more information, see the proposal: https://github.com/kubernetes/community/blob/master/contributors/design-proposals/node/dynamic-kubelet-configuration.md
type Controller struct { type Controller struct {
// defaultConfig is the configuration to use if no initConfig is provided
defaultConfig *kubeletconfig.KubeletConfiguration
// pendingConfigSource; write to this channel to indicate that the config source needs to be synced from the API server // pendingConfigSource; write to this channel to indicate that the config source needs to be synced from the API server
pendingConfigSource chan bool pendingConfigSource chan bool
// configOk manages the KubeletConfigOk condition that is reported in Node.Status.Conditions // configStatus manages the status we report on the Node object
configOk status.ConfigOkCondition configStatus status.NodeConfigStatus
// informer is the informer that watches the Node object // informer is the informer that watches the Node object
informer cache.SharedInformer informer cache.SharedInformer
@ -64,12 +60,11 @@ type Controller struct {
} }
// NewController constructs a new Controller object and returns it. Directory paths must be absolute. // NewController constructs a new Controller object and returns it. Directory paths must be absolute.
func NewController(defaultConfig *kubeletconfig.KubeletConfiguration, dynamicConfigDir string) *Controller { func NewController(dynamicConfigDir string) *Controller {
return &Controller{ return &Controller{
defaultConfig: defaultConfig,
// channels must have capacity at least 1, since we signal with non-blocking writes // channels must have capacity at least 1, since we signal with non-blocking writes
pendingConfigSource: make(chan bool, 1), pendingConfigSource: make(chan bool, 1),
configOk: status.NewConfigOkCondition(), configStatus: status.NewNodeConfigStatus(),
checkpointStore: store.NewFsStore(utilfs.DefaultFs{}, filepath.Join(dynamicConfigDir, storeDir)), checkpointStore: store.NewFsStore(utilfs.DefaultFs{}, filepath.Join(dynamicConfigDir, storeDir)),
} }
} }
@ -79,26 +74,39 @@ func NewController(defaultConfig *kubeletconfig.KubeletConfiguration, dynamicCon
func (cc *Controller) Bootstrap() (*kubeletconfig.KubeletConfiguration, error) { func (cc *Controller) Bootstrap() (*kubeletconfig.KubeletConfiguration, error) {
utillog.Infof("starting controller") utillog.Infof("starting controller")
// ALWAYS validate the local config. This makes incorrectly provisioned nodes an error.
// It must be valid because it is the default last-known-good config.
utillog.Infof("validating local config")
if err := validation.ValidateKubeletConfiguration(cc.defaultConfig); err != nil {
return nil, fmt.Errorf("local config failed validation, error: %v", err)
}
// ensure the filesystem is initialized // ensure the filesystem is initialized
if err := cc.initializeDynamicConfigDir(); err != nil { if err := cc.initializeDynamicConfigDir(); err != nil {
return nil, err return nil, err
} }
assigned, curSource, reason, err := cc.loadAssignedConfig(cc.defaultConfig) // determine assigned source and set status
assignedSource, err := cc.checkpointStore.Assigned()
if err != nil {
return nil, err
}
if assignedSource != nil {
cc.configStatus.SetAssigned(assignedSource.NodeConfigSource())
}
// determine last-known-good source and set status
lastKnownGoodSource, err := cc.checkpointStore.LastKnownGood()
if err != nil {
return nil, err
}
if lastKnownGoodSource != nil {
cc.configStatus.SetLastKnownGood(lastKnownGoodSource.NodeConfigSource())
}
// if the assigned source is nil, return nil to indicate local config
if assignedSource == nil {
return nil, nil
}
// attempt to load assigned config
assignedConfig, reason, err := cc.loadConfig(assignedSource)
if err == nil { if err == nil {
// set the status to indicate we will use the assigned config // update the active source to the non-nil assigned source
if curSource != nil { cc.configStatus.SetActive(assignedSource.NodeConfigSource())
cc.configOk.Set(fmt.Sprintf(status.CurRemoteMessageFmt, curSource.APIPath()), reason, apiv1.ConditionTrue)
} else {
cc.configOk.Set(status.CurLocalMessage, reason, apiv1.ConditionTrue)
}
// update the last-known-good config if necessary, and start a timer that // update the last-known-good config if necessary, and start a timer that
// periodically checks whether the last-known good needs to be updated // periodically checks whether the last-known good needs to be updated
@ -106,32 +114,34 @@ func (cc *Controller) Bootstrap() (*kubeletconfig.KubeletConfiguration, error) {
// wait.Forever will call the func once before starting the timer // wait.Forever will call the func once before starting the timer
go wait.Forever(func() { cc.checkTrial(configTrialDuration) }, 10*time.Second) go wait.Forever(func() { cc.checkTrial(configTrialDuration) }, 10*time.Second)
return assigned, nil return assignedConfig, nil
} // Assert: the assigned config failed to load, parse, or validate } // Assert: the assigned config failed to load or validate
// TODO(mtaufen): consider re-attempting download when a load/verify/parse/validate // TODO(mtaufen): consider re-attempting download when a load/verify/parse/validate
// error happens outside trial period, we already made it past the trial so it's probably filesystem corruption // error happens outside trial period, we already made it past the trial so it's probably filesystem corruption
// or something else scary (unless someone is using a 0-length trial period) // or something else scary
// load from checkpoint
// log the reason and error details for the failure to load the assigned config // log the reason and error details for the failure to load the assigned config
utillog.Errorf(fmt.Sprintf("%s, error: %v", reason, err)) utillog.Errorf(fmt.Sprintf("%s, error: %v", reason, err))
// load the last-known-good config // set status to indicate the failure with the assigned config
lkg, lkgSource, err := cc.loadLastKnownGoodConfig(cc.defaultConfig) cc.configStatus.SetError(reason)
// if the last-known-good source is nil, return nil to indicate local config
if lastKnownGoodSource == nil {
return nil, nil
}
// attempt to load the last-known-good config
lastKnownGoodConfig, _, err := cc.loadConfig(lastKnownGoodSource)
if err != nil { if err != nil {
// we failed to load the last-known-good, so something is really messed up and we just return the error
return nil, err return nil, err
} }
// set the status to indicate that we had to roll back to the lkg for the reason reported when we tried to load the assigned config // update the active source to the non-nil last-known-good source
if lkgSource != nil { cc.configStatus.SetActive(lastKnownGoodSource.NodeConfigSource())
cc.configOk.Set(fmt.Sprintf(status.LkgRemoteMessageFmt, lkgSource.APIPath()), reason, apiv1.ConditionFalse) return lastKnownGoodConfig, nil
} else {
cc.configOk.Set(status.LkgLocalMessage, reason, apiv1.ConditionFalse)
}
// return the last-known-good config
return lkg, nil
} }
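Condensed, the new Bootstrap is a three-way decision: no assigned source means local defaults, a healthy assigned config wins, and otherwise the node rolls back to last-known-good (which may itself be local defaults). A hypothetical, self-contained condensation of that flow; source, config, and load are stand-ins, not the real types:

```go
package main

import (
	"errors"
	"fmt"
)

// source and config are hypothetical stand-ins for the checkpoint source and
// KubeletConfiguration types handled by the real controller.
type source struct{ name string }
type config struct{ from string }

// load is a hypothetical loader that fails for sources marked bad.
func load(s *source) (*config, error) {
	if s.name == "bad" {
		return nil, errors.New("validation failed")
	}
	return &config{from: s.name}, nil
}

// bootstrap condenses the decision flow of Bootstrap above: prefer the
// assigned checkpoint, fall back to last-known-good on failure, and return a
// nil config to signal "use local defaults".
func bootstrap(assigned, lastKnownGood *source) (*config, error) {
	if assigned == nil {
		return nil, nil // no assigned source: use local defaults
	}
	if cfg, err := load(assigned); err == nil {
		return cfg, nil // assigned config loaded and validated
	}
	// the assigned config failed; roll back to last-known-good
	if lastKnownGood == nil {
		return nil, nil // last-known-good is the local defaults
	}
	return load(lastKnownGood) // if this also fails, surface the error
}

func main() {
	cfg, _ := bootstrap(&source{name: "bad"}, &source{name: "lkg"})
	fmt.Println(cfg.from) // lkg
}
```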
// StartSync launches the controller's sync loops if `client` is non-nil and `nodeName` is non-empty. // StartSync launches the controller's sync loops if `client` is non-nil and `nodeName` is non-empty.
@ -146,11 +156,11 @@ func (cc *Controller) StartSync(client clientset.Interface, eventClient v1core.E
return return
} }
// start the ConfigOk condition sync loop // start the status sync loop
go utilpanic.HandlePanic(func() { go utilpanic.HandlePanic(func() {
utillog.Infof("starting ConfigOk condition sync loop") utillog.Infof("starting status sync loop")
wait.JitterUntil(func() { wait.JitterUntil(func() {
cc.configOk.Sync(client, nodeName) cc.configStatus.Sync(client, nodeName)
}, 10*time.Second, 0.2, true, wait.NeverStop) }, 10*time.Second, 0.2, true, wait.NeverStop)
})() })()
@ -176,54 +186,18 @@ func (cc *Controller) StartSync(client clientset.Interface, eventClient v1core.E
} }
// loadAssignedConfig loads the Kubelet's currently assigned config, // loadConfig loads Kubelet config from a checkpoint
// based on the setting in the local checkpoint store. // It returns the loaded configuration or a clean failure reason (for status reporting) and an error.
// It returns the loaded configuration, the checkpoint store's config source record, func (cc *Controller) loadConfig(source checkpoint.RemoteConfigSource) (*kubeletconfig.KubeletConfiguration, string, error) {
// a clean success or failure reason that can be reported in the status, and any error that occurs.
// If the local config should be used, it will be returned. You should validate local before passing it to this function.
func (cc *Controller) loadAssignedConfig(local *kubeletconfig.KubeletConfiguration) (*kubeletconfig.KubeletConfiguration, checkpoint.RemoteConfigSource, string, error) {
source, err := cc.checkpointStore.Current()
if err != nil {
return nil, nil, fmt.Sprintf(status.CurFailLoadReasonFmt, "unknown"), err
}
// nil source is the signal to use the local config
if source == nil {
return local, source, status.CurLocalOkayReason, nil
}
// load KubeletConfiguration from checkpoint // load KubeletConfiguration from checkpoint
kc, err := cc.checkpointStore.Load(source) kc, err := cc.checkpointStore.Load(source)
if err != nil { if err != nil {
return nil, source, fmt.Sprintf(status.CurFailLoadReasonFmt, source.APIPath()), err return nil, status.LoadError, err
} }
if err := validation.ValidateKubeletConfiguration(kc); err != nil { if err := validation.ValidateKubeletConfiguration(kc); err != nil {
return nil, source, fmt.Sprintf(status.CurFailValidateReasonFmt, source.APIPath()), err return nil, status.ValidateError, err
} }
return kc, source, status.CurRemoteOkayReason, nil return kc, "", nil
}
// loadLastKnownGoodConfig loads the Kubelet's last-known-good config,
// based on the setting in the local checkpoint store.
// It returns the loaded configuration, the checkpoint store's config source record,
// and any error that occurs.
// If the local config should be used, it will be returned. You should validate local before passing it to this function.
func (cc *Controller) loadLastKnownGoodConfig(local *kubeletconfig.KubeletConfiguration) (*kubeletconfig.KubeletConfiguration, checkpoint.RemoteConfigSource, error) {
source, err := cc.checkpointStore.LastKnownGood()
if err != nil {
return nil, nil, fmt.Errorf("unable to determine last-known-good config, error: %v", err)
}
// nil source is the signal to use the local config
if source == nil {
return local, source, nil
}
// load from checkpoint
kc, err := cc.checkpointStore.Load(source)
if err != nil {
return nil, source, fmt.Errorf("%s, error: %v", fmt.Sprintf(status.LkgFailLoadReasonFmt, source.APIPath()), err)
}
if err := validation.ValidateKubeletConfiguration(kc); err != nil {
return nil, source, fmt.Errorf("%s, error: %v", fmt.Sprintf(status.LkgFailValidateReasonFmt, source.APIPath()), err)
}
return kc, source, nil
} }
// initializeDynamicConfigDir makes sure that the storage layers for various controller components are set up correctly // initializeDynamicConfigDir makes sure that the storage layers for various controller components are set up correctly
@ -246,10 +220,10 @@ func (cc *Controller) checkTrial(duration time.Duration) {
} }
} }
// inTrial returns true if the time elapsed since the last modification of the current config does not exceed `trialDur`, false otherwise // inTrial returns true if the time elapsed since the last modification of the assigned config does not exceed `trialDur`, false otherwise
func (cc *Controller) inTrial(trialDur time.Duration) (bool, error) { func (cc *Controller) inTrial(trialDur time.Duration) (bool, error) {
now := time.Now() now := time.Now()
t, err := cc.checkpointStore.CurrentModified() t, err := cc.checkpointStore.AssignedModified()
if err != nil { if err != nil {
return false, err return false, err
} }
@ -260,15 +234,19 @@ func (cc *Controller) inTrial(trialDur time.Duration) (bool, error) {
} }
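inTrial therefore reduces to one duration comparison; a minimal standalone sketch, with the store lookup replaced by a plain timestamp:

```go
package main

import (
	"fmt"
	"time"
)

// inTrial reports whether the time elapsed since the assigned config was last
// modified is still within the trial duration (hypothetical standalone version
// of the method above).
func inTrial(assignedModified time.Time, trialDur time.Duration) bool {
	return time.Since(assignedModified) <= trialDur
}

func main() {
	modified := time.Now().Add(-5 * time.Minute)
	fmt.Println(inTrial(modified, 10*time.Minute)) // true: still soaking
	fmt.Println(inTrial(modified, 2*time.Minute))  // false: trial period is over
}
```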
// graduateAssignedToLastKnownGood sets the last-known-good in the checkpointStore // graduateAssignedToLastKnownGood sets the last-known-good in the checkpointStore
// to the same value as the current config maintained by the checkpointStore // to the same value as the assigned config maintained by the checkpointStore
func (cc *Controller) graduateAssignedToLastKnownGood() error { func (cc *Controller) graduateAssignedToLastKnownGood() error {
current, err := cc.checkpointStore.Current() // get the assigned config
assigned, err := cc.checkpointStore.Assigned()
if err != nil { if err != nil {
return err return err
} }
err = cc.checkpointStore.SetLastKnownGood(current) // update the last-known-good config
err = cc.checkpointStore.SetLastKnownGood(assigned)
if err != nil { if err != nil {
return err return err
} }
// update the status to reflect the new last-known-good config
cc.configStatus.SetLastKnownGood(assigned.NodeConfigSource())
return nil return nil
} }
View File
@ -10,14 +10,11 @@ go_library(
srcs = ["status.go"], srcs = ["status.go"],
importpath = "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/status", importpath = "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/status",
deps = [ deps = [
"//pkg/api/legacyscheme:go_default_library",
"//pkg/apis/core:go_default_library",
"//pkg/kubelet/kubeletconfig/util/equal:go_default_library",
"//pkg/kubelet/kubeletconfig/util/log:go_default_library", "//pkg/kubelet/kubeletconfig/util/log:go_default_library",
"//pkg/util/node:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library",
], ],
View File

@ -19,298 +19,164 @@ package status
import ( import (
"fmt" "fmt"
"sync" "sync"
"time"
apiv1 "k8s.io/api/core/v1" apiv1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kuberuntime "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/strategicpatch"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/api/legacyscheme"
api "k8s.io/kubernetes/pkg/apis/core"
utilequal "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/equal"
utillog "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/log" utillog "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/log"
nodeutil "k8s.io/kubernetes/pkg/util/node"
) )
// TODO(mtaufen): s/current/assigned, as this is more accurate e.g. if you are using lkg, you aren't currently using "current" :)
const ( const (
// CurLocalMessage indicates that the Kubelet is using its local config, which consists of defaults, flags, and/or local files // LoadError indicates that the Kubelet failed to load the config checkpoint
CurLocalMessage = "using current: local" LoadError = "failed to load config, see Kubelet log for details"
// LkgLocalMessage indicates that the Kubelet is using its local config, which consists of defaults, flags, and/or local files // ValidateError indicates that the Kubelet failed to validate the config checkpoint
LkgLocalMessage = "using last-known-good: local" ValidateError = "failed to validate config, see Kubelet log for details"
// AllNilSubfieldsError is used when no subfields are set
// CurRemoteMessageFmt indicates the Kubelet is using its current config, which is from an API source // This could happen in the case that an old client tries to read an object from a newer API server with a set subfield it does not know about
CurRemoteMessageFmt = "using current: %s" AllNilSubfieldsError = "invalid NodeConfigSource, exactly one subfield must be non-nil, but all were nil"
// LkgRemoteMessageFmt indicates the Kubelet is using its last-known-good config, which is from an API source // UIDMismatchErrorFmt is used when there is a UID mismatch between the referenced and downloaded ConfigMaps,
LkgRemoteMessageFmt = "using last-known-good: %s"
// CurLocalOkayReason indicates that the Kubelet is using its local config
CurLocalOkayReason = "when the config source is nil, the Kubelet uses its local config"
// CurRemoteOkayReason indicates that the config referenced by Node.ConfigSource is currently passing all checks
CurRemoteOkayReason = "passing all checks"
// CurFailLoadReasonFmt indicates that the Kubelet failed to load the current config checkpoint for an API source
CurFailLoadReasonFmt = "failed to load current: %s"
// CurFailValidateReasonFmt indicates that the Kubelet failed to validate the current config checkpoint for an API source
CurFailValidateReasonFmt = "failed to validate current: %s"
// LkgFail*ReasonFmt reasons are currently used to print errors in the Kubelet log, but do not appear in Node.Status.Conditions
// LkgFailLoadReasonFmt indicates that the Kubelet failed to load the last-known-good config checkpoint for an API source
LkgFailLoadReasonFmt = "failed to load last-known-good: %s"
// LkgFailValidateReasonFmt indicates that the Kubelet failed to validate the last-known-good config checkpoint for an API source
LkgFailValidateReasonFmt = "failed to validate last-known-good: %s"
// FailSyncReasonFmt is used when the system couldn't sync the config, due to a malformed Node.Spec.ConfigSource, a download failure, etc.
FailSyncReasonFmt = "failed to sync, reason: %s"
// FailSyncReasonAllNilSubfields is used when no subfields are set
FailSyncReasonAllNilSubfields = "invalid NodeConfigSource, exactly one subfield must be non-nil, but all were nil"
// FailSyncReasonPartialConfigMapSource is used when some required subfields remain unset
FailSyncReasonPartialConfigMapSource = "invalid ConfigSource.ConfigMap, all of UID, Name, Namespace, and KubeletConfigKey must be specified"
// FailSyncReasonUIDMismatchFmt is used when there is a UID mismatch between the referenced and downloaded ConfigMaps,
// this can happen because objects must be downloaded by namespace/name, rather than by UID // this can happen because objects must be downloaded by namespace/name, rather than by UID
FailSyncReasonUIDMismatchFmt = "invalid ConfigSource.ConfigMap.UID: %s does not match %s.UID: %s" // TODO(mtaufen): remove this in #63221
// FailSyncReasonDownloadFmt is used when the download fails, e.g. due to network issues UIDMismatchErrorFmt = "invalid ConfigSource.ConfigMap.UID: %s does not match %s.UID: %s"
FailSyncReasonDownloadFmt = "failed to download: %s" // DownloadError is used when the download fails, e.g. due to network issues
// FailSyncReasonInformer is used when the informer fails to report the Node object DownloadError = "failed to download config, see Kubelet log for details"
FailSyncReasonInformer = "failed to read Node from informer object cache" // InternalError indicates that some internal error happened while trying to sync config, e.g. filesystem issues
// FailSyncReasonReset is used when we can't reset the local configuration references, e.g. due to filesystem issues InternalError = "internal failure, see Kubelet log for details"
FailSyncReasonReset = "failed to reset to local config"
// FailSyncReasonCheckpointExistenceFmt is used when we can't determine if a checkpoint already exists, e.g. due to filesystem issues
FailSyncReasonCheckpointExistenceFmt = "failed to determine whether object %s with UID %s was already checkpointed"
// FailSyncReasonSaveCheckpointFmt is used when we can't save a checkpoint, e.g. due to filesystem issues
FailSyncReasonSaveCheckpointFmt = "failed to save config checkpoint for object %s with UID %s"
// FailSyncReasonSetCurrentDefault is used when we can't set the current config checkpoint to the local default, e.g. due to filesystem issues
FailSyncReasonSetCurrentLocal = "failed to set current config checkpoint to local config"
// FailSyncReasonSetCurrentUIDFmt is used when we can't set the current config checkpoint to a checkpointed object, e.g. due to filesystem issues
FailSyncReasonSetCurrentUIDFmt = "failed to set current config checkpoint to object %s with UID %s"
// EmptyMessage is a placeholder in the case that we accidentally set the condition's message to the empty string. // SyncErrorFmt is used when the system couldn't sync the config, due to a malformed Node.Spec.ConfigSource, a download failure, etc.
// Doing so can result in a partial patch, and thus a confusing status; this makes it clear that the message was not provided. SyncErrorFmt = "failed to sync: %s"
EmptyMessage = "unknown - message not provided"
// EmptyReason is a placeholder in the case that we accidentally set the condition's reason to the empty string.
// Doing so can result in a partial patch, and thus a confusing status; this makes it clear that the reason was not provided.
EmptyReason = "unknown - reason not provided"
) )
// ConfigOkCondition represents a ConfigOk NodeCondition // NodeConfigStatus represents Node.Status.Config
type ConfigOkCondition interface { type NodeConfigStatus interface {
// Set sets the Message, Reason, and Status of the condition // SetActive sets the active source in the status
Set(message, reason string, status apiv1.ConditionStatus) SetActive(source *apiv1.NodeConfigSource)
// SetFailSyncCondition sets the condition for when syncing Kubelet config fails // SetAssigned sets the assigned source in the status
SetFailSyncCondition(reason string) SetAssigned(source *apiv1.NodeConfigSource)
// ClearFailSyncCondition clears the overlay from SetFailSyncCondition // SetLastKnownGood sets the last-known-good source in the status
ClearFailSyncCondition() SetLastKnownGood(source *apiv1.NodeConfigSource)
// Sync patches the current condition into the Node identified by `nodeName` // SetError sets the error associated with the status
SetError(err string)
// SetErrorOverride sets an error that overrides the base error set by SetError.
// If the override is set to the empty string, the base error is reported in
// the status, otherwise the override is reported.
SetErrorOverride(err string)
// Sync patches the current status into the Node identified by `nodeName` if an update is pending
Sync(client clientset.Interface, nodeName string) Sync(client clientset.Interface, nodeName string)
} }
// configOkCondition implements ConfigOkCondition type nodeConfigStatus struct {
type configOkCondition struct { // status is the core NodeConfigStatus that we report
// conditionMux is a mutex on the condition, alternate between setting and syncing the condition status apiv1.NodeConfigStatus
conditionMux sync.Mutex // mux is a mutex on the nodeConfigStatus, alternate between setting and syncing the status
// message will appear as the condition's message mux sync.Mutex
message string // errorOverride is sent in place of the usual error if it is non-empty
// reason will appear as the condition's reason errorOverride string
reason string // syncCh; write to this channel to indicate that the status needs to be synced to the API server
// status will appear as the condition's status syncCh chan bool
status apiv1.ConditionStatus
// failedSyncReason is sent in place of the usual reason when the Kubelet is failing to sync the remote config
failedSyncReason string
// pendingCondition; write to this channel to indicate that ConfigOk needs to be synced to the API server
pendingCondition chan bool
} }
// NewConfigOkCondition returns a new ConfigOkCondition // NewNodeConfigStatus returns a new NodeConfigStatus interface
func NewConfigOkCondition() ConfigOkCondition { func NewNodeConfigStatus() NodeConfigStatus {
return &configOkCondition{ return &nodeConfigStatus{
message: EmptyMessage,
reason: EmptyReason,
status: apiv1.ConditionUnknown,
// channels must have capacity at least 1, since we signal with non-blocking writes // channels must have capacity at least 1, since we signal with non-blocking writes
pendingCondition: make(chan bool, 1), syncCh: make(chan bool, 1),
} }
} }
// unsafeSet sets the current state of the condition // transact grabs the lock, performs the fn, records the need to sync, and releases the lock
// it does not grab the conditionMux lock, so you should generally use setConfigOk unless you need to grab the lock func (s *nodeConfigStatus) transact(fn func()) {
// at a higher level to synchronize additional operations s.mux.Lock()
func (c *configOkCondition) unsafeSet(message, reason string, status apiv1.ConditionStatus) { defer s.mux.Unlock()
// We avoid an empty Message, Reason, or Status on the condition. Since we use Patch to update conditions, an empty fn()
// field might cause a value from a previous condition to leak through, which can be very confusing. s.sync()
// message
if len(message) == 0 {
message = EmptyMessage
}
c.message = message
// reason
if len(reason) == 0 {
reason = EmptyReason
}
c.reason = reason
// status
if len(string(status)) == 0 {
status = apiv1.ConditionUnknown
}
c.status = status
// always poke worker after update
c.pokeSyncWorker()
} }
func (c *configOkCondition) Set(message, reason string, status apiv1.ConditionStatus) { func (s *nodeConfigStatus) SetAssigned(source *apiv1.NodeConfigSource) {
c.conditionMux.Lock() s.transact(func() {
defer c.conditionMux.Unlock() s.status.Assigned = source
c.unsafeSet(message, reason, status) })
} }
// SetFailSyncCondition updates the ConfigOk status to reflect that we failed to sync to the latest config, func (s *nodeConfigStatus) SetActive(source *apiv1.NodeConfigSource) {
// e.g. due to a malformed Node.Spec.ConfigSource, a download failure, etc. s.transact(func() {
func (c *configOkCondition) SetFailSyncCondition(reason string) { s.status.Active = source
c.conditionMux.Lock() })
defer c.conditionMux.Unlock()
// set the reason overlay and poke the sync worker to send the update
c.failedSyncReason = fmt.Sprintf(FailSyncReasonFmt, reason)
c.pokeSyncWorker()
} }
// ClearFailSyncCondition removes the "failed to sync" reason overlay func (s *nodeConfigStatus) SetLastKnownGood(source *apiv1.NodeConfigSource) {
func (c *configOkCondition) ClearFailSyncCondition() { s.transact(func() {
c.conditionMux.Lock() s.status.LastKnownGood = source
defer c.conditionMux.Unlock() })
// clear the reason overlay and poke the sync worker to send the update
c.failedSyncReason = ""
c.pokeSyncWorker()
} }
// pokeSyncWorker notes that the ConfigOk condition needs to be synced to the API server func (s *nodeConfigStatus) SetError(err string) {
func (c *configOkCondition) pokeSyncWorker() { s.transact(func() {
s.status.Error = err
})
}
func (s *nodeConfigStatus) SetErrorOverride(err string) {
s.transact(func() {
s.errorOverride = err
})
}
// sync notes that the status needs to be synced to the API server
func (s *nodeConfigStatus) sync() {
select { select {
case c.pendingCondition <- true: case s.syncCh <- true:
default: default:
} }
} }
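The syncCh idiom above (capacity-1 channel, non-blocking send) coalesces any burst of status updates into at most one pending sync. A self-contained sketch of the same pattern, under hypothetical names:

```go
package main

import "fmt"

// dirty coalesces update notifications into at most one pending signal:
// the channel has capacity 1 and sends are non-blocking, mirroring syncCh.
type dirty struct{ ch chan bool }

func newDirty() *dirty { return &dirty{ch: make(chan bool, 1)} }

// mark notes that a sync is needed; marks made while one is already pending
// are silently dropped.
func (d *dirty) mark() {
	select {
	case d.ch <- true:
	default:
	}
}

// pending consumes and reports whether a sync is needed, without blocking.
func (d *dirty) pending() bool {
	select {
	case <-d.ch:
		return true
	default:
		return false
	}
}

func main() {
	d := newDirty()
	d.mark()
	d.mark()                 // coalesced with the first mark
	fmt.Println(d.pending()) // true: one sync to perform
	fmt.Println(d.pending()) // false: nothing left pending
}
```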
// Sync attempts to sync `c.condition` with the Node object for this Kubelet, // Sync attempts to sync the status with the Node object for this Kubelet,
// if syncing fails, an error is logged, and work is queued for retry. // if syncing fails, an error is logged, and work is queued for retry.
func (c *configOkCondition) Sync(client clientset.Interface, nodeName string) { func (s *nodeConfigStatus) Sync(client clientset.Interface, nodeName string) {
select { select {
case <-c.pendingCondition: case <-s.syncCh:
default: default:
// no work to be done, return // no work to be done, return
return return
} }
// grab the lock // grab the lock
c.conditionMux.Lock() s.mux.Lock()
defer c.conditionMux.Unlock() defer s.mux.Unlock()
// if the sync fails, we want to retry // if the sync fails, we want to retry
var err error var err error
defer func() { defer func() {
if err != nil { if err != nil {
utillog.Errorf(err.Error()) utillog.Errorf(err.Error())
c.pokeSyncWorker() s.sync()
} }
}() }()
// get the Node so we can check the current condition // get the Node so we can check the current status
node, err := client.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) oldNode, err := client.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
if err != nil { if err != nil {
err = fmt.Errorf("could not get Node %q, will not sync ConfigOk condition, error: %v", nodeName, err) err = fmt.Errorf("could not get Node %q, will not sync status, error: %v", nodeName, err)
return return
} }
// construct the node condition status := &s.status
condition := &apiv1.NodeCondition{ // override error, if necessary
Type: apiv1.NodeKubeletConfigOk, if len(s.errorOverride) > 0 {
Message: c.message, // copy the status, so we don't overwrite the prior error
Reason: c.reason, // with the override
Status: c.status, status = status.DeepCopy()
status.Error = s.errorOverride
} }
// overlay failed sync reason, if necessary // apply the status to a copy of the node so we don't modify the object in the informer's store
if len(c.failedSyncReason) > 0 { newNode := oldNode.DeepCopy()
condition.Reason = c.failedSyncReason newNode.Status.Config = status
condition.Status = apiv1.ConditionFalse
}
// set timestamps // patch the node with the new status
syncTime := metav1.NewTime(time.Now()) if _, _, err := nodeutil.PatchNodeStatus(client.CoreV1(), types.NodeName(nodeName), oldNode, newNode); err != nil {
condition.LastHeartbeatTime = syncTime utillog.Errorf("failed to patch node status, error: %v", err)
if remote := getKubeletConfigOk(node.Status.Conditions); remote == nil || !utilequal.KubeletConfigOkEq(remote, condition) {
// update transition time the first time we create the condition,
// or if we are semantically changing the condition
condition.LastTransitionTime = syncTime
} else {
// since the conditions are semantically equal, use lastTransitionTime from the condition currently on the Node
// we need to do this because the field will always be represented in the patch generated below, and this copy
// prevents nullifying the field during the patch operation
condition.LastTransitionTime = remote.LastTransitionTime
}
// generate the patch
mediaType := "application/json"
info, ok := kuberuntime.SerializerInfoForMediaType(legacyscheme.Codecs.SupportedMediaTypes(), mediaType)
if !ok {
err = fmt.Errorf("unsupported media type %q", mediaType)
return
}
versions := legacyscheme.Scheme.PrioritizedVersionsForGroup(api.GroupName)
if len(versions) == 0 {
err = fmt.Errorf("no enabled versions for group %q", api.GroupName)
return
}
// the "best" version supposedly comes first in the list returned from apiv1.Registry.EnabledVersionsForGroup
encoder := legacyscheme.Codecs.EncoderForVersion(info.Serializer, versions[0])
before, err := kuberuntime.Encode(encoder, node)
if err != nil {
err = fmt.Errorf(`failed to encode "before" node while generating patch, error: %v`, err)
return
}
patchConfigOk(node, condition)
after, err := kuberuntime.Encode(encoder, node)
if err != nil {
err = fmt.Errorf(`failed to encode "after" node while generating patch, error: %v`, err)
return
}
patch, err := strategicpatch.CreateTwoWayMergePatch(before, after, apiv1.Node{})
if err != nil {
err = fmt.Errorf("failed to generate patch for updating ConfigOk condition, error: %v", err)
return
}
// patch the remote Node object
_, err = client.CoreV1().Nodes().PatchStatus(nodeName, patch)
if err != nil {
err = fmt.Errorf("could not update ConfigOk condition, error: %v", err)
return
} }
} }
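The two DeepCopy calls in Sync serve different goals: copying the status keeps the override from clobbering the stored base error, and copying the node avoids mutating the shared object in the informer cache before the patch is computed. A generic sketch of the copy-before-mutate rule, using a hypothetical cached type:

```go
package main

import "fmt"

// node is a hypothetical stand-in for the API object held in an informer cache.
type node struct{ status string }

func main() {
	cached := &node{status: "old"} // shared with every other reader of the cache
	newNode := *cached             // copy before mutating (DeepCopy in the real code)
	newNode.status = "new"         // change only the copy

	// the real code then computes a patch from cached -> &newNode and sends it
	fmt.Println(cached.status, newNode.status) // old new: the cache is untouched
}
```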
// patchConfigOk replaces or adds the ConfigOk condition to the node
func patchConfigOk(node *apiv1.Node, configOk *apiv1.NodeCondition) {
for i := range node.Status.Conditions {
if node.Status.Conditions[i].Type == apiv1.NodeKubeletConfigOk {
// edit the condition
node.Status.Conditions[i] = *configOk
return
}
}
// append the condition
node.Status.Conditions = append(node.Status.Conditions, *configOk)
}
// getKubeletConfigOk returns the first NodeCondition in `cs` with Type == apiv1.NodeKubeletConfigOk,
// or if no such condition exists, returns nil.
func getKubeletConfigOk(cs []apiv1.NodeCondition) *apiv1.NodeCondition {
for i := range cs {
if cs[i].Type == apiv1.NodeKubeletConfigOk {
return &cs[i]
}
}
return nil
}
View File
@ -54,7 +54,7 @@ type REST struct {
proxyTransport http.RoundTripper proxyTransport http.RoundTripper
} }
// StatusREST implements the REST endpoint for changing the status of a pod. // StatusREST implements the REST endpoint for changing the status of a node.
type StatusREST struct { type StatusREST struct {
store *genericregistry.Store store *genericregistry.Store
} }
View File
@ -127,14 +127,23 @@ type nodeStatusStrategy struct {
var StatusStrategy = nodeStatusStrategy{Strategy} var StatusStrategy = nodeStatusStrategy{Strategy}
func (nodeStatusStrategy) PrepareForCreate(ctx context.Context, obj runtime.Object) { func (nodeStatusStrategy) PrepareForCreate(ctx context.Context, obj runtime.Object) {
_ = obj.(*api.Node) node := obj.(*api.Node)
// Nodes allow *all* fields, including status, to be set on create. // Nodes allow *all* fields, including status, to be set on create.
if !utilfeature.DefaultFeatureGate.Enabled(features.DynamicKubeletConfig) {
node.Status.Config = nil
}
} }
func (nodeStatusStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Object) { func (nodeStatusStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Object) {
newNode := obj.(*api.Node) newNode := obj.(*api.Node)
oldNode := old.(*api.Node) oldNode := old.(*api.Node)
newNode.Spec = oldNode.Spec newNode.Spec = oldNode.Spec
if !utilfeature.DefaultFeatureGate.Enabled(features.DynamicKubeletConfig) {
newNode.Status.Config = nil
oldNode.Status.Config = nil
}
} }
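The strategy change follows the usual pattern for gating a new status field: when DynamicKubeletConfig is disabled, Status.Config is dropped from both the incoming and stored objects, so disabled clusters never persist it. A hypothetical standalone sketch of that rule:

```go
package main

import "fmt"

// nodeStatus is a hypothetical stand-in for api.NodeStatus.
type nodeStatus struct {
	Config *string
}

// prepareForUpdate mirrors the strategy above: with the gate off, the new
// field is cleared on both objects, so it can neither be written nor show up
// as a spurious diff against existing data.
func prepareForUpdate(gateEnabled bool, newStatus, oldStatus *nodeStatus) {
	if !gateEnabled {
		newStatus.Config = nil
		oldStatus.Config = nil
	}
}

func main() {
	cfg := "assigned: example ConfigMap"
	newS, oldS := &nodeStatus{Config: &cfg}, &nodeStatus{Config: &cfg}
	prepareForUpdate(false, newS, oldS)
	fmt.Println(newS.Config, oldS.Config) // <nil> <nil>
}
```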
func (nodeStatusStrategy) ValidateUpdate(ctx context.Context, obj, old runtime.Object) field.ErrorList { func (nodeStatusStrategy) ValidateUpdate(ctx context.Context, obj, old runtime.Object) field.ErrorList {

File diff suppressed because it is too large
View File
@ -1847,6 +1847,56 @@ message NodeConfigSource {
optional ConfigMapNodeConfigSource configMap = 2; optional ConfigMapNodeConfigSource configMap = 2;
} }
// NodeConfigStatus describes the status of the config assigned by Node.Spec.ConfigSource.
message NodeConfigStatus {
// Assigned reports the checkpointed config the node will try to use.
// When Node.Spec.ConfigSource is updated, the node checkpoints the associated
// config payload to local disk, along with a record indicating intended
// config. The node refers to this record to choose its config checkpoint, and
// reports this record in Assigned. Assigned only updates in the status after
// the record has been checkpointed to disk. When the Kubelet is restarted,
// it tries to make the Assigned config the Active config by loading and
// validating the checkpointed payload identified by Assigned.
// +optional
optional NodeConfigSource assigned = 1;
// Active reports the checkpointed config the node is actively using.
// Active will represent either the current version of the Assigned config,
// or the current LastKnownGood config, depending on whether attempting to use the
// Assigned config results in an error.
// +optional
optional NodeConfigSource active = 2;
// LastKnownGood reports the checkpointed config the node will fall back to
// when it encounters an error attempting to use the Assigned config.
// The Assigned config becomes the LastKnownGood config when the node determines
// that the Assigned config is stable and correct.
// This is currently implemented as a 10-minute soak period starting when the local
// record of Assigned config is updated. If the Assigned config is Active at the end
// of this period, it becomes the LastKnownGood. Note that if Spec.ConfigSource is
// reset to nil (use local defaults), the LastKnownGood is also immediately reset to nil,
// because the local default config is always assumed good.
// You should not make assumptions about the node's method of determining config stability
// and correctness, as this may change or become configurable in the future.
// +optional
optional NodeConfigSource lastKnownGood = 3;
// Error describes any problems reconciling the Spec.ConfigSource to the Active config.
// Errors may occur, for example, attempting to checkpoint Spec.ConfigSource to the local Assigned
// record, attempting to checkpoint the payload associated with Spec.ConfigSource, attempting
// to load or validate the Assigned config, etc.
// Errors may occur at different points while syncing config. Earlier errors (e.g. download or
// checkpointing errors) will not result in a rollback to LastKnownGood, and may resolve across
// Kubelet retries. Later errors (e.g. loading or validating a checkpointed config) will result in
// a rollback to LastKnownGood. In the latter case, it is usually possible to resolve the error
// by fixing the config assigned in Spec.ConfigSource.
// You can find additional information for debugging by searching the error message in the Kubelet log.
// Error is a human-readable description of the error state; machines can check whether or not Error
// is empty, but should not rely on the stability of the Error text across Kubelet versions.
// +optional
optional string error = 4;
}
// NodeDaemonEndpoints lists ports opened by daemons running on the Node. // NodeDaemonEndpoints lists ports opened by daemons running on the Node.
message NodeDaemonEndpoints { message NodeDaemonEndpoints {
// Endpoint on which Kubelet is listening. // Endpoint on which Kubelet is listening.
@ -2001,6 +2051,10 @@ message NodeStatus {
// List of volumes that are attached to the node. // List of volumes that are attached to the node.
// +optional // +optional
repeated AttachedVolume volumesAttached = 10; repeated AttachedVolume volumesAttached = 10;
// Status of the config assigned to the node via the dynamic Kubelet config feature.
// +optional
optional NodeConfigStatus config = 11;
} }
// NodeSystemInfo is a set of ids/uuids to uniquely identify the node. // NodeSystemInfo is a set of ids/uuids to uniquely identify the node.
View File
@ -3732,6 +3732,53 @@ type NodeSystemInfo struct {
Architecture string `json:"architecture" protobuf:"bytes,10,opt,name=architecture"` Architecture string `json:"architecture" protobuf:"bytes,10,opt,name=architecture"`
} }
// NodeConfigStatus describes the status of the config assigned by Node.Spec.ConfigSource.
type NodeConfigStatus struct {
// Assigned reports the checkpointed config the node will try to use.
// When Node.Spec.ConfigSource is updated, the node checkpoints the associated
// config payload to local disk, along with a record indicating intended
// config. The node refers to this record to choose its config checkpoint, and
// reports this record in Assigned. Assigned only updates in the status after
// the record has been checkpointed to disk. When the Kubelet is restarted,
// it tries to make the Assigned config the Active config by loading and
// validating the checkpointed payload identified by Assigned.
// +optional
Assigned *NodeConfigSource `json:"assigned,omitempty" protobuf:"bytes,1,opt,name=assigned"`
// Active reports the checkpointed config the node is actively using.
// Active will represent either the current version of the Assigned config,
// or the current LastKnownGood config, depending on whether attempting to use the
// Assigned config results in an error.
// +optional
Active *NodeConfigSource `json:"active,omitempty" protobuf:"bytes,2,opt,name=active"`
// LastKnownGood reports the checkpointed config the node will fall back to
// when it encounters an error attempting to use the Assigned config.
// The Assigned config becomes the LastKnownGood config when the node determines
// that the Assigned config is stable and correct.
// This is currently implemented as a 10-minute soak period starting when the local
// record of Assigned config is updated. If the Assigned config is Active at the end
// of this period, it becomes the LastKnownGood. Note that if Spec.ConfigSource is
// reset to nil (use local defaults), the LastKnownGood is also immediately reset to nil,
// because the local default config is always assumed good.
// You should not make assumptions about the node's method of determining config stability
// and correctness, as this may change or become configurable in the future.
// +optional
LastKnownGood *NodeConfigSource `json:"lastKnownGood,omitempty" protobuf:"bytes,3,opt,name=lastKnownGood"`
// Error describes any problems reconciling the Spec.ConfigSource to the Active config.
// Errors may occur, for example, attempting to checkpoint Spec.ConfigSource to the local Assigned
// record, attempting to checkpoint the payload associated with Spec.ConfigSource, attempting
// to load or validate the Assigned config, etc.
// Errors may occur at different points while syncing config. Earlier errors (e.g. download or
// checkpointing errors) will not result in a rollback to LastKnownGood, and may resolve across
// Kubelet retries. Later errors (e.g. loading or validating a checkpointed config) will result in
// a rollback to LastKnownGood. In the latter case, it is usually possible to resolve the error
// by fixing the config assigned in Spec.ConfigSource.
// You can find additional information for debugging by searching the error message in the Kubelet log.
// Error is a human-readable description of the error state; machines can check whether or not Error
// is empty, but should not rely on the stability of the Error text across Kubelet versions.
// +optional
Error string `json:"error,omitempty" protobuf:"bytes,4,opt,name=error"`
}
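The point of the structured status is that orchestration tooling can compare these fields directly instead of parsing condition strings. Below is a minimal sketch of such a convergence check, assuming a client-go clientset of the same vintage as this change; the helper name waitForConfigConvergence and the polling details are illustrative, not part of this commit.

package main

import (
	"fmt"
	"time"

	apiequality "k8s.io/apimachinery/pkg/api/equality"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// waitForConfigConvergence polls a node's structured config status until the
// source assigned in Node.Spec.ConfigSource is both Assigned and Active,
// returning early if the node reports a reconciliation error.
func waitForConfigConvergence(cs kubernetes.Interface, nodeName string, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		node, err := cs.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
		if err != nil {
			return err
		}
		if s := node.Status.Config; s != nil {
			// Error is human-readable; machines should only test for non-empty.
			if s.Error != "" {
				return fmt.Errorf("node %q reported config error: %s", nodeName, s.Error)
			}
			if apiequality.Semantic.DeepEqual(node.Spec.ConfigSource, s.Assigned) &&
				apiequality.Semantic.DeepEqual(s.Assigned, s.Active) {
				return nil // the intended config is checkpointed and in use
			}
		}
		time.Sleep(time.Second)
	}
	return fmt.Errorf("timed out waiting for node %q to converge", nodeName)
}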
// NodeStatus is information about the current status of a node.
type NodeStatus struct {
	// Capacity represents the total resources of a node.
@@ -3776,6 +3823,9 @@ type NodeStatus struct {
	// List of volumes that are attached to the node.
	// +optional
	VolumesAttached []AttachedVolume `json:"volumesAttached,omitempty" protobuf:"bytes,10,rep,name=volumesAttached"`
// Status of the config assigned to the node via the dynamic Kubelet config feature.
// +optional
Config *NodeConfigStatus `json:"config,omitempty" protobuf:"bytes,11,opt,name=config"`
}
type UniqueVolumeName string
@@ -3863,8 +3913,6 @@ const (
	NodePIDPressure NodeConditionType = "PIDPressure"
	// NodeNetworkUnavailable means that network for the node is not correctly configured.
	NodeNetworkUnavailable NodeConditionType = "NetworkUnavailable"
// NodeKubeletConfigOk indicates whether the kubelet is correctly configured
NodeKubeletConfigOk NodeConditionType = "KubeletConfigOk"
)
// NodeCondition contains condition information for a node.
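With the NodeKubeletConfigOk condition removed above, callers that used to scan Node.Status.Conditions for it should read the structured field instead. A minimal sketch of the replacement lookup, assuming the v1 types from this change; the helper name kubeletConfigError is illustrative:

package main

import v1 "k8s.io/api/core/v1"

// kubeletConfigError reads the machine-checkable error from the structured
// status, replacing the old scan for the NodeKubeletConfigOk condition.
func kubeletConfigError(node *v1.Node) (string, bool) {
	if node.Status.Config == nil {
		// The Kubelet has not reported config status (e.g. the dynamic
		// config feature gate is off, or the status has not synced yet).
		return "", false
	}
	return node.Status.Config.Error, true
}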

View File

@@ -990,6 +990,18 @@ func (NodeConfigSource) SwaggerDoc() map[string]string {
	return map_NodeConfigSource
}
var map_NodeConfigStatus = map[string]string{
"": "NodeConfigStatus describes the status of the config assigned by Node.Spec.ConfigSource.",
"assigned": "Assigned reports the checkpointed config the node will try to use. When Node.Spec.ConfigSource is updated, the node checkpoints the associated config payload to local disk, along with a record indicating intended config. The node refers to this record to choose its config checkpoint, and reports this record in Assigned. Assigned only updates in the status after the record has been checkpointed to disk. When the Kubelet is restarted, it tries to make the Assigned config the Active config by loading and validating the checkpointed payload identified by Assigned.",
"active": "Active reports the checkpointed config the node is actively using. Active will represent either the current version of the Assigned config, or the current LastKnownGood config, depending on whether attempting to use the Assigned config results in an error.",
"lastKnownGood": "LastKnownGood reports the checkpointed config the node will fall back to when it encounters an error attempting to use the Assigned config. The Assigned config becomes the LastKnownGood config when the node determines that the Assigned config is stable and correct. This is currently implemented as a 10-minute soak period starting when the local record of Assigned config is updated. If the Assigned config is Active at the end of this period, it becomes the LastKnownGood. Note that if Spec.ConfigSource is reset to nil (use local defaults), the LastKnownGood is also immediately reset to nil, because the local default config is always assumed good. You should not make assumptions about the node's method of determining config stability and correctness, as this may change or become configurable in the future.",
"error": "Error describes any problems reconciling the Spec.ConfigSource to the Active config. Errors may occur, for example, attempting to checkpoint Spec.ConfigSource to the local Assigned record, attempting to checkpoint the payload associated with Spec.ConfigSource, attempting to load or validate the Assigned config, etc. Errors may occur at different points while syncing config. Earlier errors (e.g. download or checkpointing errors) will not result in a rollback to LastKnownGood, and may resolve across Kubelet retries. Later errors (e.g. loading or validating a checkpointed config) will result in a rollback to LastKnownGood. In the latter case, it is usually possible to resolve the error by fixing the config assigned in Spec.ConfigSource. You can find additional information for debugging by searching the error message in the Kubelet log. Error is a human-readable description of the error state; machines can check whether or not Error is empty, but should not rely on the stability of the Error text across Kubelet versions.",
}
func (NodeConfigStatus) SwaggerDoc() map[string]string {
return map_NodeConfigStatus
}
var map_NodeDaemonEndpoints = map[string]string{
	"":                "NodeDaemonEndpoints lists ports opened by daemons running on the Node.",
	"kubeletEndpoint": "Endpoint on which Kubelet is listening.",
@@ -1083,6 +1095,7 @@ var map_NodeStatus = map[string]string{
	"images":          "List of container images on this node",
	"volumesInUse":    "List of attachable volumes in use (mounted) by the node.",
	"volumesAttached": "List of volumes that are attached to the node.",
"config": "Status of the config assigned to the node via the dynamic Kubelet config feature.",
}
func (NodeStatus) SwaggerDoc() map[string]string {

View File

@@ -2398,6 +2398,49 @@ func (in *NodeConfigSource) DeepCopy() *NodeConfigSource {
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeConfigStatus) DeepCopyInto(out *NodeConfigStatus) {
*out = *in
if in.Assigned != nil {
in, out := &in.Assigned, &out.Assigned
if *in == nil {
*out = nil
} else {
*out = new(NodeConfigSource)
(*in).DeepCopyInto(*out)
}
}
if in.Active != nil {
in, out := &in.Active, &out.Active
if *in == nil {
*out = nil
} else {
*out = new(NodeConfigSource)
(*in).DeepCopyInto(*out)
}
}
if in.LastKnownGood != nil {
in, out := &in.LastKnownGood, &out.LastKnownGood
if *in == nil {
*out = nil
} else {
*out = new(NodeConfigSource)
(*in).DeepCopyInto(*out)
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeConfigStatus.
func (in *NodeConfigStatus) DeepCopy() *NodeConfigStatus {
if in == nil {
return nil
}
out := new(NodeConfigStatus)
in.DeepCopyInto(out)
return out
}
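For context, the generated deep copy is what lets consumers safely mutate a status read from a shared informer cache. A small sketch of the aliasing it prevents; the values are illustrative:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	orig := &v1.NodeConfigStatus{
		Assigned: &v1.NodeConfigSource{ConfigMap: &v1.ConfigMapNodeConfigSource{Name: "cm1"}},
	}
	cp := orig.DeepCopy()
	// The nested pointer was copied, not aliased, so mutating the copy
	// leaves the original untouched.
	cp.Assigned.ConfigMap.Name = "cm2"
	fmt.Println(orig.Assigned.ConfigMap.Name) // prints "cm1"
}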
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeDaemonEndpoints) DeepCopyInto(out *NodeDaemonEndpoints) {
	*out = *in
@@ -2650,6 +2693,15 @@ func (in *NodeStatus) DeepCopyInto(out *NodeStatus) {
		*out = make([]AttachedVolume, len(*in))
		copy(*out, *in)
	}
if in.Config != nil {
in, out := &in.Config, &out.Config
if *in == nil {
*out = nil
} else {
*out = new(NodeConfigStatus)
(*in).DeepCopyInto(*out)
}
}
	return
}

View File

@@ -147,6 +147,7 @@ go_test(
	"//vendor/github.com/onsi/gomega/gstruct:go_default_library",
	"//vendor/github.com/onsi/gomega/types:go_default_library",
	"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",

View File

@@ -25,6 +25,7 @@ import (
	"github.com/davecgh/go-spew/spew"
	apiv1 "k8s.io/api/core/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig" "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
controller "k8s.io/kubernetes/pkg/kubelet/kubeletconfig" controller "k8s.io/kubernetes/pkg/kubelet/kubeletconfig"
@ -36,11 +37,19 @@ import (
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
) )
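// configStateStatus describes the expected NodeConfigStatus; the Skip flags
// let a test case omit checks on fields whose values it cannot predict.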
type configStateStatus struct {
apiv1.NodeConfigStatus
SkipActive bool
SkipAssigned bool
SkipLkg bool
}
type configState struct {
	desc string
	configSource *apiv1.NodeConfigSource
	expectConfigStatus *configStateStatus
	expectConfig *kubeletconfig.KubeletConfiguration
	// whether to expect this substring in an error returned from the API server when updating the config source
	apierr string
	// whether the state would cause a config change event as a result of the update to Node.Spec.ConfigSource,
@@ -83,21 +92,27 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube
	// should be fine to reset the values using a remote config, even if they
	// were initially set via the locally provisioned configuration.
	// This is the same strategy several other e2e node tests use.
source := &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
UID: originalConfigMap.UID,
Namespace: originalConfigMap.Namespace,
Name: originalConfigMap.Name,
KubeletConfigKey: "kubelet",
}}
setAndTestKubeletConfigState(f, &configState{desc: "reset to original values", setAndTestKubeletConfigState(f, &configState{desc: "reset to original values",
configSource: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{ configSource: source,
UID: originalConfigMap.UID, expectConfigStatus: &configStateStatus{
Namespace: originalConfigMap.Namespace, NodeConfigStatus: apiv1.NodeConfigStatus{
Name: originalConfigMap.Name, Active: source,
KubeletConfigKey: "kubelet", Assigned: source,
}}, },
expectConfigOk: &apiv1.NodeCondition{Type: apiv1.NodeKubeletConfigOk, Status: apiv1.ConditionTrue, SkipLkg: true,
Message: fmt.Sprintf(status.CurRemoteMessageFmt, configMapAPIPath(originalConfigMap)), },
Reason: status.CurRemoteOkayReason},
expectConfig: originalKC, expectConfig: originalKC,
}, false) }, false)
}) })
Context("When setting new NodeConfigSources that cause transitions between ConfigOk conditions", func() { Context("When changing NodeConfigSources", func() {
It("the Kubelet should report the appropriate status and configz", func() { It("the Kubelet should report the appropriate status and configz", func() {
var err error var err error
// we base the "correct" configmap off of the current configuration // we base the "correct" configmap off of the current configuration
@@ -124,29 +139,77 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube
	failValidateConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(failValidateConfigMap)
	framework.ExpectNoError(err)
correctSource := &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
UID: correctConfigMap.UID,
Namespace: correctConfigMap.Namespace,
Name: correctConfigMap.Name,
KubeletConfigKey: "kubelet",
}}
failParseSource := &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
UID: failParseConfigMap.UID,
Namespace: failParseConfigMap.Namespace,
Name: failParseConfigMap.Name,
KubeletConfigKey: "kubelet",
}}
failValidateSource := &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
UID: failValidateConfigMap.UID,
Namespace: failValidateConfigMap.Namespace,
Name: failValidateConfigMap.Name,
KubeletConfigKey: "kubelet",
}}
// Note: since we start with the nil source (resets lkg), and we don't wait longer than the 10-minute internal
// qualification period before changing it again, we can assume lkg source will be nil in the status
// for this entire test, which is why we never set SkipLkg=true here.
states := []configState{
	{
		desc: "Node.Spec.ConfigSource is nil",
		configSource: nil,
		expectConfigStatus: &configStateStatus{},
		expectConfig: nil,
		event: true,
	},
	{
		desc: "Node.Spec.ConfigSource has all nil subfields",
		configSource: &apiv1.NodeConfigSource{},
		apierr: "exactly one reference subfield must be non-nil",
	},
	{
		desc: "Node.Spec.ConfigSource.ConfigMap is missing namespace",
		configSource: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
			UID: "foo",
			Name: "bar",
			KubeletConfigKey: "kubelet",
		}}, // missing Namespace
		apierr: "spec.configSource.configMap.namespace: Required value: namespace must be set",
},
{
desc: "Node.Spec.ConfigSource.ConfigMap is missing name",
configSource: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
UID: "foo",
Namespace: "bar",
KubeletConfigKey: "kubelet",
}}, // missing Name
apierr: "spec.configSource.configMap.name: Required value: name must be set",
},
{
desc: "Node.Spec.ConfigSource.ConfigMap is missing kubeletConfigKey",
configSource: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
UID: "foo",
Namespace: "bar",
Name: "baz",
}}, // missing KubeletConfigKey
apierr: "spec.configSource.configMap.kubeletConfigKey: Required value: kubeletConfigKey must be set",
},
{
desc: "Node.Spec.ConfigSource.ConfigMap is missing uid",
configSource: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
Namespace: "bar",
Name: "baz",
KubeletConfigKey: "kubelet",
}}, // missing uid
apierr: "spec.configSource.configMap.uid: Required value: uid must be set in spec",
	},
	{desc: "Node.Spec.ConfigSource.ConfigMap.ResourceVersion is illegally specified",
		configSource: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
@@ -186,6 +249,7 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube
		apierr: "spec.configSource.configMap.kubeletConfigKey: Invalid value",
	},
	{
// TODO(mtaufen): remove in #63221
desc: "Node.Spec.ConfigSource.ConfigMap.UID does not align with Namespace/Name", desc: "Node.Spec.ConfigSource.ConfigMap.UID does not align with Namespace/Name",
configSource: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{ configSource: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
UID: "foo", UID: "foo",
@ -193,51 +257,52 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube
Name: correctConfigMap.Name, Name: correctConfigMap.Name,
KubeletConfigKey: "kubelet", KubeletConfigKey: "kubelet",
}}, }},
		expectConfigStatus: &configStateStatus{
			NodeConfigStatus: apiv1.NodeConfigStatus{
				Error: fmt.Sprintf(status.SyncErrorFmt, fmt.Sprintf(status.UIDMismatchErrorFmt, "foo", configMapAPIPath(correctConfigMap), correctConfigMap.UID)),
			},
			// skip assigned and active, because we don't know what the prior source will be
			SkipAssigned: true,
			SkipActive: true,
		},
		expectConfig: nil,
		event: false,
	},
	{
		desc: "correct",
		configSource: correctSource,
		expectConfigStatus: &configStateStatus{
			NodeConfigStatus: apiv1.NodeConfigStatus{
				Active:   correctSource,
				Assigned: correctSource,
			},
		},
		expectConfig: correctKC,
		event: true,
	},
	{
		desc: "fail-parse",
		configSource: failParseSource,
		expectConfigStatus: &configStateStatus{
			NodeConfigStatus: apiv1.NodeConfigStatus{
				Assigned: failParseSource,
				Error:    status.LoadError,
			},
			SkipActive: true,
		},
		expectConfig: nil,
		event: true,
	},
	{
		desc: "fail-validate",
		configSource: failValidateSource,
		expectConfigStatus: &configStateStatus{
			NodeConfigStatus: apiv1.NodeConfigStatus{
				Assigned: failValidateSource,
				Error:    status.ValidateError,
			},
			SkipActive: true,
		},
		expectConfig: nil,
		event: true,
	},
@@ -270,33 +335,45 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube
	badConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(badConfigMap)
	framework.ExpectNoError(err)
lkgSource := &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
UID: lkgConfigMap.UID,
Namespace: lkgConfigMap.Namespace,
Name: lkgConfigMap.Name,
KubeletConfigKey: "kubelet",
}}
badSource := &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
UID: badConfigMap.UID,
Namespace: badConfigMap.Namespace,
Name: badConfigMap.Name,
KubeletConfigKey: "kubelet",
}}
states := []configState{
	// intended lkg
	{desc: "intended last-known-good",
		configSource: lkgSource,
		expectConfigStatus: &configStateStatus{
			NodeConfigStatus: apiv1.NodeConfigStatus{
				Active:   lkgSource,
				Assigned: lkgSource,
			},
			SkipLkg: true,
		},
		expectConfig: lkgKC,
		event: true,
	},
	// bad config
	{desc: "bad config",
		configSource: badSource,
		expectConfigStatus: &configStateStatus{
			NodeConfigStatus: apiv1.NodeConfigStatus{
				Active:        lkgSource,
				Assigned:      badSource,
				LastKnownGood: lkgSource,
				Error:         status.LoadError,
			},
		},
		expectConfig: lkgKC,
		event: true,
	},
@@ -318,34 +395,45 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube
	combinedConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(combinedConfigMap)
	framework.ExpectNoError(err)
lkgSource := &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
UID: combinedConfigMap.UID,
Namespace: combinedConfigMap.Namespace,
Name: combinedConfigMap.Name,
KubeletConfigKey: "kubelet",
}}
badSource := &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
UID: combinedConfigMap.UID,
Namespace: combinedConfigMap.Namespace,
Name: combinedConfigMap.Name,
KubeletConfigKey: badConfigKey,
}}
states := []configState{
	// intended lkg
	{desc: "intended last-known-good",
		configSource: lkgSource,
		expectConfigStatus: &configStateStatus{
			NodeConfigStatus: apiv1.NodeConfigStatus{
				Active:   lkgSource,
				Assigned: lkgSource,
			},
			SkipLkg: true,
		},
		expectConfig: lkgKC,
		event: true,
	},
	// bad config
	{desc: "bad config",
		configSource: badSource,
		expectConfigStatus: &configStateStatus{
			NodeConfigStatus: apiv1.NodeConfigStatus{
				Active:        lkgSource,
				Assigned:      badSource,
				LastKnownGood: lkgSource,
				Error:         status.LoadError,
			},
		},
		expectConfig: lkgKC,
		event: true,
	},
@@ -375,31 +463,42 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube
	cm2, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(cm2)
	framework.ExpectNoError(err)
cm1Source := &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
UID: cm1.UID,
Namespace: cm1.Namespace,
Name: cm1.Name,
KubeletConfigKey: "kubelet",
}}
cm2Source := &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
UID: cm2.UID,
Namespace: cm2.Namespace,
Name: cm2.Name,
KubeletConfigKey: "kubelet",
}}
states := []configState{
	{desc: "cm1",
		configSource: cm1Source,
		expectConfigStatus: &configStateStatus{
			NodeConfigStatus: apiv1.NodeConfigStatus{
				Active:   cm1Source,
				Assigned: cm1Source,
			},
			SkipLkg: true,
		},
		expectConfig: kc1,
		event: true,
	},
{desc: "cm2", {desc: "cm2",
configSource: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{ configSource: cm2Source,
UID: cm2.UID, expectConfigStatus: &configStateStatus{
Namespace: cm2.Namespace, NodeConfigStatus: apiv1.NodeConfigStatus{
Name: cm2.Name, Active: cm2Source,
KubeletConfigKey: "kubelet", Assigned: cm2Source,
}}, },
expectConfigOk: &apiv1.NodeCondition{Type: apiv1.NodeKubeletConfigOk, Status: apiv1.ConditionTrue, SkipLkg: true,
Message: fmt.Sprintf(status.CurRemoteMessageFmt, configMapAPIPath(cm2)), },
Reason: status.CurRemoteOkayReason},
expectConfig: kc2, expectConfig: kc2,
event: true, event: true,
}, },
@@ -424,7 +523,7 @@ func testBothDirections(f *framework.Framework, first *configState, states []con
	time.Sleep(waitAfterFirst)
	// for each state, set to that state, check expectations, then reset to first and check again
	for i := range states {
		By(fmt.Sprintf("from %q to %q", first.desc, states[i].desc))
		// from first -> states[i], states[i].event fully describes whether we should get a config change event
@@ -436,9 +535,8 @@ func testBothDirections(f *framework.Framework, first *configState, states []con
	}
}
// setAndTestKubeletConfigState tests that after setting the config source, the node spec, status, configz, and latest event match
// the expectations described by state.
func setAndTestKubeletConfigState(f *framework.Framework, state *configState, expectEvent bool) {
	// set the desired state, retry a few times in case we are competing with other editors
	Eventually(func() error {
@@ -459,8 +557,8 @@ func setAndTestKubeletConfigState(f *framework.Framework, state *configState, ex
	}
	// check that config source actually got set to what we expect
	checkNodeConfigSource(f, state.desc, state.configSource)
	// check status
	checkConfigStatus(f, state.desc, state.expectConfigStatus)
	// check expectConfig
	if state.expectConfig != nil {
		checkConfig(f, state.desc, state.expectConfig)
@@ -490,8 +588,8 @@ func checkNodeConfigSource(f *framework.Framework, desc string, expect *apiv1.No
	}, timeout, interval).Should(BeNil())
}
// make sure the node status eventually matches what we expect
func checkConfigStatus(f *framework.Framework, desc string, expect *configStateStatus) {
	const (
		timeout  = time.Minute
		interval = time.Second
@@ -499,30 +597,37 @@ func checkConfigOkCondition(f *framework.Framework, desc string, expect *apiv1.N
	Eventually(func() error {
		node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
		if err != nil {
			return fmt.Errorf("checkConfigStatus: case %s: %v", desc, err)
		}
		if err := expectConfigStatus(expect, node.Status.Config); err != nil {
			return fmt.Errorf("checkConfigStatus: case %s: %v", desc, err)
		}
		return nil
	}, timeout, interval).Should(BeNil())
}
func expectConfigStatus(expect *configStateStatus, actual *apiv1.NodeConfigStatus) error {
	if expect == nil {
		return fmt.Errorf("expectConfigStatus requires expect to be non-nil (possible malformed test case)")
	}
	if actual == nil {
		return fmt.Errorf("expectConfigStatus requires actual to be non-nil (possible Kubelet failed to update status)")
	}
	var errs []string
	if !expect.SkipActive && !apiequality.Semantic.DeepEqual(expect.Active, actual.Active) {
errs = append(errs, fmt.Sprintf("expected Active %#v but got %#v", expect.Active, actual.Active))
}
if !expect.SkipAssigned && !apiequality.Semantic.DeepEqual(expect.Assigned, actual.Assigned) {
errs = append(errs, fmt.Sprintf("expected Assigned %#v but got %#v", expect.Assigned, actual.Assigned))
}
if !expect.SkipLkg && !apiequality.Semantic.DeepEqual(expect.LastKnownGood, actual.LastKnownGood) {
errs = append(errs, fmt.Sprintf("expected LastKnownGood %#v but got %#v", expect.LastKnownGood, actual.LastKnownGood))
}
if expect.Error != actual.Error {
errs = append(errs, fmt.Sprintf("expected Error %q but got %q", expect.Error, actual.Error))
}
if len(errs) > 0 {
return fmt.Errorf("%s", strings.Join(errs, ","))
} }
	return nil
}

View File

@@ -225,17 +225,6 @@ func setNodeConfigSource(f *framework.Framework, source *apiv1.NodeConfigSource)
	return nil
}
// getKubeletConfigOkCondition returns the first NodeCondition in `cs` with Type == apiv1.NodeKubeletConfigOk,
// or if no such condition exists, returns nil.
func getKubeletConfigOkCondition(cs []apiv1.NodeCondition) *apiv1.NodeCondition {
for i := range cs {
if cs[i].Type == apiv1.NodeKubeletConfigOk {
return &cs[i]
}
}
return nil
}
// Causes the test to fail, or returns a status 200 response from the /configz endpoint
func pollConfigz(timeout time.Duration, pollInterval time.Duration) *http.Response {
	endpoint := fmt.Sprintf("http://127.0.0.1:8080/api/v1/nodes/%s/proxy/configz", framework.TestContext.NodeName)