mirror of https://github.com/k3s-io/k3s
Move to a structured status for dynamic Kubelet config
Updates dynamic Kubelet config to use a structured status, rather than a node condition. This makes the status machine-readable, and thus more useful for config orchestration. Fixes: #56896
parent 7e75a09db6
commit fcc1f8e7b6
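Unlike the old KubeletConfigOk node condition, the new Node.Status.Config field can be consumed programmatically. As a rough illustration (not part of this commit), an orchestrator watching a config rollout might do something like the following; the function name, node name, and error handling are hypothetical, and the clientset call mirrors the client-go vintage used elsewhere in this diff:

    // checkNodeConfig is a hypothetical helper: it reads the structured
    // status and decides whether the assigned config has been rolled out.
    func checkNodeConfig(client clientset.Interface, nodeName string) error {
        node, err := client.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
        if err != nil {
            return err
        }
        cfg := node.Status.Config // *v1.NodeConfigStatus; nil when dynamic config is not in use
        if cfg == nil {
            return nil // node is running its local default config
        }
        // Error is human-readable; machines should only check whether it is empty.
        if cfg.Error != "" {
            return fmt.Errorf("node %s failed to use its assigned config: %s", nodeName, cfg.Error)
        }
        // The rollout is complete once Active matches Assigned.
        if !apiequality.Semantic.DeepEqual(cfg.Assigned, cfg.Active) {
            return fmt.Errorf("node %s has not yet activated its assigned config", nodeName)
        }
        return nil
    }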
@@ -77278,6 +77278,27 @@
      }
    }
  },
  "io.k8s.api.core.v1.NodeConfigStatus": {
    "description": "NodeConfigStatus describes the status of the config assigned by Node.Spec.ConfigSource.",
    "properties": {
      "active": {
        "description": "Active reports the checkpointed config the node is actively using. Active will represent either the current version of the Assigned config, or the current LastKnownGood config, depending on whether attempting to use the Assigned config results in an error.",
        "$ref": "#/definitions/io.k8s.api.core.v1.NodeConfigSource"
      },
      "assigned": {
        "description": "Assigned reports the checkpointed config the node will try to use. When Node.Spec.ConfigSource is updated, the node checkpoints the associated config payload to local disk, along with a record indicating intended config. The node refers to this record to choose its config checkpoint, and reports this record in Assigned. Assigned only updates in the status after the record has been checkpointed to disk. When the Kubelet is restarted, it tries to make the Assigned config the Active config by loading and validating the checkpointed payload identified by Assigned.",
        "$ref": "#/definitions/io.k8s.api.core.v1.NodeConfigSource"
      },
      "error": {
        "description": "Error describes any problems reconciling the Spec.ConfigSource to the Active config. Errors may occur, for example, attempting to checkpoint Spec.ConfigSource to the local Assigned record, attempting to checkpoint the payload associated with Spec.ConfigSource, attempting to load or validate the Assigned config, etc. Errors may occur at different points while syncing config. Earlier errors (e.g. download or checkpointing errors) will not result in a rollback to LastKnownGood, and may resolve across Kubelet retries. Later errors (e.g. loading or validating a checkpointed config) will result in a rollback to LastKnownGood. In the latter case, it is usually possible to resolve the error by fixing the config assigned in Spec.ConfigSource. You can find additional information for debugging by searching the error message in the Kubelet log. Error is a human-readable description of the error state; machines can check whether or not Error is empty, but should not rely on the stability of the Error text across Kubelet versions.",
        "type": "string"
      },
      "lastKnownGood": {
        "description": "LastKnownGood reports the checkpointed config the node will fall back to when it encounters an error attempting to use the Assigned config. The Assigned config becomes the LastKnownGood config when the node determines that the Assigned config is stable and correct. This is currently implemented as a 10-minute soak period starting when the local record of Assigned config is updated. If the Assigned config is Active at the end of this period, it becomes the LastKnownGood. Note that if Spec.ConfigSource is reset to nil (use local defaults), the LastKnownGood is also immediately reset to nil, because the local default config is always assumed good. You should not make assumptions about the node's method of determining config stability and correctness, as this may change or become configurable in the future.",
        "$ref": "#/definitions/io.k8s.api.core.v1.NodeConfigSource"
      }
    }
  },
  "io.k8s.api.core.v1.NodeDaemonEndpoints": {
    "description": "NodeDaemonEndpoints lists ports opened by daemons running on the Node.",
    "properties": {
@@ -77446,6 +77467,10 @@
      "x-kubernetes-patch-merge-key": "type",
      "x-kubernetes-patch-strategy": "merge"
    },
    "config": {
      "description": "Status of the config assigned to the node via the dynamic Kubelet config feature.",
      "$ref": "#/definitions/io.k8s.api.core.v1.NodeConfigStatus"
    },
    "daemonEndpoints": {
      "description": "Endpoints of daemons running on the Node.",
      "$ref": "#/definitions/io.k8s.api.core.v1.NodeDaemonEndpoints"

@@ -18813,6 +18813,10 @@
          "$ref": "v1.AttachedVolume"
        },
        "description": "List of volumes that are attached to the node."
      },
      "config": {
        "$ref": "v1.NodeConfigStatus",
        "description": "Status of the config assigned to the node via the dynamic Kubelet config feature."
      }
    }
  },
@@ -18993,6 +18997,28 @@
      }
    }
  },
  "v1.NodeConfigStatus": {
    "id": "v1.NodeConfigStatus",
    "description": "NodeConfigStatus describes the status of the config assigned by Node.Spec.ConfigSource.",
    "properties": {
      "assigned": {
        "$ref": "v1.NodeConfigSource",
        "description": "Assigned reports the checkpointed config the node will try to use. When Node.Spec.ConfigSource is updated, the node checkpoints the associated config payload to local disk, along with a record indicating intended config. The node refers to this record to choose its config checkpoint, and reports this record in Assigned. Assigned only updates in the status after the record has been checkpointed to disk. When the Kubelet is restarted, it tries to make the Assigned config the Active config by loading and validating the checkpointed payload identified by Assigned."
      },
      "active": {
        "$ref": "v1.NodeConfigSource",
        "description": "Active reports the checkpointed config the node is actively using. Active will represent either the current version of the Assigned config, or the current LastKnownGood config, depending on whether attempting to use the Assigned config results in an error."
      },
      "lastKnownGood": {
        "$ref": "v1.NodeConfigSource",
        "description": "LastKnownGood reports the checkpointed config the node will fall back to when it encounters an error attempting to use the Assigned config. The Assigned config becomes the LastKnownGood config when the node determines that the Assigned config is stable and correct. This is currently implemented as a 10-minute soak period starting when the local record of Assigned config is updated. If the Assigned config is Active at the end of this period, it becomes the LastKnownGood. Note that if Spec.ConfigSource is reset to nil (use local defaults), the LastKnownGood is also immediately reset to nil, because the local default config is always assumed good. You should not make assumptions about the node's method of determining config stability and correctness, as this may change or become configurable in the future."
      },
      "error": {
        "type": "string",
        "description": "Error describes any problems reconciling the Spec.ConfigSource to the Active config. Errors may occur, for example, attempting to checkpoint Spec.ConfigSource to the local Assigned record, attempting to checkpoint the payload associated with Spec.ConfigSource, attempting to load or validate the Assigned config, etc. Errors may occur at different points while syncing config. Earlier errors (e.g. download or checkpointing errors) will not result in a rollback to LastKnownGood, and may resolve across Kubelet retries. Later errors (e.g. loading or validating a checkpointed config) will result in a rollback to LastKnownGood. In the latter case, it is usually possible to resolve the error by fixing the config assigned in Spec.ConfigSource. You can find additional information for debugging by searching the error message in the Kubelet log. Error is a human-readable description of the error state; machines can check whether or not Error is empty, but should not rely on the stability of the Error text across Kubelet versions."
      }
    }
  },
  "v1.PersistentVolumeClaimList": {
    "id": "v1.PersistentVolumeClaimList",
    "description": "PersistentVolumeClaimList is a list of PersistentVolumeClaim items.",

@@ -198,25 +198,43 @@ HTTP server: The kubelet can also listen for HTTP and respond to a simple API
        }
    }

    // TODO(#63305): always validate the combination of the local config file and flags, this is the fallback
    // when the dynamic config controller tells us to use local config (this can be fixed alongside other validation fixes).

    // use dynamic kubelet config, if enabled
    var kubeletConfigController *dynamickubeletconfig.Controller
    if dynamicConfigDir := kubeletFlags.DynamicConfigDir.Value(); len(dynamicConfigDir) > 0 {
        kubeletConfig, kubeletConfigController, err = BootstrapKubeletConfigController(kubeletConfig, dynamicConfigDir)
        var dynamicKubeletConfig *kubeletconfiginternal.KubeletConfiguration
        dynamicKubeletConfig, kubeletConfigController, err = BootstrapKubeletConfigController(dynamicConfigDir)
        if err != nil {
            glog.Fatal(err)
        }
        // We must enforce flag precedence by re-parsing the command line into the new object.
        // This is necessary to preserve backwards-compatibility across binary upgrades.
        // See issue #56171 for more details.
        if err := kubeletConfigFlagPrecedence(kubeletConfig, args); err != nil {
            glog.Fatal(err)
        }
        // update feature gates based on new config
        if err := utilfeature.DefaultFeatureGate.SetFromMap(kubeletConfig.FeatureGates); err != nil {
            glog.Fatal(err)
        // If we should just use our existing, local config, the controller will return a nil config
        if dynamicKubeletConfig != nil {
            kubeletConfig = dynamicKubeletConfig
            // We must enforce flag precedence by re-parsing the command line into the new object.
            // This is necessary to preserve backwards-compatibility across binary upgrades.
            // See issue #56171 for more details.
            if err := kubeletConfigFlagPrecedence(kubeletConfig, args); err != nil {
                glog.Fatal(err)
            }
            // update feature gates based on new config
            if err := utilfeature.DefaultFeatureGate.SetFromMap(kubeletConfig.FeatureGates); err != nil {
                glog.Fatal(err)
            }
        }
    }

    // TODO(#63305): need to reconcile that validation performed inside the dynamic config controller
    // will happen against currently set feature gates, rather than future adjustments from combination of files
    // and flags. There's a potential scenario where a valid config (because it sets new gates) is considered
    // invalid against current gates (at least until --feature-gates flag is removed).
    // We should validate against the combination of current feature gates, overrides from feature gates in the file,
    // and overrides from feature gates set via flags, rather than currently set feature gates.
    // Once the --feature-gates flag is removed, we should strictly validate against the combination of current
    // feature gates and feature gates in the file (always need to validate against the combo, because feature-gates
    // can layer between the file and dynamic config right now - though maybe we should change this).

    // construct a KubeletServer from kubeletFlags and kubeletConfig
    kubeletServer := &options.KubeletServer{
        KubeletFlags: *kubeletFlags,
@@ -1089,8 +1107,7 @@ func parseResourceList(m map[string]string) (v1.ResourceList, error) {
}

// BootstrapKubeletConfigController constructs and bootstraps a configuration controller
func BootstrapKubeletConfigController(defaultConfig *kubeletconfiginternal.KubeletConfiguration,
    dynamicConfigDir string) (*kubeletconfiginternal.KubeletConfiguration, *dynamickubeletconfig.Controller, error) {
func BootstrapKubeletConfigController(dynamicConfigDir string) (*kubeletconfiginternal.KubeletConfiguration, *dynamickubeletconfig.Controller, error) {
    if !utilfeature.DefaultFeatureGate.Enabled(features.DynamicKubeletConfig) {
        return nil, nil, fmt.Errorf("failed to bootstrap Kubelet config controller, you must enable the DynamicKubeletConfig feature gate")
    }
@@ -1104,7 +1121,7 @@ func BootstrapKubeletConfigController(defaultConfig *kubeletconfiginternal.Kubel
        return nil, nil, fmt.Errorf("failed to get absolute path for --dynamic-config-dir=%s", dynamicConfigDir)
    }
    // get the latest KubeletConfiguration checkpoint from disk, or return the default config if no valid checkpoints exist
    c := dynamickubeletconfig.NewController(defaultConfig, dir)
    c := dynamickubeletconfig.NewController(dir)
    kc, err := c.Bootstrap()
    if err != nil {
        return nil, nil, fmt.Errorf("failed to determine a valid configuration, error: %v", err)

@@ -2996,6 +2996,13 @@ The resulting set of endpoints can be viewed as:<br>
<td class="tableblock halign-left valign-top"><p class="tableblock"><a href="#_v1_attachedvolume">v1.AttachedVolume</a> array</p></td>
<td class="tableblock halign-left valign-top"></td>
</tr>
<tr>
<td class="tableblock halign-left valign-top"><p class="tableblock">config</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">Status of the config assigned to the node via the dynamic Kubelet config feature.</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">false</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock"><a href="#_v1_nodeconfigstatus">v1.NodeConfigStatus</a></p></td>
<td class="tableblock halign-left valign-top"></td>
</tr>
</tbody>
</table>

@@ -6498,6 +6505,40 @@ Examples:<br>
</tbody>
</table>

</div>
<div class="sect2">
<h3 id="_v1_localvolumesource">v1.LocalVolumeSource</h3>
<div class="paragraph">
<p>Local represents directly-attached storage with node affinity (Beta feature)</p>
</div>
<table class="tableblock frame-all grid-all" style="width:100%; ">
<colgroup>
<col style="width:20%;">
<col style="width:20%;">
<col style="width:20%;">
<col style="width:20%;">
<col style="width:20%;">
</colgroup>
<thead>
<tr>
<th class="tableblock halign-left valign-top">Name</th>
<th class="tableblock halign-left valign-top">Description</th>
<th class="tableblock halign-left valign-top">Required</th>
<th class="tableblock halign-left valign-top">Schema</th>
<th class="tableblock halign-left valign-top">Default</th>
</tr>
</thead>
<tbody>
<tr>
<td class="tableblock halign-left valign-top"><p class="tableblock">path</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">The full path to the volume on the node. It can be either a directory or block device (disk, partition, …). Directories can be represented only by PersistentVolume with VolumeMode=Filesystem. Block devices can be represented only by VolumeMode=Block, which also requires the BlockVolume alpha feature gate to be enabled.</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">true</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">string</p></td>
<td class="tableblock halign-left valign-top"></td>
</tr>
</tbody>
</table>

</div>
<div class="sect2">
<h3 id="_v1_nodeselectorterm">v1.NodeSelectorTerm</h3>
@@ -6539,40 +6580,6 @@ Examples:<br>
</tbody>
</table>

</div>
<div class="sect2">
<h3 id="_v1_localvolumesource">v1.LocalVolumeSource</h3>
<div class="paragraph">
<p>Local represents directly-attached storage with node affinity (Beta feature)</p>
</div>
<table class="tableblock frame-all grid-all" style="width:100%; ">
<colgroup>
<col style="width:20%;">
<col style="width:20%;">
<col style="width:20%;">
<col style="width:20%;">
<col style="width:20%;">
</colgroup>
<thead>
<tr>
<th class="tableblock halign-left valign-top">Name</th>
<th class="tableblock halign-left valign-top">Description</th>
<th class="tableblock halign-left valign-top">Required</th>
<th class="tableblock halign-left valign-top">Schema</th>
<th class="tableblock halign-left valign-top">Default</th>
</tr>
</thead>
<tbody>
<tr>
<td class="tableblock halign-left valign-top"><p class="tableblock">path</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">The full path to the volume on the node. It can be either a directory or block device (disk, partition, …). Directories can be represented only by PersistentVolume with VolumeMode=Filesystem. Block devices can be represented only by VolumeMode=Block, which also requires the BlockVolume alpha feature gate to be enabled.</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">true</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">string</p></td>
<td class="tableblock halign-left valign-top"></td>
</tr>
</tbody>
</table>

</div>
<div class="sect2">
<h3 id="_v1_selinuxoptions">v1.SELinuxOptions</h3>
@@ -7892,6 +7899,61 @@ Examples:<br>
<div class="sect2">
<h3 id="_v1_uniquevolumename">v1.UniqueVolumeName</h3>

</div>
<div class="sect2">
<h3 id="_v1_nodeconfigstatus">v1.NodeConfigStatus</h3>
<div class="paragraph">
<p>NodeConfigStatus describes the status of the config assigned by Node.Spec.ConfigSource.</p>
</div>
<table class="tableblock frame-all grid-all" style="width:100%; ">
<colgroup>
<col style="width:20%;">
<col style="width:20%;">
<col style="width:20%;">
<col style="width:20%;">
<col style="width:20%;">
</colgroup>
<thead>
<tr>
<th class="tableblock halign-left valign-top">Name</th>
<th class="tableblock halign-left valign-top">Description</th>
<th class="tableblock halign-left valign-top">Required</th>
<th class="tableblock halign-left valign-top">Schema</th>
<th class="tableblock halign-left valign-top">Default</th>
</tr>
</thead>
<tbody>
<tr>
<td class="tableblock halign-left valign-top"><p class="tableblock">assigned</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">Assigned reports the checkpointed config the node will try to use. When Node.Spec.ConfigSource is updated, the node checkpoints the associated config payload to local disk, along with a record indicating intended config. The node refers to this record to choose its config checkpoint, and reports this record in Assigned. Assigned only updates in the status after the record has been checkpointed to disk. When the Kubelet is restarted, it tries to make the Assigned config the Active config by loading and validating the checkpointed payload identified by Assigned.</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">false</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock"><a href="#_v1_nodeconfigsource">v1.NodeConfigSource</a></p></td>
<td class="tableblock halign-left valign-top"></td>
</tr>
<tr>
<td class="tableblock halign-left valign-top"><p class="tableblock">active</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">Active reports the checkpointed config the node is actively using. Active will represent either the current version of the Assigned config, or the current LastKnownGood config, depending on whether attempting to use the Assigned config results in an error.</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">false</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock"><a href="#_v1_nodeconfigsource">v1.NodeConfigSource</a></p></td>
<td class="tableblock halign-left valign-top"></td>
</tr>
<tr>
<td class="tableblock halign-left valign-top"><p class="tableblock">lastKnownGood</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">LastKnownGood reports the checkpointed config the node will fall back to when it encounters an error attempting to use the Assigned config. The Assigned config becomes the LastKnownGood config when the node determines that the Assigned config is stable and correct. This is currently implemented as a 10-minute soak period starting when the local record of Assigned config is updated. If the Assigned config is Active at the end of this period, it becomes the LastKnownGood. Note that if Spec.ConfigSource is reset to nil (use local defaults), the LastKnownGood is also immediately reset to nil, because the local default config is always assumed good. You should not make assumptions about the node’s method of determining config stability and correctness, as this may change or become configurable in the future.</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">false</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock"><a href="#_v1_nodeconfigsource">v1.NodeConfigSource</a></p></td>
<td class="tableblock halign-left valign-top"></td>
</tr>
<tr>
<td class="tableblock halign-left valign-top"><p class="tableblock">error</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">Error describes any problems reconciling the Spec.ConfigSource to the Active config. Errors may occur, for example, attempting to checkpoint Spec.ConfigSource to the local Assigned record, attempting to checkpoint the payload associated with Spec.ConfigSource, attempting to load or validate the Assigned config, etc. Errors may occur at different points while syncing config. Earlier errors (e.g. download or checkpointing errors) will not result in a rollback to LastKnownGood, and may resolve across Kubelet retries. Later errors (e.g. loading or validating a checkpointed config) will result in a rollback to LastKnownGood. In the latter case, it is usually possible to resolve the error by fixing the config assigned in Spec.ConfigSource. You can find additional information for debugging by searching the error message in the Kubelet log. Error is a human-readable description of the error state; machines can check whether or not Error is empty, but should not rely on the stability of the Error text across Kubelet versions.</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">false</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">string</p></td>
<td class="tableblock halign-left valign-top"></td>
</tr>
</tbody>
</table>

</div>
<div class="sect2">
<h3 id="_v1_csipersistentvolumesource">v1.CSIPersistentVolumeSource</h3>

@@ -3348,6 +3348,53 @@ type NodeSystemInfo struct {
    Architecture string
}

// NodeConfigStatus describes the status of the config assigned by Node.Spec.ConfigSource.
type NodeConfigStatus struct {
    // Assigned reports the checkpointed config the node will try to use.
    // When Node.Spec.ConfigSource is updated, the node checkpoints the associated
    // config payload to local disk, along with a record indicating intended
    // config. The node refers to this record to choose its config checkpoint, and
    // reports this record in Assigned. Assigned only updates in the status after
    // the record has been checkpointed to disk. When the Kubelet is restarted,
    // it tries to make the Assigned config the Active config by loading and
    // validating the checkpointed payload identified by Assigned.
    // +optional
    Assigned *NodeConfigSource
    // Active reports the checkpointed config the node is actively using.
    // Active will represent either the current version of the Assigned config,
    // or the current LastKnownGood config, depending on whether attempting to use the
    // Assigned config results in an error.
    // +optional
    Active *NodeConfigSource
    // LastKnownGood reports the checkpointed config the node will fall back to
    // when it encounters an error attempting to use the Assigned config.
    // The Assigned config becomes the LastKnownGood config when the node determines
    // that the Assigned config is stable and correct.
    // This is currently implemented as a 10-minute soak period starting when the local
    // record of Assigned config is updated. If the Assigned config is Active at the end
    // of this period, it becomes the LastKnownGood. Note that if Spec.ConfigSource is
    // reset to nil (use local defaults), the LastKnownGood is also immediately reset to nil,
    // because the local default config is always assumed good.
    // You should not make assumptions about the node's method of determining config stability
    // and correctness, as this may change or become configurable in the future.
    // +optional
    LastKnownGood *NodeConfigSource
    // Error describes any problems reconciling the Spec.ConfigSource to the Active config.
    // Errors may occur, for example, attempting to checkpoint Spec.ConfigSource to the local Assigned
    // record, attempting to checkpoint the payload associated with Spec.ConfigSource, attempting
    // to load or validate the Assigned config, etc.
    // Errors may occur at different points while syncing config. Earlier errors (e.g. download or
    // checkpointing errors) will not result in a rollback to LastKnownGood, and may resolve across
    // Kubelet retries. Later errors (e.g. loading or validating a checkpointed config) will result in
    // a rollback to LastKnownGood. In the latter case, it is usually possible to resolve the error
    // by fixing the config assigned in Spec.ConfigSource.
    // You can find additional information for debugging by searching the error message in the Kubelet log.
    // Error is a human-readable description of the error state; machines can check whether or not Error
    // is empty, but should not rely on the stability of the Error text across Kubelet versions.
    // +optional
    Error string
}

// NodeStatus is information about the current status of a node.
type NodeStatus struct {
    // Capacity represents the total resources of a node.
@@ -3380,6 +3427,9 @@ type NodeStatus struct {
    // List of volumes that are attached to the node.
    // +optional
    VolumesAttached []AttachedVolume
    // Status of the config assigned to the node via the dynamic Kubelet config feature.
    // +optional
    Config *NodeConfigStatus
}

type UniqueVolumeName string
@@ -3464,8 +3514,6 @@ const (
    NodeDiskPressure NodeConditionType = "DiskPressure"
    // NodeNetworkUnavailable means that network for the node is not correctly configured.
    NodeNetworkUnavailable NodeConditionType = "NetworkUnavailable"
    // NodeKubeletConfigOk indicates whether the kubelet is correctly configured
    NodeKubeletConfigOk NodeConditionType = "KubeletConfigOk"
)

type NodeCondition struct {

@@ -210,6 +210,8 @@ func RegisterConversions(scheme *runtime.Scheme) error {
        Convert_core_NodeCondition_To_v1_NodeCondition,
        Convert_v1_NodeConfigSource_To_core_NodeConfigSource,
        Convert_core_NodeConfigSource_To_v1_NodeConfigSource,
        Convert_v1_NodeConfigStatus_To_core_NodeConfigStatus,
        Convert_core_NodeConfigStatus_To_v1_NodeConfigStatus,
        Convert_v1_NodeDaemonEndpoints_To_core_NodeDaemonEndpoints,
        Convert_core_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints,
        Convert_v1_NodeList_To_core_NodeList,
@@ -2635,6 +2637,32 @@ func Convert_core_NodeConfigSource_To_v1_NodeConfigSource(in *core.NodeConfigSou
    return autoConvert_core_NodeConfigSource_To_v1_NodeConfigSource(in, out, s)
}

func autoConvert_v1_NodeConfigStatus_To_core_NodeConfigStatus(in *v1.NodeConfigStatus, out *core.NodeConfigStatus, s conversion.Scope) error {
    out.Assigned = (*core.NodeConfigSource)(unsafe.Pointer(in.Assigned))
    out.Active = (*core.NodeConfigSource)(unsafe.Pointer(in.Active))
    out.LastKnownGood = (*core.NodeConfigSource)(unsafe.Pointer(in.LastKnownGood))
    out.Error = in.Error
    return nil
}

// Convert_v1_NodeConfigStatus_To_core_NodeConfigStatus is an autogenerated conversion function.
func Convert_v1_NodeConfigStatus_To_core_NodeConfigStatus(in *v1.NodeConfigStatus, out *core.NodeConfigStatus, s conversion.Scope) error {
    return autoConvert_v1_NodeConfigStatus_To_core_NodeConfigStatus(in, out, s)
}

func autoConvert_core_NodeConfigStatus_To_v1_NodeConfigStatus(in *core.NodeConfigStatus, out *v1.NodeConfigStatus, s conversion.Scope) error {
    out.Assigned = (*v1.NodeConfigSource)(unsafe.Pointer(in.Assigned))
    out.Active = (*v1.NodeConfigSource)(unsafe.Pointer(in.Active))
    out.LastKnownGood = (*v1.NodeConfigSource)(unsafe.Pointer(in.LastKnownGood))
    out.Error = in.Error
    return nil
}

// Convert_core_NodeConfigStatus_To_v1_NodeConfigStatus is an autogenerated conversion function.
func Convert_core_NodeConfigStatus_To_v1_NodeConfigStatus(in *core.NodeConfigStatus, out *v1.NodeConfigStatus, s conversion.Scope) error {
    return autoConvert_core_NodeConfigStatus_To_v1_NodeConfigStatus(in, out, s)
}

func autoConvert_v1_NodeDaemonEndpoints_To_core_NodeDaemonEndpoints(in *v1.NodeDaemonEndpoints, out *core.NodeDaemonEndpoints, s conversion.Scope) error {
    if err := Convert_v1_DaemonEndpoint_To_core_DaemonEndpoint(&in.KubeletEndpoint, &out.KubeletEndpoint, s); err != nil {
        return err
@@ -2832,6 +2860,7 @@ func autoConvert_v1_NodeStatus_To_core_NodeStatus(in *v1.NodeStatus, out *core.N
    out.Images = *(*[]core.ContainerImage)(unsafe.Pointer(&in.Images))
    out.VolumesInUse = *(*[]core.UniqueVolumeName)(unsafe.Pointer(&in.VolumesInUse))
    out.VolumesAttached = *(*[]core.AttachedVolume)(unsafe.Pointer(&in.VolumesAttached))
    out.Config = (*core.NodeConfigStatus)(unsafe.Pointer(in.Config))
    return nil
}

@@ -2855,6 +2884,7 @@ func autoConvert_core_NodeStatus_To_v1_NodeStatus(in *core.NodeStatus, out *v1.N
    out.Images = *(*[]v1.ContainerImage)(unsafe.Pointer(&in.Images))
    out.VolumesInUse = *(*[]v1.UniqueVolumeName)(unsafe.Pointer(&in.VolumesInUse))
    out.VolumesAttached = *(*[]v1.AttachedVolume)(unsafe.Pointer(&in.VolumesAttached))
    out.Config = (*v1.NodeConfigStatus)(unsafe.Pointer(in.Config))
    return nil
}

@@ -4010,9 +4010,14 @@ func ValidateNode(node *core.Node) field.ErrorList {
    // That said, if specified, we need to ensure they are valid.
    allErrs = append(allErrs, ValidateNodeResources(node)...)

    // Only allow Node.Spec.ConfigSource to be set if the DynamicKubeletConfig feature gate is enabled
    if node.Spec.ConfigSource != nil && !utilfeature.DefaultFeatureGate.Enabled(features.DynamicKubeletConfig) {
        allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "configSource"), "configSource may only be set if the DynamicKubeletConfig feature gate is enabled)"))
    // Only allow Spec.ConfigSource and Status.Config to be set if the DynamicKubeletConfig feature gate is enabled
    if !utilfeature.DefaultFeatureGate.Enabled(features.DynamicKubeletConfig) {
        if node.Spec.ConfigSource != nil {
            allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "configSource"), "configSource may only be set if the DynamicKubeletConfig feature gate is enabled)"))
        }
        if node.Status.Config != nil {
            allErrs = append(allErrs, field.Forbidden(field.NewPath("status", "config"), "config may only be set if the DynamicKubeletConfig feature gate is enabled)"))
        }
    }

    if len(node.Spec.PodCIDR) != 0 {
@@ -4097,6 +4102,18 @@ func ValidateNodeUpdate(node, oldNode *core.Node) field.ErrorList {
        }
    }

    // Allow and validate updates to Node.Spec.ConfigSource and Node.Status.Config if DynamicKubeletConfig feature gate is enabled
    if utilfeature.DefaultFeatureGate.Enabled(features.DynamicKubeletConfig) {
        if node.Spec.ConfigSource != nil {
            allErrs = append(allErrs, validateNodeConfigSourceSpec(node.Spec.ConfigSource, field.NewPath("spec", "configSource"))...)
        }
        oldNode.Spec.ConfigSource = node.Spec.ConfigSource
        if node.Status.Config != nil {
            allErrs = append(allErrs, validateNodeConfigStatus(node.Status.Config, field.NewPath("status", "config"))...)
        }
        oldNode.Status.Config = node.Status.Config
    }

    // TODO: move reset function to its own location
    // Ignore metadata changes now that they have been tested
    oldNode.ObjectMeta = node.ObjectMeta
@@ -4113,14 +4130,6 @@ func ValidateNodeUpdate(node, oldNode *core.Node) field.ErrorList {
    }
    oldNode.Spec.Taints = node.Spec.Taints

    // Allow updates to Node.Spec.ConfigSource if DynamicKubeletConfig feature gate is enabled
    if utilfeature.DefaultFeatureGate.Enabled(features.DynamicKubeletConfig) {
        if node.Spec.ConfigSource != nil {
            allErrs = append(allErrs, validateNodeConfigSourceSpec(node.Spec.ConfigSource, field.NewPath("spec", "configSource"))...)
        }
        oldNode.Spec.ConfigSource = node.Spec.ConfigSource
    }

    // We made allowed changes to oldNode, and now we compare oldNode to node. Any remaining differences indicate changes to protected fields.
    // TODO: Add a 'real' error type for this error and provide print actual diffs.
    if !apiequality.Semantic.DeepEqual(oldNode, node) {
@@ -4162,12 +4171,56 @@ func validateConfigMapNodeConfigSourceSpec(source *core.ConfigMapNodeConfigSourc
    return append(allErrs, validateConfigMapNodeConfigSource(source, fldPath)...)
}

// validation specific to Node.Status.Config
func validateNodeConfigStatus(status *core.NodeConfigStatus, fldPath *field.Path) field.ErrorList {
    allErrs := field.ErrorList{}
    if status.Assigned != nil {
        allErrs = append(allErrs, validateNodeConfigSourceStatus(status.Assigned, fldPath.Child("assigned"))...)
    }
    if status.Active != nil {
        allErrs = append(allErrs, validateNodeConfigSourceStatus(status.Active, fldPath.Child("active"))...)
    }
    if status.LastKnownGood != nil {
        allErrs = append(allErrs, validateNodeConfigSourceStatus(status.LastKnownGood, fldPath.Child("lastKnownGood"))...)
    }
    return allErrs
}

// validation specific to Node.Status.Config.(Active|Assigned|LastKnownGood)
func validateNodeConfigSourceStatus(source *core.NodeConfigSource, fldPath *field.Path) field.ErrorList {
    allErrs := field.ErrorList{}
    count := int(0)
    if source.ConfigMap != nil {
        count++
        allErrs = append(allErrs, validateConfigMapNodeConfigSourceStatus(source.ConfigMap, fldPath.Child("configMap"))...)
    }
    // add more subfields here in the future as they are added to NodeConfigSource

    // exactly one reference subfield must be non-nil
    if count != 1 {
        allErrs = append(allErrs, field.Invalid(fldPath, source, "exactly one reference subfield must be non-nil"))
    }
    return allErrs
}

// validation specific to Node.Status.Config.(Active|Assigned|LastKnownGood).ConfigMap
func validateConfigMapNodeConfigSourceStatus(source *core.ConfigMapNodeConfigSource, fldPath *field.Path) field.ErrorList {
    allErrs := field.ErrorList{}

    if string(source.UID) == "" {
        allErrs = append(allErrs, field.Required(fldPath.Child("uid"), "uid must be set in status"))
    }
    // TODO(#63221): require ResourceVersion in status when we start respecting ConfigMap mutations (the Kubelet isn't tracking it internally until
    // that PR, which makes it difficult to report for now).
    return append(allErrs, validateConfigMapNodeConfigSource(source, fldPath)...)
}

// common validation
func validateConfigMapNodeConfigSource(source *core.ConfigMapNodeConfigSource, fldPath *field.Path) field.ErrorList {
    allErrs := field.ErrorList{}
    // validate target configmap namespace
    if source.Namespace == "" {
        allErrs = append(allErrs, field.Required(fldPath.Child("namespace"), "namespace must be set in spec"))
        allErrs = append(allErrs, field.Required(fldPath.Child("namespace"), "namespace must be set"))
    } else {
        for _, msg := range ValidateNameFunc(ValidateNamespaceName)(source.Namespace, false) {
            allErrs = append(allErrs, field.Invalid(fldPath.Child("namespace"), source.Namespace, msg))
@@ -4175,7 +4228,7 @@ func validateConfigMapNodeConfigSource(source *core.ConfigMapNodeConfigSource, f
    }
    // validate target configmap name
    if source.Name == "" {
        allErrs = append(allErrs, field.Required(fldPath.Child("name"), "name must be set in spec"))
        allErrs = append(allErrs, field.Required(fldPath.Child("name"), "name must be set"))
    } else {
        for _, msg := range ValidateNameFunc(ValidateConfigMapName)(source.Name, false) {
            allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), source.Name, msg))
@@ -4183,7 +4236,7 @@ func validateConfigMapNodeConfigSource(source *core.ConfigMapNodeConfigSource, f
    }
    // validate kubeletConfigKey against rules for configMap key names
    if source.KubeletConfigKey == "" {
        allErrs = append(allErrs, field.Required(fldPath.Child("kubeletConfigKey"), "kubeletConfigKey must be set in spec"))
        allErrs = append(allErrs, field.Required(fldPath.Child("kubeletConfigKey"), "kubeletConfigKey must be set"))
    } else {
        for _, msg := range validation.IsConfigMapKey(source.KubeletConfigKey) {
            allErrs = append(allErrs, field.Invalid(fldPath.Child("kubeletConfigKey"), source.KubeletConfigKey, msg))

@@ -2402,6 +2402,49 @@ func (in *NodeConfigSource) DeepCopy() *NodeConfigSource {
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeConfigStatus) DeepCopyInto(out *NodeConfigStatus) {
    *out = *in
    if in.Assigned != nil {
        in, out := &in.Assigned, &out.Assigned
        if *in == nil {
            *out = nil
        } else {
            *out = new(NodeConfigSource)
            (*in).DeepCopyInto(*out)
        }
    }
    if in.Active != nil {
        in, out := &in.Active, &out.Active
        if *in == nil {
            *out = nil
        } else {
            *out = new(NodeConfigSource)
            (*in).DeepCopyInto(*out)
        }
    }
    if in.LastKnownGood != nil {
        in, out := &in.LastKnownGood, &out.LastKnownGood
        if *in == nil {
            *out = nil
        } else {
            *out = new(NodeConfigSource)
            (*in).DeepCopyInto(*out)
        }
    }
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeConfigStatus.
func (in *NodeConfigStatus) DeepCopy() *NodeConfigStatus {
    if in == nil {
        return nil
    }
    out := new(NodeConfigStatus)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeDaemonEndpoints) DeepCopyInto(out *NodeDaemonEndpoints) {
    *out = *in
@@ -2654,6 +2697,15 @@ func (in *NodeStatus) DeepCopyInto(out *NodeStatus) {
        *out = make([]AttachedVolume, len(*in))
        copy(*out, *in)
    }
    if in.Config != nil {
        in, out := &in.Config, &out.Config
        if *in == nil {
            *out = nil
        } else {
            *out = new(NodeConfigStatus)
            (*in).DeepCopyInto(*out)
        }
    }
    return
}

@@ -55,13 +55,14 @@ type RemoteConfigSource interface {
    // Download downloads the remote config source object and returns a Payload backed by the object,
    // or a sanitized failure reason and error if the download fails
    Download(client clientset.Interface) (Payload, string, error)
    // Encode returns a []byte representation of the object behind the RemoteConfigSource
    // Encode returns a []byte representation of the NodeConfigSource behind the RemoteConfigSource
    Encode() ([]byte, error)

    // object returns the underlying source object.
    // If you want to compare sources for equality, use EqualRemoteConfigSources,
    // which compares the underlying source objects for semantic API equality.
    object() interface{}
    // NodeConfigSource returns a copy of the underlying apiv1.NodeConfigSource object.
    // All RemoteConfigSources are expected to be backed by a NodeConfigSource,
    // though the convenience methods on the interface will target the source
    // type that was detected in a call to NewRemoteConfigSource.
    NodeConfigSource() *apiv1.NodeConfigSource
}

// NewRemoteConfigSource constructs a RemoteConfigSource from a v1/NodeConfigSource object
@@ -72,9 +73,10 @@ func NewRemoteConfigSource(source *apiv1.NodeConfigSource) (RemoteConfigSource,
    // nil here, so that if a new API server allows a new config source type, old clients can send
    // an error message rather than crashing due to a nil pointer dereference.

    // exactly one reference subfield of the config source must be non-nil, today ConfigMap is the only reference subfield
    // Exactly one reference subfield of the config source must be non-nil.
    // Currently ConfigMap is the only reference subfield.
    if source.ConfigMap == nil {
        return nil, status.FailSyncReasonAllNilSubfields, fmt.Errorf("%s, NodeConfigSource was: %#v", status.FailSyncReasonAllNilSubfields, source)
        return nil, status.AllNilSubfieldsError, fmt.Errorf("%s, NodeConfigSource was: %#v", status.AllNilSubfieldsError, source)
    }
    return &remoteConfigMap{source}, "", nil
}
@@ -109,7 +111,7 @@ func DecodeRemoteConfigSource(data []byte) (RemoteConfigSource, error) {
    // comparing the underlying API objects for semantic equality.
    func EqualRemoteConfigSources(a, b RemoteConfigSource) bool {
    if a != nil && b != nil {
        return apiequality.Semantic.DeepEqual(a.object(), b.object())
        return apiequality.Semantic.DeepEqual(a.NodeConfigSource(), b.NodeConfigSource())
    }
    return a == b
}
@@ -145,13 +147,12 @@ func (r *remoteConfigMap) Download(client clientset.Interface) (Payload, string,
    // get the ConfigMap via namespace/name, there doesn't seem to be a way to get it by UID
    cm, err := client.CoreV1().ConfigMaps(r.source.ConfigMap.Namespace).Get(r.source.ConfigMap.Name, metav1.GetOptions{})
    if err != nil {
        reason = fmt.Sprintf(status.FailSyncReasonDownloadFmt, r.APIPath())
        return nil, reason, fmt.Errorf("%s, error: %v", reason, err)
        return nil, status.DownloadError, fmt.Errorf("%s, error: %v", status.DownloadError, err)
    }

    // ensure that UID matches the UID on the source
    if r.source.ConfigMap.UID != cm.UID {
        reason = fmt.Sprintf(status.FailSyncReasonUIDMismatchFmt, r.source.ConfigMap.UID, r.APIPath(), cm.UID)
        reason = fmt.Sprintf(status.UIDMismatchErrorFmt, r.source.ConfigMap.UID, r.APIPath(), cm.UID)
        return nil, reason, fmt.Errorf(reason)
    }

@@ -178,6 +179,6 @@ func (r *remoteConfigMap) Encode() ([]byte, error) {
    return data, nil
}

func (r *remoteConfigMap) object() interface{} {
    return r.source
func (r *remoteConfigMap) NodeConfigSource() *apiv1.NodeConfigSource {
    return r.source.DeepCopy()
}

@@ -70,7 +70,7 @@ func TestNewRemoteConfigSource(t *testing.T) {
        return
    }
    // underlying object should match the object passed in
    if !apiequality.Semantic.DeepEqual(c.expect.object(), source.object()) {
    if !apiequality.Semantic.DeepEqual(c.expect.NodeConfigSource(), source.NodeConfigSource()) {
        t.Errorf("case %q, expect RemoteConfigSource %s but got %s", c.desc, spew.Sdump(c.expect), spew.Sdump(source))
    }
    })

@@ -24,9 +24,9 @@ import (
    "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint"
)

// so far only implements Current(), LastKnownGood(), SetCurrent(), and SetLastKnownGood()
// so far only implements Assigned(), LastKnownGood(), SetAssigned(), and SetLastKnownGood()
type fakeStore struct {
    current checkpoint.RemoteConfigSource
    assigned checkpoint.RemoteConfigSource
    lastKnownGood checkpoint.RemoteConfigSource
}

@@ -48,20 +48,20 @@ func (s *fakeStore) Load(source checkpoint.RemoteConfigSource) (*kubeletconfig.K
    return nil, fmt.Errorf("Load method not supported")
}

func (s *fakeStore) CurrentModified() (time.Time, error) {
    return time.Time{}, fmt.Errorf("CurrentModified method not supported")
func (s *fakeStore) AssignedModified() (time.Time, error) {
    return time.Time{}, fmt.Errorf("AssignedModified method not supported")
}

func (s *fakeStore) Current() (checkpoint.RemoteConfigSource, error) {
    return s.current, nil
func (s *fakeStore) Assigned() (checkpoint.RemoteConfigSource, error) {
    return s.assigned, nil
}

func (s *fakeStore) LastKnownGood() (checkpoint.RemoteConfigSource, error) {
    return s.lastKnownGood, nil
}

func (s *fakeStore) SetCurrent(source checkpoint.RemoteConfigSource) error {
    s.current = source
func (s *fakeStore) SetAssigned(source checkpoint.RemoteConfigSource) error {
    s.assigned = source
    return nil
}

@@ -31,7 +31,7 @@ import (

const (
    metaDir = "meta"
    currentFile = "current"
    assignedFile = "assigned"
    lastKnownGoodFile = "last-known-good"

    checkpointsDir = "checkpoints"
@@ -61,11 +61,11 @@ func (s *fsStore) Initialize() error {
    if err := utilfiles.EnsureDir(s.fs, s.dir); err != nil {
        return err
    }
    // ensure metadata directory and reference files (tracks current and lkg configs)
    // ensure metadata directory and reference files (tracks assigned and lkg configs)
    if err := utilfiles.EnsureDir(s.fs, filepath.Join(s.dir, metaDir)); err != nil {
        return err
    }
    if err := utilfiles.EnsureFile(s.fs, s.metaPath(currentFile)); err != nil {
    if err := utilfiles.EnsureFile(s.fs, s.metaPath(assignedFile)); err != nil {
        return err
    }
    if err := utilfiles.EnsureFile(s.fs, s.metaPath(lastKnownGoodFile)); err != nil {
@@ -111,8 +111,8 @@ func (s *fsStore) Load(source checkpoint.RemoteConfigSource) (*kubeletconfig.Kub
    return kc, nil
}

func (s *fsStore) CurrentModified() (time.Time, error) {
    path := s.metaPath(currentFile)
func (s *fsStore) AssignedModified() (time.Time, error) {
    path := s.metaPath(assignedFile)
    info, err := s.fs.Stat(path)
    if err != nil {
        return time.Time{}, fmt.Errorf("failed to stat %q while checking modification time, error: %v", path, err)
@@ -120,16 +120,16 @@ func (s *fsStore) CurrentModified() (time.Time, error) {
    return info.ModTime(), nil
}

func (s *fsStore) Current() (checkpoint.RemoteConfigSource, error) {
    return readRemoteConfigSource(s.fs, s.metaPath(currentFile))
func (s *fsStore) Assigned() (checkpoint.RemoteConfigSource, error) {
    return readRemoteConfigSource(s.fs, s.metaPath(assignedFile))
}

func (s *fsStore) LastKnownGood() (checkpoint.RemoteConfigSource, error) {
    return readRemoteConfigSource(s.fs, s.metaPath(lastKnownGoodFile))
}

func (s *fsStore) SetCurrent(source checkpoint.RemoteConfigSource) error {
    return writeRemoteConfigSource(s.fs, s.metaPath(currentFile), source)
func (s *fsStore) SetAssigned(source checkpoint.RemoteConfigSource) error {
    return writeRemoteConfigSource(s.fs, s.metaPath(assignedFile), source)
}

func (s *fsStore) SetLastKnownGood(source checkpoint.RemoteConfigSource) error {

@@ -87,9 +87,9 @@ func TestFsStoreInitialize(t *testing.T) {
        t.Fatalf("expect %q to exist, but stat failed with error: %v", store.checkpointPath(""), err)
    }

    // check that currentFile exists
    if _, err := store.fs.Stat(store.metaPath(currentFile)); err != nil {
        t.Fatalf("expect %q to exist, but stat failed with error: %v", store.metaPath(currentFile), err)
    // check that assignedFile exists
    if _, err := store.fs.Stat(store.metaPath(assignedFile)); err != nil {
        t.Fatalf("expect %q to exist, but stat failed with error: %v", store.metaPath(assignedFile), err)
    }

    // check that lastKnownGoodFile exists
@@ -270,34 +270,34 @@ func TestFsStoreLoad(t *testing.T) {
    }
}

func TestFsStoreCurrentModified(t *testing.T) {
func TestFsStoreAssignedModified(t *testing.T) {
    store, err := newInitializedFakeFsStore()
    if err != nil {
        t.Fatalf("error constructing store: %v", err)
    }

    // create an empty current file, this is good enough for testing
    saveTestSourceFile(t, store, currentFile, nil)
    // create an empty assigned file, this is good enough for testing
    saveTestSourceFile(t, store, assignedFile, nil)

    // set the timestamps to the current time, so we can compare to result of store.CurrentModified
    // set the timestamps to the current time, so we can compare to result of store.AssignedModified
    now := time.Now()
    err = store.fs.Chtimes(store.metaPath(currentFile), now, now)
    err = store.fs.Chtimes(store.metaPath(assignedFile), now, now)
    if err != nil {
        t.Fatalf("could not change timestamps, error: %v", err)
    }

    // for now we hope that the system won't truncate the time to a less precise unit,
    // if this test fails on certain systems that may be the reason.
    modTime, err := store.CurrentModified()
    modTime, err := store.AssignedModified()
    if err != nil {
        t.Fatalf("unable to determine modification time of current config source, error: %v", err)
        t.Fatalf("unable to determine modification time of assigned config source, error: %v", err)
    }
    if !now.Equal(modTime) {
        t.Errorf("expect %q but got %q", now.Format(time.RFC3339), modTime.Format(time.RFC3339))
    }
}

func TestFsStoreCurrent(t *testing.T) {
func TestFsStoreAssigned(t *testing.T) {
    store, err := newInitializedFakeFsStore()
    if err != nil {
        t.Fatalf("error constructing store: %v", err)
@@ -325,10 +325,10 @@ func TestFsStoreCurrent(t *testing.T) {
    for _, c := range cases {
        t.Run(c.desc, func(t *testing.T) {
            // save the last known good source
            saveTestSourceFile(t, store, currentFile, c.expect)
            saveTestSourceFile(t, store, assignedFile, c.expect)

            // load last-known-good and compare to expected result
            source, err := store.Current()
            source, err := store.Assigned()
            utiltest.ExpectError(t, err, c.err)
            if err != nil {
                return
@@ -383,7 +383,7 @@ func TestFsStoreLastKnownGood(t *testing.T) {
    }
}

func TestFsStoreSetCurrent(t *testing.T) {
func TestFsStoreSetAssigned(t *testing.T) {
    store, err := newInitializedFakeFsStore()
    if err != nil {
        t.Fatalf("error constructing store: %v", err)
@@ -409,15 +409,15 @@ source:
        t.Fatalf("unexpected error: %v", err)
    }

    // save the current source
    if err := store.SetCurrent(source); err != nil {
    // save the assigned source
    if err := store.SetAssigned(source); err != nil {
        t.Fatalf("unexpected error: %v", err)
    }

    // check that the source saved as we would expect
    data := readTestSourceFile(t, store, currentFile)
    data := readTestSourceFile(t, store, assignedFile)
    if expect != string(data) {
        t.Errorf("expect current source file to contain %q, but got %q", expect, string(data))
        t.Errorf("expect assigned source file to contain %q, but got %q", expect, string(data))
    }
}

@@ -485,7 +485,7 @@ func TestFsStoreReset(t *testing.T) {
    }
    cases := []struct {
        desc string
        current checkpoint.RemoteConfigSource
        assigned checkpoint.RemoteConfigSource
        lastKnownGood checkpoint.RemoteConfigSource
        updated bool
    }{
@@ -499,7 +499,7 @@ func TestFsStoreReset(t *testing.T) {
    for _, c := range cases {
        t.Run(c.desc, func(t *testing.T) {
            // manually save the sources to their respective files
            saveTestSourceFile(t, store, currentFile, c.current)
            saveTestSourceFile(t, store, assignedFile, c.assigned)
            saveTestSourceFile(t, store, lastKnownGoodFile, c.lastKnownGood)

            // reset
@@ -509,15 +509,15 @@ func TestFsStoreReset(t *testing.T) {
            }

            // make sure the files were emptied
            if size := testSourceFileSize(t, store, currentFile); size > 0 {
                t.Errorf("case %q, expect source file %q to be empty but got %d bytes", c.desc, currentFile, size)
            if size := testSourceFileSize(t, store, assignedFile); size > 0 {
                t.Errorf("case %q, expect source file %q to be empty but got %d bytes", c.desc, assignedFile, size)
            }
            if size := testSourceFileSize(t, store, lastKnownGoodFile); size > 0 {
                t.Errorf("case %q, expect source file %q to be empty but got %d bytes", c.desc, lastKnownGoodFile, size)
            }

            // make sure Current() and LastKnownGood() both return nil
            current, err := store.Current()
            // make sure Assigned() and LastKnownGood() both return nil
            assigned, err := store.Assigned()
            if err != nil {
                t.Fatalf("unexpected error: %v", err)
            }
@@ -525,9 +525,9 @@ func TestFsStoreReset(t *testing.T) {
            if err != nil {
                t.Fatalf("unexpected error: %v", err)
            }
            if current != nil || lastKnownGood != nil {
                t.Errorf("case %q, expect nil for current and last-known-good checkpoints, but still have %q and %q, respectively",
                    c.desc, current, lastKnownGood)
            if assigned != nil || lastKnownGood != nil {
                t.Errorf("case %q, expect nil for assigned and last-known-good checkpoints, but still have %q and %q, respectively",
                    c.desc, assigned, lastKnownGood)
            }
            if c.updated != updated {
                t.Errorf("case %q, expect reset to return %t, but got %t", c.desc, c.updated, updated)

@@ -24,7 +24,7 @@ import (
 	"k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint"
 )
 
-// Store saves checkpoints and information about which is the current and last-known-good checkpoint to a storage layer
+// Store saves checkpoints and information about which is the assigned and last-known-good checkpoint to a storage layer
 type Store interface {
 	// Initialize sets up the storage layer
 	Initialize() error
@@ -38,32 +38,32 @@ type Store interface {
 	// Load loads the KubeletConfiguration from the checkpoint referenced by `source`.
 	Load(source checkpoint.RemoteConfigSource) (*kubeletconfig.KubeletConfiguration, error)
 
-	// CurrentModified returns the last time that the current UID was set
-	CurrentModified() (time.Time, error)
-	// Current returns the source that points to the current checkpoint, or nil if no current checkpoint is set
-	Current() (checkpoint.RemoteConfigSource, error)
+	// AssignedModified returns the last time that the assigned checkpoint was set
+	AssignedModified() (time.Time, error)
+	// Assigned returns the source that points to the checkpoint currently assigned to the Kubelet, or nil if no assigned checkpoint is set
+	Assigned() (checkpoint.RemoteConfigSource, error)
 	// LastKnownGood returns the source that points to the last-known-good checkpoint, or nil if no last-known-good checkpoint is set
 	LastKnownGood() (checkpoint.RemoteConfigSource, error)
 
-	// SetCurrent saves the source that points to the current checkpoint, set to nil to unset
-	SetCurrent(source checkpoint.RemoteConfigSource) error
+	// SetAssigned saves the source that points to the assigned checkpoint, set to nil to unset
+	SetAssigned(source checkpoint.RemoteConfigSource) error
 	// SetLastKnownGood saves the source that points to the last-known-good checkpoint, set to nil to unset
 	SetLastKnownGood(source checkpoint.RemoteConfigSource) error
-	// Reset unsets the current and last-known-good UIDs and returns whether the current UID was unset as a result of the reset
+	// Reset unsets the assigned and last-known-good checkpoints and returns whether the assigned checkpoint was unset as a result of the reset
 	Reset() (bool, error)
 }
 
 // reset is a helper for implementing Reset, which can be implemented in terms of Store methods
 func reset(s Store) (bool, error) {
-	current, err := s.Current()
+	assigned, err := s.Assigned()
 	if err != nil {
 		return false, err
 	}
 	if err := s.SetLastKnownGood(nil); err != nil {
 		return false, fmt.Errorf("failed to reset last-known-good UID in checkpoint store, error: %v", err)
 	}
-	if err := s.SetCurrent(nil); err != nil {
-		return false, fmt.Errorf("failed to reset current UID in checkpoint store, error: %v", err)
+	if err := s.SetAssigned(nil); err != nil {
+		return false, fmt.Errorf("failed to reset assigned UID in checkpoint store, error: %v", err)
 	}
-	return current != nil, nil
+	return assigned != nil, nil
 }

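The Store contract above is easiest to see against a toy implementation. The following is a minimal sketch only, assuming the interface as renamed in this diff; memStore is a hypothetical in-memory stand-in (the real implementation is the filesystem-backed FsStore), and the checkpointing methods (Initialize, Exists, Save, Load) are elided:

    // Hypothetical in-memory Store, for illustration only.
    type memStore struct {
        assigned      checkpoint.RemoteConfigSource
        lastKnownGood checkpoint.RemoteConfigSource
        modified      time.Time
    }

    func (m *memStore) Assigned() (checkpoint.RemoteConfigSource, error)      { return m.assigned, nil }
    func (m *memStore) LastKnownGood() (checkpoint.RemoteConfigSource, error) { return m.lastKnownGood, nil }
    func (m *memStore) AssignedModified() (time.Time, error)                  { return m.modified, nil }

    func (m *memStore) SetAssigned(source checkpoint.RemoteConfigSource) error {
        m.assigned = source
        m.modified = time.Now() // AssignedModified anchors the trial period
        return nil
    }

    func (m *memStore) SetLastKnownGood(source checkpoint.RemoteConfigSource) error {
        m.lastKnownGood = source
        return nil
    }

    // Reset can be expressed entirely in terms of the other methods,
    // exactly as the package-level reset helper above does.
    func (m *memStore) Reset() (bool, error) { return reset(m) }
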
@@ -48,21 +48,21 @@ func TestReset(t *testing.T) {
 		s       *fakeStore
 		updated bool
 	}{
-		{&fakeStore{current: nil, lastKnownGood: nil}, false},
-		{&fakeStore{current: source, lastKnownGood: nil}, true},
-		{&fakeStore{current: nil, lastKnownGood: source}, false},
-		{&fakeStore{current: source, lastKnownGood: source}, true},
-		{&fakeStore{current: source, lastKnownGood: otherSource}, true},
-		{&fakeStore{current: otherSource, lastKnownGood: source}, true},
+		{&fakeStore{assigned: nil, lastKnownGood: nil}, false},
+		{&fakeStore{assigned: source, lastKnownGood: nil}, true},
+		{&fakeStore{assigned: nil, lastKnownGood: source}, false},
+		{&fakeStore{assigned: source, lastKnownGood: source}, true},
+		{&fakeStore{assigned: source, lastKnownGood: otherSource}, true},
+		{&fakeStore{assigned: otherSource, lastKnownGood: source}, true},
 	}
 	for _, c := range cases {
 		updated, err := reset(c.s)
 		if err != nil {
 			t.Fatalf("unexpected error: %v", err)
 		}
-		if c.s.current != nil || c.s.lastKnownGood != nil {
-			t.Errorf("case %q, expect nil for current and last-known-good checkpoints, but still have %q and %q, respectively",
-				spew.Sdump(c.s), c.s.current, c.s.lastKnownGood)
+		if c.s.assigned != nil || c.s.lastKnownGood != nil {
+			t.Errorf("case %q, expect nil for assigned and last-known-good checkpoints, but still have %q and %q, respectively",
+				spew.Sdump(c.s), c.s.assigned, c.s.lastKnownGood)
 		}
 		if c.updated != updated {
 			t.Errorf("case %q, expect reset to return %t, but got %t", spew.Sdump(c.s), c.updated, updated)

@@ -71,14 +71,14 @@ func (cc *Controller) syncConfigSource(client clientset.Interface, eventClient v
 
 	node, err := latestNode(cc.informer.GetStore(), nodeName)
 	if err != nil {
-		cc.configOk.SetFailSyncCondition(status.FailSyncReasonInformer)
-		syncerr = fmt.Errorf("%s, error: %v", status.FailSyncReasonInformer, err)
+		cc.configStatus.SetErrorOverride(fmt.Sprintf(status.SyncErrorFmt, status.InternalError))
+		syncerr = fmt.Errorf("%s, error: %v", status.InternalError, err)
 		return
 	}
 
 	// check the Node and download any new config
 	if updated, cur, reason, err := cc.doSyncConfigSource(client, node.Spec.ConfigSource); err != nil {
-		cc.configOk.SetFailSyncCondition(reason)
+		cc.configStatus.SetErrorOverride(fmt.Sprintf(status.SyncErrorFmt, reason))
 		syncerr = fmt.Errorf("%s, error: %v", reason, err)
 		return
 	} else if updated {
@@ -99,8 +99,8 @@ func (cc *Controller) syncConfigSource(client clientset.Interface, eventClient v
 	// If we get here:
 	// - there is no need to restart to update the current config
 	// - there was no error trying to sync configuration
-	// - if, previously, there was an error trying to sync configuration, we need to clear that error from the condition
-	cc.configOk.ClearFailSyncCondition()
+	// - if, previously, there was an error trying to sync configuration, we need to clear that error from the status
+	cc.configStatus.SetErrorOverride("")
 }
 
 // doSyncConfigSource checkpoints and sets the store's current config to the new config or resets config,
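The sync loop relies on the override semantics defined later in this diff's status package: SetErrorOverride masks the base error recorded by SetError until it is cleared with the empty string. A condensed illustration of that contract (reportedError is a hypothetical helper, not in the real code):

    // Illustrative only: how the override interacts with the base error.
    func (s *nodeConfigStatus) reportedError() string {
        if len(s.errorOverride) > 0 {
            return s.errorOverride // a transient sync error wins while set
        }
        return s.status.Error // otherwise report the base error from SetError
    }
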
@@ -125,7 +125,7 @@ func (cc *Controller) doSyncConfigSource(client clientset.Interface, source *api
 	if err != nil {
 		return false, nil, reason, err
 	}
-	updated, reason, err := cc.setCurrentConfig(remote)
+	updated, reason, err := cc.setAssignedConfig(remote)
 	if err != nil {
 		return false, nil, reason, err
 	}
|
@ -137,9 +137,9 @@ func (cc *Controller) doSyncConfigSource(client clientset.Interface, source *api
|
|||
func (cc *Controller) checkpointConfigSource(client clientset.Interface, source checkpoint.RemoteConfigSource) (string, error) {
|
||||
// if the checkpoint already exists, skip downloading
|
||||
if ok, err := cc.checkpointStore.Exists(source); err != nil {
|
||||
reason := fmt.Sprintf(status.FailSyncReasonCheckpointExistenceFmt, source.APIPath(), source.UID())
|
||||
return reason, fmt.Errorf("%s, error: %v", reason, err)
|
||||
return status.InternalError, fmt.Errorf("%s, error: %v", status.InternalError, err)
|
||||
} else if ok {
|
||||
// TODO(mtaufen): update this to include ResourceVersion in #63221
|
||||
utillog.Infof("checkpoint already exists for object %s with UID %s, skipping download", source.APIPath(), source.UID())
|
||||
return "", nil
|
||||
}
|
||||
|
@@ -153,28 +153,21 @@ func (cc *Controller) checkpointConfigSource(client clientset.Interface, source
 	// save
 	err = cc.checkpointStore.Save(payload)
 	if err != nil {
-		reason := fmt.Sprintf(status.FailSyncReasonSaveCheckpointFmt, source.APIPath(), payload.UID())
-		return reason, fmt.Errorf("%s, error: %v", reason, err)
+		return status.InternalError, fmt.Errorf("%s, error: %v", status.InternalError, err)
 	}
 
 	return "", nil
 }
 
-// setCurrentConfig the current checkpoint config in the store
-// returns whether the current config changed as a result, or a sanitized failure reason and an error.
-func (cc *Controller) setCurrentConfig(source checkpoint.RemoteConfigSource) (bool, string, error) {
-	failReason := func(s checkpoint.RemoteConfigSource) string {
-		if source == nil {
-			return status.FailSyncReasonSetCurrentLocal
-		}
-		return fmt.Sprintf(status.FailSyncReasonSetCurrentUIDFmt, source.APIPath(), source.UID())
-	}
-	current, err := cc.checkpointStore.Current()
+// setAssignedConfig updates the assigned checkpoint config in the store.
+// Returns whether the current config changed as a result, or a sanitized failure reason and an error.
+func (cc *Controller) setAssignedConfig(source checkpoint.RemoteConfigSource) (bool, string, error) {
+	current, err := cc.checkpointStore.Assigned()
 	if err != nil {
-		return false, failReason(source), err
+		return false, status.InternalError, err
 	}
-	if err := cc.checkpointStore.SetCurrent(source); err != nil {
-		return false, failReason(source), err
+	if err := cc.checkpointStore.SetAssigned(source); err != nil {
+		return false, status.InternalError, err
 	}
 	return !checkpoint.EqualRemoteConfigSources(current, source), "", nil
 }

@@ -184,7 +177,7 @@ func (cc *Controller) setCurrentConfig(source checkpoint.RemoteConfigSource) (bo
 func (cc *Controller) resetConfig() (bool, string, error) {
 	updated, err := cc.checkpointStore.Reset()
 	if err != nil {
-		return false, status.FailSyncReasonReset, err
+		return false, status.InternalError, err
 	}
 	return updated, "", nil
 }

@@ -21,7 +21,6 @@ import (
 	"path/filepath"
 	"time"
 
-	apiv1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
 	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
@@ -47,14 +46,11 @@ const (
 // Controller manages syncing dynamic Kubelet configurations
 // For more information, see the proposal: https://github.com/kubernetes/community/blob/master/contributors/design-proposals/node/dynamic-kubelet-configuration.md
 type Controller struct {
-	// defaultConfig is the configuration to use if no initConfig is provided
-	defaultConfig *kubeletconfig.KubeletConfiguration
-
 	// pendingConfigSource; write to this channel to indicate that the config source needs to be synced from the API server
 	pendingConfigSource chan bool
 
-	// configOk manages the KubeletConfigOk condition that is reported in Node.Status.Conditions
-	configOk status.ConfigOkCondition
+	// configStatus manages the status we report on the Node object
+	configStatus status.NodeConfigStatus
 
 	// informer is the informer that watches the Node object
 	informer cache.SharedInformer
@ -64,12 +60,11 @@ type Controller struct {
|
|||
}
|
||||
|
||||
// NewController constructs a new Controller object and returns it. Directory paths must be absolute.
|
||||
func NewController(defaultConfig *kubeletconfig.KubeletConfiguration, dynamicConfigDir string) *Controller {
|
||||
func NewController(dynamicConfigDir string) *Controller {
|
||||
return &Controller{
|
||||
defaultConfig: defaultConfig,
|
||||
// channels must have capacity at least 1, since we signal with non-blocking writes
|
||||
pendingConfigSource: make(chan bool, 1),
|
||||
configOk: status.NewConfigOkCondition(),
|
||||
configStatus: status.NewNodeConfigStatus(),
|
||||
checkpointStore: store.NewFsStore(utilfs.DefaultFs{}, filepath.Join(dynamicConfigDir, storeDir)),
|
||||
}
|
||||
}
|
||||
|
@@ -79,26 +74,39 @@ func NewController(defaultConfig *kubeletconfig.KubeletCon
 func (cc *Controller) Bootstrap() (*kubeletconfig.KubeletConfiguration, error) {
 	utillog.Infof("starting controller")
 
-	// ALWAYS validate the local config. This makes incorrectly provisioned nodes an error.
-	// It must be valid because it is the default last-known-good config.
-	utillog.Infof("validating local config")
-	if err := validation.ValidateKubeletConfiguration(cc.defaultConfig); err != nil {
-		return nil, fmt.Errorf("local config failed validation, error: %v", err)
-	}
-
 	// ensure the filesystem is initialized
 	if err := cc.initializeDynamicConfigDir(); err != nil {
 		return nil, err
 	}
 
-	assigned, curSource, reason, err := cc.loadAssignedConfig(cc.defaultConfig)
+	// determine assigned source and set status
+	assignedSource, err := cc.checkpointStore.Assigned()
+	if err != nil {
+		return nil, err
+	}
+	if assignedSource != nil {
+		cc.configStatus.SetAssigned(assignedSource.NodeConfigSource())
+	}
+
+	// determine last-known-good source and set status
+	lastKnownGoodSource, err := cc.checkpointStore.LastKnownGood()
+	if err != nil {
+		return nil, err
+	}
+	if lastKnownGoodSource != nil {
+		cc.configStatus.SetLastKnownGood(lastKnownGoodSource.NodeConfigSource())
+	}
+
+	// if the assigned source is nil, return nil to indicate local config
+	if assignedSource == nil {
+		return nil, nil
+	}
+
+	// attempt to load assigned config
+	assignedConfig, reason, err := cc.loadConfig(assignedSource)
 	if err == nil {
-		// set the status to indicate we will use the assigned config
-		if curSource != nil {
-			cc.configOk.Set(fmt.Sprintf(status.CurRemoteMessageFmt, curSource.APIPath()), reason, apiv1.ConditionTrue)
-		} else {
-			cc.configOk.Set(status.CurLocalMessage, reason, apiv1.ConditionTrue)
-		}
+		// update the active source to the non-nil assigned source
+		cc.configStatus.SetActive(assignedSource.NodeConfigSource())
 
 		// update the last-known-good config if necessary, and start a timer that
 		// periodically checks whether the last-known good needs to be updated
@@ -106,32 +114,34 @@ func (cc *Controller) Bootstrap() (*kubeletconfig.KubeletConfiguration, error) {
 		// wait.Forever will call the func once before starting the timer
 		go wait.Forever(func() { cc.checkTrial(configTrialDuration) }, 10*time.Second)
 
-		return assigned, nil
-	} // Assert: the assigned config failed to load, parse, or validate
+		return assignedConfig, nil
+	} // Assert: the assigned config failed to load or validate
 
 	// TODO(mtaufen): consider re-attempting download when a load/verify/parse/validate
 	// error happens outside trial period, we already made it past the trial so it's probably filesystem corruption
-	// or something else scary (unless someone is using a 0-length trial period)
-	// load from checkpoint
+	// or something else scary
 
 	// log the reason and error details for the failure to load the assigned config
 	utillog.Errorf(fmt.Sprintf("%s, error: %v", reason, err))
 
-	// load the last-known-good config
-	lkg, lkgSource, err := cc.loadLastKnownGoodConfig(cc.defaultConfig)
+	// set status to indicate the failure with the assigned config
+	cc.configStatus.SetError(reason)
+
+	// if the last-known-good source is nil, return nil to indicate local config
+	if lastKnownGoodSource == nil {
+		return nil, nil
+	}
+
+	// attempt to load the last-known-good config
+	lastKnownGoodConfig, _, err := cc.loadConfig(lastKnownGoodSource)
 	if err != nil {
+		// we failed to load the last-known-good, so something is really messed up and we just return the error
 		return nil, err
 	}
 
-	// set the status to indicate that we had to roll back to the lkg for the reason reported when we tried to load the assigned config
-	if lkgSource != nil {
-		cc.configOk.Set(fmt.Sprintf(status.LkgRemoteMessageFmt, lkgSource.APIPath()), reason, apiv1.ConditionFalse)
-	} else {
-		cc.configOk.Set(status.LkgLocalMessage, reason, apiv1.ConditionFalse)
-	}
-
-	// return the last-known-good config
-	return lkg, nil
+	// update the active source to the non-nil last-known-good source
+	cc.configStatus.SetActive(lastKnownGoodSource.NodeConfigSource())
+	return lastKnownGoodConfig, nil
 }
 
 // StartSync launches the controller's sync loops if `client` is non-nil and `nodeName` is non-empty.
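Taken together, the new Bootstrap reduces to a small decision tree over the two checkpoint references. The sketch below is a condensed illustration only (bootstrapSketch is a hypothetical name; error handling and the trial timer are elided), not the literal function:

    // Condensed, illustrative view of the bootstrap decision flow above.
    func bootstrapSketch(cc *Controller) (*kubeletconfig.KubeletConfiguration, error) {
        assignedSource, _ := cc.checkpointStore.Assigned()
        if assignedSource == nil {
            return nil, nil // no assigned checkpoint: run with the local config
        }
        if kc, _, err := cc.loadConfig(assignedSource); err == nil {
            cc.configStatus.SetActive(assignedSource.NodeConfigSource())
            return kc, nil // assigned config loads and validates: it becomes Active
        }
        lkgSource, _ := cc.checkpointStore.LastKnownGood()
        if lkgSource == nil {
            return nil, nil // no last-known-good: fall back to the local config
        }
        kc, _, err := cc.loadConfig(lkgSource)
        if err != nil {
            return nil, err // even the last-known-good is broken: surface the error
        }
        cc.configStatus.SetActive(lkgSource.NodeConfigSource())
        return kc, nil // roll back to the last-known-good config
    }
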
@@ -146,11 +156,11 @@ func (cc *Controller) StartSync(client clientset.Interface, eventClient v1core.E
 		return
 	}
 
-	// start the ConfigOk condition sync loop
+	// start the status sync loop
 	go utilpanic.HandlePanic(func() {
-		utillog.Infof("starting ConfigOk condition sync loop")
+		utillog.Infof("starting status sync loop")
 		wait.JitterUntil(func() {
-			cc.configOk.Sync(client, nodeName)
+			cc.configStatus.Sync(client, nodeName)
 		}, 10*time.Second, 0.2, true, wait.NeverStop)
 	})()

@@ -176,54 +186,18 @@ func (cc *Controller) StartSync(client clientset.Interface, eventClient v1core.E
 
 }
 
-// loadAssignedConfig loads the Kubelet's currently assigned config,
-// based on the setting in the local checkpoint store.
-// It returns the loaded configuration, the checkpoint store's config source record,
-// a clean success or failure reason that can be reported in the status, and any error that occurs.
-// If the local config should be used, it will be returned. You should validate local before passing it to this function.
-func (cc *Controller) loadAssignedConfig(local *kubeletconfig.KubeletConfiguration) (*kubeletconfig.KubeletConfiguration, checkpoint.RemoteConfigSource, string, error) {
-	source, err := cc.checkpointStore.Current()
-	if err != nil {
-		return nil, nil, fmt.Sprintf(status.CurFailLoadReasonFmt, "unknown"), err
-	}
-	// nil source is the signal to use the local config
-	if source == nil {
-		return local, source, status.CurLocalOkayReason, nil
-	}
+// loadConfig loads Kubelet config from a checkpoint
+// It returns the loaded configuration or a clean failure reason (for status reporting) and an error.
+func (cc *Controller) loadConfig(source checkpoint.RemoteConfigSource) (*kubeletconfig.KubeletConfiguration, string, error) {
 	// load KubeletConfiguration from checkpoint
 	kc, err := cc.checkpointStore.Load(source)
 	if err != nil {
-		return nil, source, fmt.Sprintf(status.CurFailLoadReasonFmt, source.APIPath()), err
+		return nil, status.LoadError, err
 	}
 	if err := validation.ValidateKubeletConfiguration(kc); err != nil {
-		return nil, source, fmt.Sprintf(status.CurFailValidateReasonFmt, source.APIPath()), err
+		return nil, status.ValidateError, err
 	}
-	return kc, source, status.CurRemoteOkayReason, nil
-}
-
-// loadLastKnownGoodConfig loads the Kubelet's last-known-good config,
-// based on the setting in the local checkpoint store.
-// It returns the loaded configuration, the checkpoint store's config source record,
-// and any error that occurs.
-// If the local config should be used, it will be returned. You should validate local before passing it to this function.
-func (cc *Controller) loadLastKnownGoodConfig(local *kubeletconfig.KubeletConfiguration) (*kubeletconfig.KubeletConfiguration, checkpoint.RemoteConfigSource, error) {
-	source, err := cc.checkpointStore.LastKnownGood()
-	if err != nil {
-		return nil, nil, fmt.Errorf("unable to determine last-known-good config, error: %v", err)
-	}
-	// nil source is the signal to use the local config
-	if source == nil {
-		return local, source, nil
-	}
-	// load from checkpoint
-	kc, err := cc.checkpointStore.Load(source)
-	if err != nil {
-		return nil, source, fmt.Errorf("%s, error: %v", fmt.Sprintf(status.LkgFailLoadReasonFmt, source.APIPath()), err)
-	}
-	if err := validation.ValidateKubeletConfiguration(kc); err != nil {
-		return nil, source, fmt.Errorf("%s, error: %v", fmt.Sprintf(status.LkgFailValidateReasonFmt, source.APIPath()), err)
-	}
-	return kc, source, nil
+	return kc, "", nil
 }
 
 // initializeDynamicConfigDir makes sure that the storage layers for various controller components are set up correctly
@@ -246,10 +220,10 @@ func (cc *Controller) checkTrial(duration time.Duration) {
 	}
 }
 
-// inTrial returns true if the time elapsed since the last modification of the current config does not exceed `trialDur`, false otherwise
+// inTrial returns true if the time elapsed since the last modification of the assigned config does not exceed `trialDur`, false otherwise
 func (cc *Controller) inTrial(trialDur time.Duration) (bool, error) {
 	now := time.Now()
-	t, err := cc.checkpointStore.CurrentModified()
+	t, err := cc.checkpointStore.AssignedModified()
 	if err != nil {
 		return false, err
 	}

|
|||
}
|
||||
|
||||
// graduateAssignedToLastKnownGood sets the last-known-good in the checkpointStore
|
||||
// to the same value as the current config maintained by the checkpointStore
|
||||
// to the same value as the assigned config maintained by the checkpointStore
|
||||
func (cc *Controller) graduateAssignedToLastKnownGood() error {
|
||||
current, err := cc.checkpointStore.Current()
|
||||
// get the assigned config
|
||||
assigned, err := cc.checkpointStore.Assigned()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = cc.checkpointStore.SetLastKnownGood(current)
|
||||
// update the last-known-good config
|
||||
err = cc.checkpointStore.SetLastKnownGood(assigned)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// update the status to reflect the new last-known-good config
|
||||
cc.configStatus.SetLastKnownGood(assigned.NodeConfigSource())
|
||||
return nil
|
||||
}
|
||||
|
|
|
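inTrial and graduateAssignedToLastKnownGood together implement the soak period described in the new API docs: once the assigned checkpoint has been in place longer than the trial duration, it is promoted. A minimal sketch of the periodic check that ties them together (checkTrialPeriod is a hypothetical name; the real checkTrial is driven by the wait.Forever call in Bootstrap):

    // Illustrative sketch of the trial-period check; the name is hypothetical.
    func (cc *Controller) checkTrialPeriod(duration time.Duration) {
        inTrial, err := cc.inTrial(duration)
        if err != nil {
            utillog.Errorf("failed to check trial period, error: %v", err)
            return
        }
        // once the assigned config has soaked past the trial duration,
        // promote it to last-known-good
        if !inTrial {
            if err := cc.graduateAssignedToLastKnownGood(); err != nil {
                utillog.Errorf("failed to graduate assigned config to last-known-good, error: %v", err)
            }
        }
    }
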
@@ -10,14 +10,11 @@ go_library(
     srcs = ["status.go"],
     importpath = "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/status",
     deps = [
-        "//pkg/api/legacyscheme:go_default_library",
-        "//pkg/apis/core:go_default_library",
-        "//pkg/kubelet/kubeletconfig/util/equal:go_default_library",
         "//pkg/kubelet/kubeletconfig/util/log:go_default_library",
+        "//pkg/util/node:go_default_library",
         "//vendor/k8s.io/api/core/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
     ],
 )

@@ -19,298 +19,164 @@ package status
 import (
 	"fmt"
 	"sync"
-	"time"
 
 	apiv1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	kuberuntime "k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/util/strategicpatch"
+	"k8s.io/apimachinery/pkg/types"
 	clientset "k8s.io/client-go/kubernetes"
-	"k8s.io/kubernetes/pkg/api/legacyscheme"
-	api "k8s.io/kubernetes/pkg/apis/core"
-	utilequal "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/equal"
 	utillog "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/log"
+	nodeutil "k8s.io/kubernetes/pkg/util/node"
 )
 
-// TODO(mtaufen): s/current/assigned, as this is more accurate e.g. if you are using lkg, you aren't currently using "current" :)
 const (
-	// CurLocalMessage indicates that the Kubelet is using its local config, which consists of defaults, flags, and/or local files
-	CurLocalMessage = "using current: local"
-	// LkgLocalMessage indicates that the Kubelet is using its local config, which consists of defaults, flags, and/or local files
-	LkgLocalMessage = "using last-known-good: local"
-
-	// CurRemoteMessageFmt indicates the Kubelet is using its current config, which is from an API source
-	CurRemoteMessageFmt = "using current: %s"
-	// LkgRemoteMessageFmt indicates the Kubelet is using its last-known-good config, which is from an API source
-	LkgRemoteMessageFmt = "using last-known-good: %s"
-
-	// CurLocalOkayReason indicates that the Kubelet is using its local config
-	CurLocalOkayReason = "when the config source is nil, the Kubelet uses its local config"
-	// CurRemoteOkayReason indicates that the config referenced by Node.ConfigSource is currently passing all checks
-	CurRemoteOkayReason = "passing all checks"
-
-	// CurFailLoadReasonFmt indicates that the Kubelet failed to load the current config checkpoint for an API source
-	CurFailLoadReasonFmt = "failed to load current: %s"
-	// CurFailValidateReasonFmt indicates that the Kubelet failed to validate the current config checkpoint for an API source
-	CurFailValidateReasonFmt = "failed to validate current: %s"
-
-	// LkgFail*ReasonFmt reasons are currently used to print errors in the Kubelet log, but do not appear in Node.Status.Conditions
-
-	// LkgFailLoadReasonFmt indicates that the Kubelet failed to load the last-known-good config checkpoint for an API source
-	LkgFailLoadReasonFmt = "failed to load last-known-good: %s"
-	// LkgFailValidateReasonFmt indicates that the Kubelet failed to validate the last-known-good config checkpoint for an API source
-	LkgFailValidateReasonFmt = "failed to validate last-known-good: %s"
-
-	// FailSyncReasonFmt is used when the system couldn't sync the config, due to a malformed Node.Spec.ConfigSource, a download failure, etc.
-	FailSyncReasonFmt = "failed to sync, reason: %s"
-	// FailSyncReasonAllNilSubfields is used when no subfields are set
-	FailSyncReasonAllNilSubfields = "invalid NodeConfigSource, exactly one subfield must be non-nil, but all were nil"
-	// FailSyncReasonPartialConfigMapSource is used when some required subfields remain unset
-	FailSyncReasonPartialConfigMapSource = "invalid ConfigSource.ConfigMap, all of UID, Name, Namespace, and KubeletConfigKey must be specified"
-	// FailSyncReasonUIDMismatchFmt is used when there is a UID mismatch between the referenced and downloaded ConfigMaps,
+	// LoadError indicates that the Kubelet failed to load the config checkpoint
+	LoadError = "failed to load config, see Kubelet log for details"
+	// ValidateError indicates that the Kubelet failed to validate the config checkpoint
+	ValidateError = "failed to validate config, see Kubelet log for details"
+	// AllNilSubfieldsError is used when no subfields are set
+	// This could happen in the case that an old client tries to read an object from a newer API server with a set subfield it does not know about
+	AllNilSubfieldsError = "invalid NodeConfigSource, exactly one subfield must be non-nil, but all were nil"
+	// UIDMismatchErrorFmt is used when there is a UID mismatch between the referenced and downloaded ConfigMaps,
 	// this can happen because objects must be downloaded by namespace/name, rather than by UID
-	FailSyncReasonUIDMismatchFmt = "invalid ConfigSource.ConfigMap.UID: %s does not match %s.UID: %s"
-	// FailSyncReasonDownloadFmt is used when the download fails, e.g. due to network issues
-	FailSyncReasonDownloadFmt = "failed to download: %s"
-	// FailSyncReasonInformer is used when the informer fails to report the Node object
-	FailSyncReasonInformer = "failed to read Node from informer object cache"
-	// FailSyncReasonReset is used when we can't reset the local configuration references, e.g. due to filesystem issues
-	FailSyncReasonReset = "failed to reset to local config"
-	// FailSyncReasonCheckpointExistenceFmt is used when we can't determine if a checkpoint already exists, e.g. due to filesystem issues
-	FailSyncReasonCheckpointExistenceFmt = "failed to determine whether object %s with UID %s was already checkpointed"
-	// FailSyncReasonSaveCheckpointFmt is used when we can't save a checkpoint, e.g. due to filesystem issues
-	FailSyncReasonSaveCheckpointFmt = "failed to save config checkpoint for object %s with UID %s"
-	// FailSyncReasonSetCurrentDefault is used when we can't set the current config checkpoint to the local default, e.g. due to filesystem issues
-	FailSyncReasonSetCurrentLocal = "failed to set current config checkpoint to local config"
-	// FailSyncReasonSetCurrentUIDFmt is used when we can't set the current config checkpoint to a checkpointed object, e.g. due to filesystem issues
-	FailSyncReasonSetCurrentUIDFmt = "failed to set current config checkpoint to object %s with UID %s"
+	// TODO(mtaufen): remove this in #63221
+	UIDMismatchErrorFmt = "invalid ConfigSource.ConfigMap.UID: %s does not match %s.UID: %s"
+	// DownloadError is used when the download fails, e.g. due to network issues
+	DownloadError = "failed to download config, see Kubelet log for details"
+	// InternalError indicates that some internal error happened while trying to sync config, e.g. filesystem issues
+	InternalError = "internal failure, see Kubelet log for details"
 
-	// EmptyMessage is a placeholder in the case that we accidentally set the condition's message to the empty string.
-	// Doing so can result in a partial patch, and thus a confusing status; this makes it clear that the message was not provided.
-	EmptyMessage = "unknown - message not provided"
-	// EmptyReason is a placeholder in the case that we accidentally set the condition's reason to the empty string.
-	// Doing so can result in a partial patch, and thus a confusing status; this makes it clear that the reason was not provided.
-	EmptyReason = "unknown - reason not provided"
+	// SyncErrorFmt is used when the system couldn't sync the config, due to a malformed Node.Spec.ConfigSource, a download failure, etc.
+	SyncErrorFmt = "failed to sync: %s"
 )

-// ConfigOkCondition represents a ConfigOk NodeCondition
-type ConfigOkCondition interface {
-	// Set sets the Message, Reason, and Status of the condition
-	Set(message, reason string, status apiv1.ConditionStatus)
-	// SetFailSyncCondition sets the condition for when syncing Kubelet config fails
-	SetFailSyncCondition(reason string)
-	// ClearFailSyncCondition clears the overlay from SetFailSyncCondition
-	ClearFailSyncCondition()
-	// Sync patches the current condition into the Node identified by `nodeName`
+// NodeConfigStatus represents Node.Status.Config
+type NodeConfigStatus interface {
+	// SetActive sets the active source in the status
+	SetActive(source *apiv1.NodeConfigSource)
+	// SetAssigned sets the assigned source in the status
+	SetAssigned(source *apiv1.NodeConfigSource)
+	// SetLastKnownGood sets the last-known-good source in the status
+	SetLastKnownGood(source *apiv1.NodeConfigSource)
+	// SetError sets the error associated with the status
+	SetError(err string)
+	// SetErrorOverride sets an error that overrides the base error set by SetError.
+	// If the override is set to the empty string, the base error is reported in
+	// the status, otherwise the override is reported.
+	SetErrorOverride(err string)
+	// Sync patches the current status into the Node identified by `nodeName` if an update is pending
 	Sync(client clientset.Interface, nodeName string)
 }

-// configOkCondition implements ConfigOkCondition
-type configOkCondition struct {
-	// conditionMux is a mutex on the condition, alternate between setting and syncing the condition
-	conditionMux sync.Mutex
-	// message will appear as the condition's message
-	message string
-	// reason will appear as the condition's reason
-	reason string
-	// status will appear as the condition's status
-	status apiv1.ConditionStatus
-	// failedSyncReason is sent in place of the usual reason when the Kubelet is failing to sync the remote config
-	failedSyncReason string
-	// pendingCondition; write to this channel to indicate that ConfigOk needs to be synced to the API server
-	pendingCondition chan bool
+type nodeConfigStatus struct {
+	// status is the core NodeConfigStatus that we report
+	status apiv1.NodeConfigStatus
+	// mux is a mutex on the nodeConfigStatus, alternate between setting and syncing the status
+	mux sync.Mutex
+	// errorOverride is sent in place of the usual error if it is non-empty
+	errorOverride string
+	// syncCh; write to this channel to indicate that the status needs to be synced to the API server
+	syncCh chan bool
 }

-// NewConfigOkCondition returns a new ConfigOkCondition
-func NewConfigOkCondition() ConfigOkCondition {
-	return &configOkCondition{
-		message: EmptyMessage,
-		reason:  EmptyReason,
-		status:  apiv1.ConditionUnknown,
+// NewNodeConfigStatus returns a new NodeConfigStatus interface
+func NewNodeConfigStatus() NodeConfigStatus {
+	return &nodeConfigStatus{
 		// channels must have capacity at least 1, since we signal with non-blocking writes
-		pendingCondition: make(chan bool, 1),
+		syncCh: make(chan bool, 1),
 	}
 }

-// unsafeSet sets the current state of the condition
-// it does not grab the conditionMux lock, so you should generally use setConfigOk unless you need to grab the lock
-// at a higher level to synchronize additional operations
-func (c *configOkCondition) unsafeSet(message, reason string, status apiv1.ConditionStatus) {
-	// We avoid an empty Message, Reason, or Status on the condition. Since we use Patch to update conditions, an empty
-	// field might cause a value from a previous condition to leak through, which can be very confusing.
-
-	// message
-	if len(message) == 0 {
-		message = EmptyMessage
-	}
-	c.message = message
-	// reason
-	if len(reason) == 0 {
-		reason = EmptyReason
-	}
-	c.reason = reason
-	// status
-	if len(string(status)) == 0 {
-		status = apiv1.ConditionUnknown
-	}
-	c.status = status
-	// always poke worker after update
-	c.pokeSyncWorker()
+// transact grabs the lock, performs the fn, records the need to sync, and releases the lock
+func (s *nodeConfigStatus) transact(fn func()) {
+	s.mux.Lock()
+	defer s.mux.Unlock()
+	fn()
+	s.sync()
 }
 
-func (c *configOkCondition) Set(message, reason string, status apiv1.ConditionStatus) {
-	c.conditionMux.Lock()
-	defer c.conditionMux.Unlock()
-	c.unsafeSet(message, reason, status)
+func (s *nodeConfigStatus) SetAssigned(source *apiv1.NodeConfigSource) {
+	s.transact(func() {
+		s.status.Assigned = source
+	})
 }
 
-// SetFailSyncCondition updates the ConfigOk status to reflect that we failed to sync to the latest config,
-// e.g. due to a malformed Node.Spec.ConfigSource, a download failure, etc.
-func (c *configOkCondition) SetFailSyncCondition(reason string) {
-	c.conditionMux.Lock()
-	defer c.conditionMux.Unlock()
-	// set the reason overlay and poke the sync worker to send the update
-	c.failedSyncReason = fmt.Sprintf(FailSyncReasonFmt, reason)
-	c.pokeSyncWorker()
+func (s *nodeConfigStatus) SetActive(source *apiv1.NodeConfigSource) {
+	s.transact(func() {
+		s.status.Active = source
+	})
 }
 
-// ClearFailSyncCondition removes the "failed to sync" reason overlay
-func (c *configOkCondition) ClearFailSyncCondition() {
-	c.conditionMux.Lock()
-	defer c.conditionMux.Unlock()
-	// clear the reason overlay and poke the sync worker to send the update
-	c.failedSyncReason = ""
-	c.pokeSyncWorker()
+func (s *nodeConfigStatus) SetLastKnownGood(source *apiv1.NodeConfigSource) {
+	s.transact(func() {
+		s.status.LastKnownGood = source
+	})
 }
 
-// pokeSyncWorker notes that the ConfigOk condition needs to be synced to the API server
-func (c *configOkCondition) pokeSyncWorker() {
+func (s *nodeConfigStatus) SetError(err string) {
+	s.transact(func() {
+		s.status.Error = err
+	})
+}
+
+func (s *nodeConfigStatus) SetErrorOverride(err string) {
+	s.transact(func() {
+		s.errorOverride = err
+	})
+}
+
+// sync notes that the status needs to be synced to the API server
+func (s *nodeConfigStatus) sync() {
 	select {
-	case c.pendingCondition <- true:
+	case s.syncCh <- true:
 	default:
 	}
 }

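The transact/sync pair above is a common Go idiom: mutate under a lock, then signal "dirty" with a non-blocking send on a capacity-1 channel so repeated updates collapse into one pending sync. A self-contained sketch of just that pattern (names here are illustrative, not part of the real package):

    // Generic shape of the non-blocking "dirty" signal used by sync()/Sync().
    type dirtySignal struct {
        ch chan bool // capacity must be >= 1 so a pending signal is never lost
    }

    func newDirtySignal() *dirtySignal {
        return &dirtySignal{ch: make(chan bool, 1)}
    }

    // mark records that state changed; extra marks while one is pending are no-ops.
    func (d *dirtySignal) mark() {
        select {
        case d.ch <- true:
        default:
        }
    }

    // pending reports (and consumes) whether a sync is needed.
    func (d *dirtySignal) pending() bool {
        select {
        case <-d.ch:
            return true
        default:
            return false
        }
    }
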
-// Sync attempts to sync `c.condition` with the Node object for this Kubelet,
+// Sync attempts to sync the status with the Node object for this Kubelet,
 // if syncing fails, an error is logged, and work is queued for retry.
-func (c *configOkCondition) Sync(client clientset.Interface, nodeName string) {
+func (s *nodeConfigStatus) Sync(client clientset.Interface, nodeName string) {
 	select {
-	case <-c.pendingCondition:
+	case <-s.syncCh:
 	default:
 		// no work to be done, return
 		return
 	}
 
 	// grab the lock
-	c.conditionMux.Lock()
-	defer c.conditionMux.Unlock()
+	s.mux.Lock()
+	defer s.mux.Unlock()
 
 	// if the sync fails, we want to retry
 	var err error
 	defer func() {
 		if err != nil {
 			utillog.Errorf(err.Error())
-			c.pokeSyncWorker()
+			s.sync()
 		}
 	}()
 
-	// get the Node so we can check the current condition
-	node, err := client.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
+	// get the Node so we can check the current status
+	oldNode, err := client.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
 	if err != nil {
-		err = fmt.Errorf("could not get Node %q, will not sync ConfigOk condition, error: %v", nodeName, err)
+		err = fmt.Errorf("could not get Node %q, will not sync status, error: %v", nodeName, err)
 		return
 	}
 
-	// construct the node condition
-	condition := &apiv1.NodeCondition{
-		Type:    apiv1.NodeKubeletConfigOk,
-		Message: c.message,
-		Reason:  c.reason,
-		Status:  c.status,
+	status := &s.status
+	// override error, if necessary
+	if len(s.errorOverride) > 0 {
+		// copy the status, so we don't overwrite the prior error
+		// with the override
+		status = status.DeepCopy()
+		status.Error = s.errorOverride
 	}
 
-	// overlay failed sync reason, if necessary
-	if len(c.failedSyncReason) > 0 {
-		condition.Reason = c.failedSyncReason
-		condition.Status = apiv1.ConditionFalse
-	}
+	// apply the status to a copy of the node so we don't modify the object in the informer's store
+	newNode := oldNode.DeepCopy()
+	newNode.Status.Config = status
 
-	// set timestamps
-	syncTime := metav1.NewTime(time.Now())
-	condition.LastHeartbeatTime = syncTime
-	if remote := getKubeletConfigOk(node.Status.Conditions); remote == nil || !utilequal.KubeletConfigOkEq(remote, condition) {
-		// update transition time the first time we create the condition,
-		// or if we are semantically changing the condition
-		condition.LastTransitionTime = syncTime
-	} else {
-		// since the conditions are semantically equal, use lastTransitionTime from the condition currently on the Node
-		// we need to do this because the field will always be represented in the patch generated below, and this copy
-		// prevents nullifying the field during the patch operation
-		condition.LastTransitionTime = remote.LastTransitionTime
-	}
-
-	// generate the patch
-	mediaType := "application/json"
-	info, ok := kuberuntime.SerializerInfoForMediaType(legacyscheme.Codecs.SupportedMediaTypes(), mediaType)
-	if !ok {
-		err = fmt.Errorf("unsupported media type %q", mediaType)
-		return
-	}
-	versions := legacyscheme.Scheme.PrioritizedVersionsForGroup(api.GroupName)
-	if len(versions) == 0 {
-		err = fmt.Errorf("no enabled versions for group %q", api.GroupName)
-		return
-	}
-	// the "best" version supposedly comes first in the list returned from apiv1.Registry.EnabledVersionsForGroup
-	encoder := legacyscheme.Codecs.EncoderForVersion(info.Serializer, versions[0])
-
-	before, err := kuberuntime.Encode(encoder, node)
-	if err != nil {
-		err = fmt.Errorf(`failed to encode "before" node while generating patch, error: %v`, err)
-		return
-	}
-
-	patchConfigOk(node, condition)
-	after, err := kuberuntime.Encode(encoder, node)
-	if err != nil {
-		err = fmt.Errorf(`failed to encode "after" node while generating patch, error: %v`, err)
-		return
-	}
-
-	patch, err := strategicpatch.CreateTwoWayMergePatch(before, after, apiv1.Node{})
-	if err != nil {
-		err = fmt.Errorf("failed to generate patch for updating ConfigOk condition, error: %v", err)
-		return
-	}
-
-	// patch the remote Node object
-	_, err = client.CoreV1().Nodes().PatchStatus(nodeName, patch)
-	if err != nil {
-		err = fmt.Errorf("could not update ConfigOk condition, error: %v", err)
-		return
+	// patch the node with the new status
+	if _, _, err := nodeutil.PatchNodeStatus(client.CoreV1(), types.NodeName(nodeName), oldNode, newNode); err != nil {
+		utillog.Errorf("failed to patch node status, error: %v", err)
 	}
 }
-
-// patchConfigOk replaces or adds the ConfigOk condition to the node
-func patchConfigOk(node *apiv1.Node, configOk *apiv1.NodeCondition) {
-	for i := range node.Status.Conditions {
-		if node.Status.Conditions[i].Type == apiv1.NodeKubeletConfigOk {
-			// edit the condition
-			node.Status.Conditions[i] = *configOk
-			return
-		}
-	}
-	// append the condition
-	node.Status.Conditions = append(node.Status.Conditions, *configOk)
-}
-
-// getKubeletConfigOk returns the first NodeCondition in `cs` with Type == apiv1.NodeKubeletConfigOk,
-// or if no such condition exists, returns nil.
-func getKubeletConfigOk(cs []apiv1.NodeCondition) *apiv1.NodeCondition {
-	for i := range cs {
-		if cs[i].Type == apiv1.NodeKubeletConfigOk {
-			return &cs[i]
-		}
-	}
-	return nil
-}

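For orientation, here is what the status patched by Sync might look like when built by hand. This is a hypothetical example (the ConfigMap name and UID are invented; in practice the Kubelet fills these in from the checkpointed source):

    // Hypothetical example of the NodeConfigStatus shape the Kubelet reports.
    cmSource := &apiv1.NodeConfigSource{
        ConfigMap: &apiv1.ConfigMapNodeConfigSource{
            Namespace:        "kube-system",
            Name:             "my-node-config", // invented for illustration
            UID:              "0123-example",   // invented; set from the downloaded object
            KubeletConfigKey: "kubelet",
        },
    }
    status := apiv1.NodeConfigStatus{
        Assigned:      cmSource, // the config the node will try to use
        Active:        cmSource, // the config the node is actually running
        LastKnownGood: nil,      // not yet graduated past the trial period
        Error:         "",       // empty Error means config is syncing cleanly
    }
    _ = status
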
@@ -54,7 +54,7 @@ type REST struct {
 	proxyTransport http.RoundTripper
 }
 
-// StatusREST implements the REST endpoint for changing the status of a pod.
+// StatusREST implements the REST endpoint for changing the status of a node.
 type StatusREST struct {
 	store *genericregistry.Store
 }

@@ -127,14 +127,23 @@ type nodeStatusStrategy struct {
 var StatusStrategy = nodeStatusStrategy{Strategy}
 
 func (nodeStatusStrategy) PrepareForCreate(ctx context.Context, obj runtime.Object) {
-	_ = obj.(*api.Node)
+	node := obj.(*api.Node)
 	// Nodes allow *all* fields, including status, to be set on create.
+
+	if !utilfeature.DefaultFeatureGate.Enabled(features.DynamicKubeletConfig) {
+		node.Status.Config = nil
+	}
 }
 
 func (nodeStatusStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Object) {
 	newNode := obj.(*api.Node)
 	oldNode := old.(*api.Node)
 	newNode.Spec = oldNode.Spec
+
+	if !utilfeature.DefaultFeatureGate.Enabled(features.DynamicKubeletConfig) {
+		newNode.Status.Config = nil
+		oldNode.Status.Config = nil
+	}
 }
 
 func (nodeStatusStrategy) ValidateUpdate(ctx context.Context, obj, old runtime.Object) field.ErrorList {
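This is the usual feature-gate field wipe in a registry strategy: with the gate off, the new field is dropped on both create and update so disabled clusters never persist it, and wiping oldNode too keeps update-time comparison from seeing a diff a gated-off client could never have produced. A generic sketch of the shape (the helper name is hypothetical; the strategy above inlines this logic):

    // Illustrative helper shape only.
    func dropNodeConfigStatus(newNode, oldNode *api.Node) {
        if !utilfeature.DefaultFeatureGate.Enabled(features.DynamicKubeletConfig) {
            newNode.Status.Config = nil
            if oldNode != nil {
                oldNode.Status.Config = nil
            }
        }
    }
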
File diff suppressed because it is too large
@@ -1847,6 +1847,56 @@ message NodeConfigSource {
   optional ConfigMapNodeConfigSource configMap = 2;
 }
 
+// NodeConfigStatus describes the status of the config assigned by Node.Spec.ConfigSource.
+message NodeConfigStatus {
+  // Assigned reports the checkpointed config the node will try to use.
+  // When Node.Spec.ConfigSource is updated, the node checkpoints the associated
+  // config payload to local disk, along with a record indicating intended
+  // config. The node refers to this record to choose its config checkpoint, and
+  // reports this record in Assigned. Assigned only updates in the status after
+  // the record has been checkpointed to disk. When the Kubelet is restarted,
+  // it tries to make the Assigned config the Active config by loading and
+  // validating the checkpointed payload identified by Assigned.
+  // +optional
+  optional NodeConfigSource assigned = 1;
+
+  // Active reports the checkpointed config the node is actively using.
+  // Active will represent either the current version of the Assigned config,
+  // or the current LastKnownGood config, depending on whether attempting to use the
+  // Assigned config results in an error.
+  // +optional
+  optional NodeConfigSource active = 2;
+
+  // LastKnownGood reports the checkpointed config the node will fall back to
+  // when it encounters an error attempting to use the Assigned config.
+  // The Assigned config becomes the LastKnownGood config when the node determines
+  // that the Assigned config is stable and correct.
+  // This is currently implemented as a 10-minute soak period starting when the local
+  // record of Assigned config is updated. If the Assigned config is Active at the end
+  // of this period, it becomes the LastKnownGood. Note that if Spec.ConfigSource is
+  // reset to nil (use local defaults), the LastKnownGood is also immediately reset to nil,
+  // because the local default config is always assumed good.
+  // You should not make assumptions about the node's method of determining config stability
+  // and correctness, as this may change or become configurable in the future.
+  // +optional
+  optional NodeConfigSource lastKnownGood = 3;
+
+  // Error describes any problems reconciling the Spec.ConfigSource to the Active config.
+  // Errors may occur, for example, attempting to checkpoint Spec.ConfigSource to the local Assigned
+  // record, attempting to checkpoint the payload associated with Spec.ConfigSource, attempting
+  // to load or validate the Assigned config, etc.
+  // Errors may occur at different points while syncing config. Earlier errors (e.g. download or
+  // checkpointing errors) will not result in a rollback to LastKnownGood, and may resolve across
+  // Kubelet retries. Later errors (e.g. loading or validating a checkpointed config) will result in
+  // a rollback to LastKnownGood. In the latter case, it is usually possible to resolve the error
+  // by fixing the config assigned in Spec.ConfigSource.
+  // You can find additional information for debugging by searching the error message in the Kubelet log.
+  // Error is a human-readable description of the error state; machines can check whether or not Error
+  // is empty, but should not rely on the stability of the Error text across Kubelet versions.
+  // +optional
+  optional string error = 4;
+}
+
 // NodeDaemonEndpoints lists ports opened by daemons running on the Node.
 message NodeDaemonEndpoints {
   // Endpoint on which Kubelet is listening.
@@ -2001,6 +2051,10 @@ message NodeStatus {
   // List of volumes that are attached to the node.
   // +optional
   repeated AttachedVolume volumesAttached = 10;
+
+  // Status of the config assigned to the node via the dynamic Kubelet config feature.
+  // +optional
+  optional NodeConfigStatus config = 11;
 }
 
 // NodeSystemInfo is a set of ids/uuids to uniquely identify the node.

@@ -3732,6 +3732,53 @@ type NodeSystemInfo struct {
 	Architecture string `json:"architecture" protobuf:"bytes,10,opt,name=architecture"`
 }
 
+// NodeConfigStatus describes the status of the config assigned by Node.Spec.ConfigSource.
+type NodeConfigStatus struct {
+	// Assigned reports the checkpointed config the node will try to use.
+	// When Node.Spec.ConfigSource is updated, the node checkpoints the associated
+	// config payload to local disk, along with a record indicating intended
+	// config. The node refers to this record to choose its config checkpoint, and
+	// reports this record in Assigned. Assigned only updates in the status after
+	// the record has been checkpointed to disk. When the Kubelet is restarted,
+	// it tries to make the Assigned config the Active config by loading and
+	// validating the checkpointed payload identified by Assigned.
+	// +optional
+	Assigned *NodeConfigSource `json:"assigned,omitempty" protobuf:"bytes,1,opt,name=assigned"`
+	// Active reports the checkpointed config the node is actively using.
+	// Active will represent either the current version of the Assigned config,
+	// or the current LastKnownGood config, depending on whether attempting to use the
+	// Assigned config results in an error.
+	// +optional
+	Active *NodeConfigSource `json:"active,omitempty" protobuf:"bytes,2,opt,name=active"`
+	// LastKnownGood reports the checkpointed config the node will fall back to
+	// when it encounters an error attempting to use the Assigned config.
+	// The Assigned config becomes the LastKnownGood config when the node determines
+	// that the Assigned config is stable and correct.
+	// This is currently implemented as a 10-minute soak period starting when the local
+	// record of Assigned config is updated. If the Assigned config is Active at the end
+	// of this period, it becomes the LastKnownGood. Note that if Spec.ConfigSource is
+	// reset to nil (use local defaults), the LastKnownGood is also immediately reset to nil,
+	// because the local default config is always assumed good.
+	// You should not make assumptions about the node's method of determining config stability
+	// and correctness, as this may change or become configurable in the future.
+	// +optional
+	LastKnownGood *NodeConfigSource `json:"lastKnownGood,omitempty" protobuf:"bytes,3,opt,name=lastKnownGood"`
+	// Error describes any problems reconciling the Spec.ConfigSource to the Active config.
+	// Errors may occur, for example, attempting to checkpoint Spec.ConfigSource to the local Assigned
+	// record, attempting to checkpoint the payload associated with Spec.ConfigSource, attempting
+	// to load or validate the Assigned config, etc.
+	// Errors may occur at different points while syncing config. Earlier errors (e.g. download or
+	// checkpointing errors) will not result in a rollback to LastKnownGood, and may resolve across
+	// Kubelet retries. Later errors (e.g. loading or validating a checkpointed config) will result in
+	// a rollback to LastKnownGood. In the latter case, it is usually possible to resolve the error
+	// by fixing the config assigned in Spec.ConfigSource.
+	// You can find additional information for debugging by searching the error message in the Kubelet log.
+	// Error is a human-readable description of the error state; machines can check whether or not Error
+	// is empty, but should not rely on the stability of the Error text across Kubelet versions.
+	// +optional
+	Error string `json:"error,omitempty" protobuf:"bytes,4,opt,name=error"`
+}
+
 // NodeStatus is information about the current status of a node.
 type NodeStatus struct {
 	// Capacity represents the total resources of a node.

@@ -3776,6 +3823,9 @@ type NodeStatus struct {
 	// List of volumes that are attached to the node.
 	// +optional
 	VolumesAttached []AttachedVolume `json:"volumesAttached,omitempty" protobuf:"bytes,10,rep,name=volumesAttached"`
+	// Status of the config assigned to the node via the dynamic Kubelet config feature.
+	// +optional
+	Config *NodeConfigStatus `json:"config,omitempty" protobuf:"bytes,11,opt,name=config"`
 }
 
 type UniqueVolumeName string

@@ -3863,8 +3913,6 @@ const (
 	NodePIDPressure NodeConditionType = "PIDPressure"
 	// NodeNetworkUnavailable means that network for the node is not correctly configured.
 	NodeNetworkUnavailable NodeConditionType = "NetworkUnavailable"
-	// NodeKubeletConfigOk indicates whether the kubelet is correctly configured
-	NodeKubeletConfigOk NodeConditionType = "KubeletConfigOk"
 )
 
 // NodeCondition contains condition information for a node.

@@ -990,6 +990,18 @@ func (NodeConfigSource) SwaggerDoc() map[string]string {
 	return map_NodeConfigSource
 }
 
+var map_NodeConfigStatus = map[string]string{
+	"":              "NodeConfigStatus describes the status of the config assigned by Node.Spec.ConfigSource.",
+	"assigned":      "Assigned reports the checkpointed config the node will try to use. When Node.Spec.ConfigSource is updated, the node checkpoints the associated config payload to local disk, along with a record indicating intended config. The node refers to this record to choose its config checkpoint, and reports this record in Assigned. Assigned only updates in the status after the record has been checkpointed to disk. When the Kubelet is restarted, it tries to make the Assigned config the Active config by loading and validating the checkpointed payload identified by Assigned.",
+	"active":        "Active reports the checkpointed config the node is actively using. Active will represent either the current version of the Assigned config, or the current LastKnownGood config, depending on whether attempting to use the Assigned config results in an error.",
+	"lastKnownGood": "LastKnownGood reports the checkpointed config the node will fall back to when it encounters an error attempting to use the Assigned config. The Assigned config becomes the LastKnownGood config when the node determines that the Assigned config is stable and correct. This is currently implemented as a 10-minute soak period starting when the local record of Assigned config is updated. If the Assigned config is Active at the end of this period, it becomes the LastKnownGood. Note that if Spec.ConfigSource is reset to nil (use local defaults), the LastKnownGood is also immediately reset to nil, because the local default config is always assumed good. You should not make assumptions about the node's method of determining config stability and correctness, as this may change or become configurable in the future.",
+	"error":         "Error describes any problems reconciling the Spec.ConfigSource to the Active config. Errors may occur, for example, attempting to checkpoint Spec.ConfigSource to the local Assigned record, attempting to checkpoint the payload associated with Spec.ConfigSource, attempting to load or validate the Assigned config, etc. Errors may occur at different points while syncing config. Earlier errors (e.g. download or checkpointing errors) will not result in a rollback to LastKnownGood, and may resolve across Kubelet retries. Later errors (e.g. loading or validating a checkpointed config) will result in a rollback to LastKnownGood. In the latter case, it is usually possible to resolve the error by fixing the config assigned in Spec.ConfigSource. You can find additional information for debugging by searching the error message in the Kubelet log. Error is a human-readable description of the error state; machines can check whether or not Error is empty, but should not rely on the stability of the Error text across Kubelet versions.",
+}
+
+func (NodeConfigStatus) SwaggerDoc() map[string]string {
+	return map_NodeConfigStatus
+}
+
 var map_NodeDaemonEndpoints = map[string]string{
 	"":                "NodeDaemonEndpoints lists ports opened by daemons running on the Node.",
 	"kubeletEndpoint": "Endpoint on which Kubelet is listening.",
@@ -1083,6 +1095,7 @@ var map_NodeStatus = map[string]string{
 	"images":          "List of container images on this node",
 	"volumesInUse":    "List of attachable volumes in use (mounted) by the node.",
 	"volumesAttached": "List of volumes that are attached to the node.",
+	"config":          "Status of the config assigned to the node via the dynamic Kubelet config feature.",
 }

 func (NodeStatus) SwaggerDoc() map[string]string {
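
These structured fields are what an orchestrator consumes: compare Assigned against Active and test Error for emptiness, instead of parsing condition text. The sketch below is illustrative only (not part of this commit); it assumes a client-go Clientset of the same vintage as this change (newer client-go Get calls also take a context), and the function name configConverged is ours:

package main

import (
	"fmt"

	apiequality "k8s.io/apimachinery/pkg/api/equality"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// configConverged reports whether the node has adopted its assigned dynamic config.
func configConverged(cs kubernetes.Interface, nodeName string) (bool, error) {
	node, err := cs.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
	if err != nil {
		return false, err
	}
	s := node.Status.Config
	if s == nil {
		return false, fmt.Errorf("node %s reports no config status yet", nodeName)
	}
	// Error is human-readable; machines should only test whether it is empty.
	if s.Error != "" {
		return false, fmt.Errorf("node %s config error: %s", nodeName, s.Error)
	}
	// Converged when the config in use matches the config the node intends to use.
	return apiequality.Semantic.DeepEqual(s.Assigned, s.Active), nil
}
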
@@ -2398,6 +2398,49 @@ func (in *NodeConfigSource) DeepCopy() *NodeConfigSource {
 	return out
 }

+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeConfigStatus) DeepCopyInto(out *NodeConfigStatus) {
+	*out = *in
+	if in.Assigned != nil {
+		in, out := &in.Assigned, &out.Assigned
+		if *in == nil {
+			*out = nil
+		} else {
+			*out = new(NodeConfigSource)
+			(*in).DeepCopyInto(*out)
+		}
+	}
+	if in.Active != nil {
+		in, out := &in.Active, &out.Active
+		if *in == nil {
+			*out = nil
+		} else {
+			*out = new(NodeConfigSource)
+			(*in).DeepCopyInto(*out)
+		}
+	}
+	if in.LastKnownGood != nil {
+		in, out := &in.LastKnownGood, &out.LastKnownGood
+		if *in == nil {
+			*out = nil
+		} else {
+			*out = new(NodeConfigSource)
+			(*in).DeepCopyInto(*out)
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeConfigStatus.
+func (in *NodeConfigStatus) DeepCopy() *NodeConfigStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeConfigStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *NodeDaemonEndpoints) DeepCopyInto(out *NodeDaemonEndpoints) {
 	*out = *in

@@ -2650,6 +2693,15 @@ func (in *NodeStatus) DeepCopyInto(out *NodeStatus) {
 		*out = make([]AttachedVolume, len(*in))
 		copy(*out, *in)
 	}
+	if in.Config != nil {
+		in, out := &in.Config, &out.Config
+		if *in == nil {
+			*out = nil
+		} else {
+			*out = new(NodeConfigStatus)
+			(*in).DeepCopyInto(*out)
+		}
+	}
 	return
 }
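
The generated deep-copy above matters because every field of NodeConfigStatus is a pointer; a plain struct assignment would alias the same NodeConfigSource values between copies. A usage sketch (illustrative only; node is assumed to be a *v1.Node with a non-nil Status.Config):

	// Mutating the deep copy leaves the node's actual status untouched.
	copied := node.Status.Config.DeepCopy()
	copied.Assigned = nil // node.Status.Config.Assigned is unaffected
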
@@ -147,6 +147,7 @@ go_test(
         "//vendor/github.com/onsi/gomega/gstruct:go_default_library",
         "//vendor/github.com/onsi/gomega/types:go_default_library",
         "//vendor/k8s.io/api/core/v1:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
@@ -25,6 +25,7 @@ import (
 	"github.com/davecgh/go-spew/spew"

 	apiv1 "k8s.io/api/core/v1"
+	apiequality "k8s.io/apimachinery/pkg/api/equality"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
 	controller "k8s.io/kubernetes/pkg/kubelet/kubeletconfig"
@@ -36,11 +37,19 @@ import (
 	. "github.com/onsi/gomega"
 )

+type configStateStatus struct {
+	apiv1.NodeConfigStatus
+
+	SkipActive   bool
+	SkipAssigned bool
+	SkipLkg      bool
+}
+
 type configState struct {
-	desc           string
-	configSource   *apiv1.NodeConfigSource
-	expectConfigOk *apiv1.NodeCondition
-	expectConfig   *kubeletconfig.KubeletConfiguration
+	desc               string
+	configSource       *apiv1.NodeConfigSource
+	expectConfigStatus *configStateStatus
+	expectConfig       *kubeletconfig.KubeletConfiguration
 	// whether to expect this substring in an error returned from the API server when updating the config source
 	apierr string
 	// whether the state would cause a config change event as a result of the update to Node.Spec.ConfigSource,
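
The Skip flags let a test case opt out of comparing fields whose value is timing-dependent, e.g. LastKnownGood while the 10-minute soak period is still running. A sketch of such an expectation (source is a placeholder *apiv1.NodeConfigSource):

	expect := &configStateStatus{
		NodeConfigStatus: apiv1.NodeConfigStatus{
			Active:   source,
			Assigned: source,
		},
		SkipLkg: true, // LKG may not have soaked yet; don't compare it
	}
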
@@ -83,21 +92,27 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube
 			// should be fine to reset the values using a remote config, even if they
 			// were initially set via the locally provisioned configuration.
 			// This is the same strategy several other e2e node tests use.
+			source := &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
+				UID:              originalConfigMap.UID,
+				Namespace:        originalConfigMap.Namespace,
+				Name:             originalConfigMap.Name,
+				KubeletConfigKey: "kubelet",
+			}}
 			setAndTestKubeletConfigState(f, &configState{desc: "reset to original values",
-				configSource: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
-					UID:              originalConfigMap.UID,
-					Namespace:        originalConfigMap.Namespace,
-					Name:             originalConfigMap.Name,
-					KubeletConfigKey: "kubelet",
-				}},
-				expectConfigOk: &apiv1.NodeCondition{Type: apiv1.NodeKubeletConfigOk, Status: apiv1.ConditionTrue,
-					Message: fmt.Sprintf(status.CurRemoteMessageFmt, configMapAPIPath(originalConfigMap)),
-					Reason:  status.CurRemoteOkayReason},
+				configSource: source,
+				expectConfigStatus: &configStateStatus{
+					NodeConfigStatus: apiv1.NodeConfigStatus{
+						Active:   source,
+						Assigned: source,
+					},
+					SkipLkg: true,
+				},
 				expectConfig: originalKC,
 			}, false)
 		})

-		Context("When setting new NodeConfigSources that cause transitions between ConfigOk conditions", func() {
+		Context("When changing NodeConfigSources", func() {
 			It("the Kubelet should report the appropriate status and configz", func() {
 				var err error
 				// we base the "correct" configmap off of the current configuration
@@ -124,29 +139,77 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube
 				failValidateConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(failValidateConfigMap)
 				framework.ExpectNoError(err)

+				correctSource := &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
+					UID:              correctConfigMap.UID,
+					Namespace:        correctConfigMap.Namespace,
+					Name:             correctConfigMap.Name,
+					KubeletConfigKey: "kubelet",
+				}}
+				failParseSource := &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
+					UID:              failParseConfigMap.UID,
+					Namespace:        failParseConfigMap.Namespace,
+					Name:             failParseConfigMap.Name,
+					KubeletConfigKey: "kubelet",
+				}}
+				failValidateSource := &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
+					UID:              failValidateConfigMap.UID,
+					Namespace:        failValidateConfigMap.Namespace,
+					Name:             failValidateConfigMap.Name,
+					KubeletConfigKey: "kubelet",
+				}}
+
+				// Note: since we start with the nil source (resets lkg), and we don't wait longer than the 10-minute internal
+				// qualification period before changing it again, we can assume lkg source will be nil in the status
+				// for this entire test, which is why we never set SkipLkg=true here.
+
 				states := []configState{
 					{
-						desc:         "Node.Spec.ConfigSource is nil",
-						configSource: nil,
-						expectConfigOk: &apiv1.NodeCondition{Type: apiv1.NodeKubeletConfigOk, Status: apiv1.ConditionTrue,
-							Message: status.CurLocalMessage,
-							Reason:  status.CurLocalOkayReason},
-						expectConfig: nil,
-						event:        true,
+						desc:               "Node.Spec.ConfigSource is nil",
+						configSource:       nil,
+						expectConfigStatus: &configStateStatus{},
+						expectConfig:       nil,
+						event:              true,
 					},
-					// Node.Spec.ConfigSource has all nil subfields
-					{desc: "Node.Spec.ConfigSource has all nil subfields",
+					{
+						desc:         "Node.Spec.ConfigSource has all nil subfields",
 						configSource: &apiv1.NodeConfigSource{},
 						apierr:       "exactly one reference subfield must be non-nil",
 					},
-					// Node.Spec.ConfigSource.ConfigMap is partial
-					{desc: "Node.Spec.ConfigSource.ConfigMap is partial",
+					{
+						desc: "Node.Spec.ConfigSource.ConfigMap is missing namespace",
 						configSource: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
 							UID:              "foo",
 							Name:             "bar",
 							KubeletConfigKey: "kubelet",
 						}}, // missing Namespace
-						apierr: "spec.configSource.configMap.namespace: Required value: namespace must be set in spec",
+						apierr: "spec.configSource.configMap.namespace: Required value: namespace must be set",
+					},
+					{
+						desc: "Node.Spec.ConfigSource.ConfigMap is missing name",
+						configSource: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
+							UID:              "foo",
+							Namespace:        "bar",
+							KubeletConfigKey: "kubelet",
+						}}, // missing Name
+						apierr: "spec.configSource.configMap.name: Required value: name must be set",
+					},
+					{
+						desc: "Node.Spec.ConfigSource.ConfigMap is missing kubeletConfigKey",
+						configSource: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
+							UID:       "foo",
+							Namespace: "bar",
+							Name:      "baz",
+						}}, // missing KubeletConfigKey
+						apierr: "spec.configSource.configMap.kubeletConfigKey: Required value: kubeletConfigKey must be set",
+					},
+					{
+						desc: "Node.Spec.ConfigSource.ConfigMap is missing uid",
+						configSource: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
+							Namespace:        "bar",
+							Name:             "baz",
+							KubeletConfigKey: "kubelet",
+						}}, // missing uid
+						apierr: "spec.configSource.configMap.uid: Required value: uid must be set in spec",
 					},
 					{desc: "Node.Spec.ConfigSource.ConfigMap.ResourceVersion is illegally specified",
 						configSource: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
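
The cases above pin down the API validation rules for a ConfigMap source: exactly one subfield of NodeConfigSource may be non-nil, and uid, namespace, name, and kubeletConfigKey are all required. A source that passes validation looks like the following sketch (values are placeholders; the UID must match the live ConfigMap):

	valid := &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
		UID:              cm.UID, // cm is the ConfigMap previously returned by the API server
		Namespace:        "kube-system",
		Name:             "my-kubelet-config",
		KubeletConfigKey: "kubelet",
	}}
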
@@ -186,6 +249,7 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube
 						apierr: "spec.configSource.configMap.kubeletConfigKey: Invalid value",
 					},
 					{
+						// TODO(mtaufen): remove in #63221
 						desc: "Node.Spec.ConfigSource.ConfigMap.UID does not align with Namespace/Name",
 						configSource: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
 							UID: "foo",
@@ -193,51 +257,52 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube
 							Name:             correctConfigMap.Name,
 							KubeletConfigKey: "kubelet",
 						}},
-						expectConfigOk: &apiv1.NodeCondition{Type: apiv1.NodeKubeletConfigOk, Status: apiv1.ConditionFalse,
-							Message: "",
-							Reason:  fmt.Sprintf(status.FailSyncReasonFmt, fmt.Sprintf(status.FailSyncReasonUIDMismatchFmt, "foo", configMapAPIPath(correctConfigMap), correctConfigMap.UID))},
+						expectConfigStatus: &configStateStatus{
+							NodeConfigStatus: apiv1.NodeConfigStatus{
+								Error: fmt.Sprintf(status.SyncErrorFmt, fmt.Sprintf(status.UIDMismatchErrorFmt, "foo", configMapAPIPath(correctConfigMap), correctConfigMap.UID)),
+							},
+							// skip assigned and active, because we don't know what the prior source will be
+							SkipAssigned: true,
+							SkipActive:   true,
+						},
 						expectConfig: nil,
 						event:        false,
 					},
 					{
-						desc: "correct",
-						configSource: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
-							UID:              correctConfigMap.UID,
-							Namespace:        correctConfigMap.Namespace,
-							Name:             correctConfigMap.Name,
-							KubeletConfigKey: "kubelet",
-						}},
-						expectConfigOk: &apiv1.NodeCondition{Type: apiv1.NodeKubeletConfigOk, Status: apiv1.ConditionTrue,
-							Message: fmt.Sprintf(status.CurRemoteMessageFmt, configMapAPIPath(correctConfigMap)),
-							Reason:  status.CurRemoteOkayReason},
+						desc:         "correct",
+						configSource: correctSource,
+						expectConfigStatus: &configStateStatus{
+							NodeConfigStatus: apiv1.NodeConfigStatus{
+								Active:   correctSource,
+								Assigned: correctSource,
+							},
+						},
 						expectConfig: correctKC,
 						event:        true,
 					},
 					{
-						desc: "fail-parse",
-						configSource: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
-							UID:              failParseConfigMap.UID,
-							Namespace:        failParseConfigMap.Namespace,
-							Name:             failParseConfigMap.Name,
-							KubeletConfigKey: "kubelet",
-						}},
-						expectConfigOk: &apiv1.NodeCondition{Type: apiv1.NodeKubeletConfigOk, Status: apiv1.ConditionFalse,
-							Message: status.LkgLocalMessage,
-							Reason:  fmt.Sprintf(status.CurFailLoadReasonFmt, configMapAPIPath(failParseConfigMap))},
+						desc:         "fail-parse",
+						configSource: failParseSource,
+						expectConfigStatus: &configStateStatus{
+							NodeConfigStatus: apiv1.NodeConfigStatus{
+								Assigned: failParseSource,
+								Error:    status.LoadError,
+							},
+							SkipActive: true,
+						},
 						expectConfig: nil,
 						event:        true,
 					},
 					{
-						desc: "fail-validate",
-						configSource: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
-							UID:              failValidateConfigMap.UID,
-							Namespace:        failValidateConfigMap.Namespace,
-							Name:             failValidateConfigMap.Name,
-							KubeletConfigKey: "kubelet",
-						}},
-						expectConfigOk: &apiv1.NodeCondition{Type: apiv1.NodeKubeletConfigOk, Status: apiv1.ConditionFalse,
-							Message: status.LkgLocalMessage,
-							Reason:  fmt.Sprintf(status.CurFailValidateReasonFmt, configMapAPIPath(failValidateConfigMap))},
+						desc:         "fail-validate",
+						configSource: failValidateSource,
+						expectConfigStatus: &configStateStatus{
+							NodeConfigStatus: apiv1.NodeConfigStatus{
+								Assigned: failValidateSource,
+								Error:    status.ValidateError,
+							},
+							SkipActive: true,
+						},
 						expectConfig: nil,
 						event:        true,
 					},
@@ -270,33 +335,45 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube
 				badConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(badConfigMap)
 				framework.ExpectNoError(err)

+				lkgSource := &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
+					UID:              lkgConfigMap.UID,
+					Namespace:        lkgConfigMap.Namespace,
+					Name:             lkgConfigMap.Name,
+					KubeletConfigKey: "kubelet",
+				}}
+				badSource := &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
+					UID:              badConfigMap.UID,
+					Namespace:        badConfigMap.Namespace,
+					Name:             badConfigMap.Name,
+					KubeletConfigKey: "kubelet",
+				}}
+
 				states := []configState{
 					// intended lkg
 					{desc: "intended last-known-good",
-						configSource: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
-							UID:              lkgConfigMap.UID,
-							Namespace:        lkgConfigMap.Namespace,
-							Name:             lkgConfigMap.Name,
-							KubeletConfigKey: "kubelet",
-						}},
-						expectConfigOk: &apiv1.NodeCondition{Type: apiv1.NodeKubeletConfigOk, Status: apiv1.ConditionTrue,
-							Message: fmt.Sprintf(status.CurRemoteMessageFmt, configMapAPIPath(lkgConfigMap)),
-							Reason:  status.CurRemoteOkayReason},
+						configSource: lkgSource,
+						expectConfigStatus: &configStateStatus{
+							NodeConfigStatus: apiv1.NodeConfigStatus{
+								Active:   lkgSource,
+								Assigned: lkgSource,
+							},
+							SkipLkg: true,
+						},
 						expectConfig: lkgKC,
 						event:        true,
 					},

 					// bad config
 					{desc: "bad config",
-						configSource: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
-							UID:              badConfigMap.UID,
-							Namespace:        badConfigMap.Namespace,
-							Name:             badConfigMap.Name,
-							KubeletConfigKey: "kubelet",
-						}},
-						expectConfigOk: &apiv1.NodeCondition{Type: apiv1.NodeKubeletConfigOk, Status: apiv1.ConditionFalse,
-							Message: fmt.Sprintf(status.LkgRemoteMessageFmt, configMapAPIPath(lkgConfigMap)),
-							Reason:  fmt.Sprintf(status.CurFailLoadReasonFmt, configMapAPIPath(badConfigMap))},
+						configSource: badSource,
+						expectConfigStatus: &configStateStatus{
+							NodeConfigStatus: apiv1.NodeConfigStatus{
+								Active:        lkgSource,
+								Assigned:      badSource,
+								LastKnownGood: lkgSource,
+								Error:         status.LoadError,
+							},
+						},
 						expectConfig: lkgKC,
 						event:        true,
 					},
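
The "bad config" case above encodes the rollback contract: after a failed load, Assigned still points at the bad source while Active and LastKnownGood point at the fallback. An orchestrator could detect a rollback along these lines (a sketch under the same assumptions as earlier, reusing the test file's apiv1/apiequality import aliases; not part of this commit):

	// rolledBack reports whether the node fell back to its last-known-good config.
	func rolledBack(s *apiv1.NodeConfigStatus) bool {
		return s != nil && s.Error != "" &&
			apiequality.Semantic.DeepEqual(s.Active, s.LastKnownGood) &&
			!apiequality.Semantic.DeepEqual(s.Active, s.Assigned)
	}
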
@@ -318,34 +395,45 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube
 				combinedConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(combinedConfigMap)
 				framework.ExpectNoError(err)

+				lkgSource := &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
+					UID:              combinedConfigMap.UID,
+					Namespace:        combinedConfigMap.Namespace,
+					Name:             combinedConfigMap.Name,
+					KubeletConfigKey: "kubelet",
+				}}
+				badSource := &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
+					UID:              combinedConfigMap.UID,
+					Namespace:        combinedConfigMap.Namespace,
+					Name:             combinedConfigMap.Name,
+					KubeletConfigKey: badConfigKey,
+				}}
+
 				states := []configState{
 					// intended lkg
 					{desc: "intended last-known-good",
-						configSource: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
-							UID:              combinedConfigMap.UID,
-							Namespace:        combinedConfigMap.Namespace,
-							Name:             combinedConfigMap.Name,
-							KubeletConfigKey: "kubelet",
-						}},
-						expectConfigOk: &apiv1.NodeCondition{Type: apiv1.NodeKubeletConfigOk, Status: apiv1.ConditionTrue,
-							Message: fmt.Sprintf(status.CurRemoteMessageFmt, configMapAPIPath(combinedConfigMap)),
-							Reason:  status.CurRemoteOkayReason},
+						configSource: lkgSource,
+						expectConfigStatus: &configStateStatus{
+							NodeConfigStatus: apiv1.NodeConfigStatus{
+								Active:   lkgSource,
+								Assigned: lkgSource,
+							},
+							SkipLkg: true,
+						},
 						expectConfig: lkgKC,
 						event:        true,
 					},

 					// bad config
 					{desc: "bad config",
-						configSource: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
-							UID:              combinedConfigMap.UID,
-							Namespace:        combinedConfigMap.Namespace,
-							Name:             combinedConfigMap.Name,
-							KubeletConfigKey: badConfigKey,
-						}},
-						expectConfigOk: &apiv1.NodeCondition{Type: apiv1.NodeKubeletConfigOk, Status: apiv1.ConditionFalse,
-							// TODO(mtaufen): status should be more informative, and report the key being used
-							Message: fmt.Sprintf(status.LkgRemoteMessageFmt, configMapAPIPath(combinedConfigMap)),
-							Reason:  fmt.Sprintf(status.CurFailLoadReasonFmt, configMapAPIPath(combinedConfigMap))},
+						configSource: badSource,
+						expectConfigStatus: &configStateStatus{
+							NodeConfigStatus: apiv1.NodeConfigStatus{
+								Active:        lkgSource,
+								Assigned:      badSource,
+								LastKnownGood: lkgSource,
+								Error:         status.LoadError,
+							},
+						},
 						expectConfig: lkgKC,
 						event:        true,
 					},
@@ -375,31 +463,42 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube
 				cm2, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(cm2)
 				framework.ExpectNoError(err)

+				cm1Source := &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
+					UID:              cm1.UID,
+					Namespace:        cm1.Namespace,
+					Name:             cm1.Name,
+					KubeletConfigKey: "kubelet",
+				}}
+				cm2Source := &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
+					UID:              cm2.UID,
+					Namespace:        cm2.Namespace,
+					Name:             cm2.Name,
+					KubeletConfigKey: "kubelet",
+				}}
+
 				states := []configState{
 					{desc: "cm1",
-						configSource: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
-							UID:              cm1.UID,
-							Namespace:        cm1.Namespace,
-							Name:             cm1.Name,
-							KubeletConfigKey: "kubelet",
-						}},
-						expectConfigOk: &apiv1.NodeCondition{Type: apiv1.NodeKubeletConfigOk, Status: apiv1.ConditionTrue,
-							Message: fmt.Sprintf(status.CurRemoteMessageFmt, configMapAPIPath(cm1)),
-							Reason:  status.CurRemoteOkayReason},
+						configSource: cm1Source,
+						expectConfigStatus: &configStateStatus{
+							NodeConfigStatus: apiv1.NodeConfigStatus{
+								Active:   cm1Source,
+								Assigned: cm1Source,
+							},
+							SkipLkg: true,
+						},
 						expectConfig: kc1,
 						event:        true,
 					},

 					{desc: "cm2",
-						configSource: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
-							UID:              cm2.UID,
-							Namespace:        cm2.Namespace,
-							Name:             cm2.Name,
-							KubeletConfigKey: "kubelet",
-						}},
-						expectConfigOk: &apiv1.NodeCondition{Type: apiv1.NodeKubeletConfigOk, Status: apiv1.ConditionTrue,
-							Message: fmt.Sprintf(status.CurRemoteMessageFmt, configMapAPIPath(cm2)),
-							Reason:  status.CurRemoteOkayReason},
+						configSource: cm2Source,
+						expectConfigStatus: &configStateStatus{
+							NodeConfigStatus: apiv1.NodeConfigStatus{
+								Active:   cm2Source,
+								Assigned: cm2Source,
+							},
+							SkipLkg: true,
+						},
 						expectConfig: kc2,
 						event:        true,
 					},
@@ -424,7 +523,7 @@ func testBothDirections(f *framework.Framework, first *configState, states []con

 	time.Sleep(waitAfterFirst)

-	// for each state, set to that state, check condition and configz, then reset to first and check again
+	// for each state, set to that state, check expectations, then reset to first and check again
 	for i := range states {
 		By(fmt.Sprintf("from %q to %q", first.desc, states[i].desc))
 		// from first -> states[i], states[i].event fully describes whether we should get a config change event
@@ -436,9 +535,8 @@ func testBothDirections(f *framework.Framework, first *configState, states []con
 		}
 	}

-// setAndTestKubeletConfigState tests that after setting the config source, the KubeletConfigOk condition
-// and (if appropriate) configuration exposed via conifgz are as expected.
-// The configuration will be converted to the internal type prior to comparison.
+// setAndTestKubeletConfigState tests that after setting the config source, the node spec, status, configz, and latest event match
+// the expectations described by state.
 func setAndTestKubeletConfigState(f *framework.Framework, state *configState, expectEvent bool) {
 	// set the desired state, retry a few times in case we are competing with other editors
 	Eventually(func() error {
@@ -459,8 +557,8 @@ func setAndTestKubeletConfigState(f *framework.Framework, state *configState, ex
 	}
 	// check that config source actually got set to what we expect
 	checkNodeConfigSource(f, state.desc, state.configSource)
-	// check condition
-	checkConfigOkCondition(f, state.desc, state.expectConfigOk)
+	// check status
+	checkConfigStatus(f, state.desc, state.expectConfigStatus)
 	// check expectConfig
 	if state.expectConfig != nil {
 		checkConfig(f, state.desc, state.expectConfig)
@@ -490,8 +588,8 @@ func checkNodeConfigSource(f *framework.Framework, desc string, expect *apiv1.No
 	}, timeout, interval).Should(BeNil())
 }

-// make sure the ConfigOk node condition eventually matches what we expect
-func checkConfigOkCondition(f *framework.Framework, desc string, expect *apiv1.NodeCondition) {
+// make sure the node status eventually matches what we expect
+func checkConfigStatus(f *framework.Framework, desc string, expect *configStateStatus) {
 	const (
 		timeout  = time.Minute
 		interval = time.Second
|
|||
Eventually(func() error {
|
||||
node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("checkConfigOkCondition: case %s: %v", desc, err)
|
||||
return fmt.Errorf("checkConfigStatus: case %s: %v", desc, err)
|
||||
}
|
||||
actual := getKubeletConfigOkCondition(node.Status.Conditions)
|
||||
if actual == nil {
|
||||
return fmt.Errorf("checkConfigOkCondition: case %s: ConfigOk condition not found on node %q", desc, framework.TestContext.NodeName)
|
||||
}
|
||||
if err := expectConfigOk(expect, actual); err != nil {
|
||||
return fmt.Errorf("checkConfigOkCondition: case %s: %v", desc, err)
|
||||
if err := expectConfigStatus(expect, node.Status.Config); err != nil {
|
||||
return fmt.Errorf("checkConfigStatus: case %s: %v", desc, err)
|
||||
}
|
||||
return nil
|
||||
}, timeout, interval).Should(BeNil())
|
||||
}
|
||||
|
||||
// if the actual matches the expect, return nil, else error explaining the mismatch
|
||||
// if a subfield of the expect is the empty string, that check is skipped
|
||||
func expectConfigOk(expect, actual *apiv1.NodeCondition) error {
|
||||
if expect.Status != actual.Status {
|
||||
return fmt.Errorf("expected condition Status %q but got %q", expect.Status, actual.Status)
|
||||
func expectConfigStatus(expect *configStateStatus, actual *apiv1.NodeConfigStatus) error {
|
||||
if expect == nil {
|
||||
return fmt.Errorf("expectConfigStatus requires expect to be non-nil (possible malformed test case)")
|
||||
}
|
||||
if len(expect.Message) > 0 && expect.Message != actual.Message {
|
||||
return fmt.Errorf("expected condition Message %q but got %q", expect.Message, actual.Message)
|
||||
if actual == nil {
|
||||
return fmt.Errorf("expectConfigStatus requires actual to be non-nil (possible Kubelet failed to update status)")
|
||||
}
|
||||
if len(expect.Reason) > 0 && expect.Reason != actual.Reason {
|
||||
return fmt.Errorf("expected condition Reason %q but got %q", expect.Reason, actual.Reason)
|
||||
var errs []string
|
||||
if !expect.SkipActive && !apiequality.Semantic.DeepEqual(expect.Active, actual.Active) {
|
||||
errs = append(errs, fmt.Sprintf("expected Active %#v but got %#v", expect.Active, actual.Active))
|
||||
}
|
||||
if !expect.SkipAssigned && !apiequality.Semantic.DeepEqual(expect.Assigned, actual.Assigned) {
|
||||
errs = append(errs, fmt.Sprintf("expected Assigned %#v but got %#v", expect.Assigned, actual.Assigned))
|
||||
}
|
||||
if !expect.SkipLkg && !apiequality.Semantic.DeepEqual(expect.LastKnownGood, actual.LastKnownGood) {
|
||||
errs = append(errs, fmt.Sprintf("expected LastKnownGood %#v but got %#v", expect.LastKnownGood, actual.LastKnownGood))
|
||||
}
|
||||
if expect.Error != actual.Error {
|
||||
errs = append(errs, fmt.Sprintf("expected Error %q but got %q", expect.Error, actual.Error))
|
||||
}
|
||||
if len(errs) > 0 {
|
||||
return fmt.Errorf("%s", strings.Join(errs, ","))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
|
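
Note the design choice in expectConfigStatus: mismatches accumulate into errs and are joined, so one poll reports every divergent field at once instead of failing on the first; semantic equality is used because the sources are pointers. The same pattern in isolation (a sketch; got and want stand in for any pair of compared fields):

	var errs []string
	if got != want {
		errs = append(errs, fmt.Sprintf("field: got %q, want %q", got, want))
	}
	// ...check the remaining fields the same way, then:
	if len(errs) > 0 {
		return fmt.Errorf("%s", strings.Join(errs, ", "))
	}
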
@@ -225,17 +225,6 @@ func setNodeConfigSource(f *framework.Framework, source *apiv1.NodeConfigSource)
 	return nil
 }

-// getKubeletConfigOkCondition returns the first NodeCondition in `cs` with Type == apiv1.NodeKubeletConfigOk,
-// or if no such condition exists, returns nil.
-func getKubeletConfigOkCondition(cs []apiv1.NodeCondition) *apiv1.NodeCondition {
-	for i := range cs {
-		if cs[i].Type == apiv1.NodeKubeletConfigOk {
-			return &cs[i]
-		}
-	}
-	return nil
-}
-
 // Causes the test to fail, or returns a status 200 response from the /configz endpoint
 func pollConfigz(timeout time.Duration, pollInterval time.Duration) *http.Response {
 	endpoint := fmt.Sprintf("http://127.0.0.1:8080/api/v1/nodes/%s/proxy/configz", framework.TestContext.NodeName)