Rename ConfigOK to KubeletConfigOk
This is a more accurate name for the condition, as it describes the status of the Kubelet's configuration. Also cleans up capitalization of internal names.
parent 6535c955bf
commit d8cc440dd6
@@ -3533,8 +3533,8 @@ const (
 	NodeDiskPressure NodeConditionType = "DiskPressure"
 	// NodeNetworkUnavailable means that network for the node is not correctly configured.
 	NodeNetworkUnavailable NodeConditionType = "NetworkUnavailable"
-	// NodeConfigOK indicates whether the kubelet is correctly configured
-	NodeConfigOK NodeConditionType = "ConfigOK"
+	// NodeKubeletConfigOk indicates whether the kubelet is correctly configured
+	NodeKubeletConfigOk NodeConditionType = "KubeletConfigOk"
 )

 type NodeCondition struct {
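For orientation, here is a minimal sketch of how a consumer of the v1 API might look up the renamed condition on a Node. The helper name findKubeletConfigOk is invented for illustration (the commit adds equivalent unexported helpers further down), and it assumes the k8s.io/api/core/v1 types as of this change:

    package main

    import (
        "fmt"

        apiv1 "k8s.io/api/core/v1"
    )

    // findKubeletConfigOk returns the KubeletConfigOk condition from a Node's
    // status, or nil if the condition has not been reported yet.
    func findKubeletConfigOk(node *apiv1.Node) *apiv1.NodeCondition {
        for i := range node.Status.Conditions {
            if node.Status.Conditions[i].Type == apiv1.NodeKubeletConfigOk {
                return &node.Status.Conditions[i]
            }
        }
        return nil
    }

    func main() {
        node := &apiv1.Node{}
        if cond := findKubeletConfigOk(node); cond == nil {
            fmt.Println("KubeletConfigOk condition not reported")
        }
    }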
@@ -71,14 +71,14 @@ func (cc *Controller) syncConfigSource(client clientset.Interface, eventClient v

 	node, err := latestNode(cc.informer.GetStore(), nodeName)
 	if err != nil {
-		cc.configOK.SetFailSyncCondition(status.FailSyncReasonInformer)
+		cc.configOk.SetFailSyncCondition(status.FailSyncReasonInformer)
 		syncerr = fmt.Errorf("%s, error: %v", status.FailSyncReasonInformer, err)
 		return
 	}

 	// check the Node and download any new config
 	if updated, cur, reason, err := cc.doSyncConfigSource(client, node.Spec.ConfigSource); err != nil {
-		cc.configOK.SetFailSyncCondition(reason)
+		cc.configOk.SetFailSyncCondition(reason)
 		syncerr = fmt.Errorf("%s, error: %v", reason, err)
 		return
 	} else if updated {

@@ -100,7 +100,7 @@ func (cc *Controller) syncConfigSource(client clientset.Interface, eventClient v
 	// - there is no need to restart to update the current config
 	// - there was no error trying to sync configuration
 	// - if, previously, there was an error trying to sync configuration, we need to clear that error from the condition
-	cc.configOK.ClearFailSyncCondition()
+	cc.configOk.ClearFailSyncCondition()
 }

 // doSyncConfigSource checkpoints and sets the store's current config to the new config or resets config,
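The SetFailSyncCondition/ClearFailSyncCondition pair above implements a "reason overlay": while syncing fails, the failure reason masks the condition's underlying reason, and a successful pass removes the mask. A minimal self-contained sketch of that idea, with all names invented for illustration:

    package main

    import (
        "fmt"
        "sync"
    )

    // okCondition holds an underlying reason plus an optional failure overlay.
    type okCondition struct {
        mu         sync.Mutex
        reason     string // reason describing the current config
        failReason string // non-empty while syncing is failing
    }

    func (c *okCondition) SetFailSyncCondition(reason string) {
        c.mu.Lock()
        defer c.mu.Unlock()
        c.failReason = reason // mask the underlying reason
    }

    func (c *okCondition) ClearFailSyncCondition() {
        c.mu.Lock()
        defer c.mu.Unlock()
        c.failReason = "" // unmask; the underlying reason shows through again
    }

    // reportedReason is what would be written to the Node condition.
    func (c *okCondition) reportedReason() string {
        c.mu.Lock()
        defer c.mu.Unlock()
        if c.failReason != "" {
            return c.failReason
        }
        return c.reason
    }

    func main() {
        c := &okCondition{reason: "using current config"}
        c.SetFailSyncCondition("failed to sync: informer error")
        fmt.Println(c.reportedReason()) // failure overlay wins
        c.ClearFailSyncCondition()
        fmt.Println(c.reportedReason()) // back to the underlying reason
    }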
@@ -53,8 +53,8 @@ type Controller struct {
 	// pendingConfigSource; write to this channel to indicate that the config source needs to be synced from the API server
 	pendingConfigSource chan bool

-	// configOK manages the ConfigOK condition that is reported in Node.Status.Conditions
-	configOK status.ConfigOKCondition
+	// configOk manages the KubeletConfigOk condition that is reported in Node.Status.Conditions
+	configOk status.ConfigOkCondition

 	// informer is the informer that watches the Node object
 	informer cache.SharedInformer

@@ -69,7 +69,7 @@ func NewController(defaultConfig *kubeletconfig.KubeletConfiguration, dynamicCon
 		defaultConfig: defaultConfig,
 		// channels must have capacity at least 1, since we signal with non-blocking writes
 		pendingConfigSource: make(chan bool, 1),
-		configOK:            status.NewConfigOKCondition(),
+		configOk:            status.NewConfigOkCondition(),
 		checkpointStore:     store.NewFsStore(utilfs.DefaultFs{}, filepath.Join(dynamicConfigDir, checkpointsDir)),
 	}
 }
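The comment about channel capacity above is load-bearing: with capacity 1, a producer can signal "work is pending" without blocking, and duplicate signals coalesce into one. A runnable sketch of the pattern, with invented names:

    package main

    import "fmt"

    func main() {
        pending := make(chan bool, 1) // capacity 1: a send never needs to block

        poke := func() {
            select {
            case pending <- true: // buffer the signal
            default: // a signal is already pending; coalesce this one
            }
        }

        poke()
        poke() // coalesced with the first signal

        select {
        case <-pending:
            fmt.Println("pending work: run one sync")
        default:
            fmt.Println("nothing pending")
        }
    }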
@@ -95,9 +95,9 @@ func (cc *Controller) Bootstrap() (*kubeletconfig.KubeletConfiguration, error) {
 	if err == nil {
 		// set the status to indicate we will use the assigned config
 		if curSource != nil {
-			cc.configOK.Set(fmt.Sprintf(status.CurRemoteMessageFmt, curSource.APIPath()), reason, apiv1.ConditionTrue)
+			cc.configOk.Set(fmt.Sprintf(status.CurRemoteMessageFmt, curSource.APIPath()), reason, apiv1.ConditionTrue)
 		} else {
-			cc.configOK.Set(status.CurLocalMessage, reason, apiv1.ConditionTrue)
+			cc.configOk.Set(status.CurLocalMessage, reason, apiv1.ConditionTrue)
 		}

 		// update the last-known-good config if necessary, and start a timer that

@@ -125,9 +125,9 @@ func (cc *Controller) Bootstrap() (*kubeletconfig.KubeletConfiguration, error) {

 	// set the status to indicate that we had to roll back to the lkg for the reason reported when we tried to load the assigned config
 	if lkgSource != nil {
-		cc.configOK.Set(fmt.Sprintf(status.LkgRemoteMessageFmt, lkgSource.APIPath()), reason, apiv1.ConditionFalse)
+		cc.configOk.Set(fmt.Sprintf(status.LkgRemoteMessageFmt, lkgSource.APIPath()), reason, apiv1.ConditionFalse)
 	} else {
-		cc.configOK.Set(status.LkgLocalMessage, reason, apiv1.ConditionFalse)
+		cc.configOk.Set(status.LkgLocalMessage, reason, apiv1.ConditionFalse)
 	}

 	// return the last-known-good config

@@ -146,11 +146,11 @@ func (cc *Controller) StartSync(client clientset.Interface, eventClient v1core.E
 		return
 	}

-	// start the ConfigOK condition sync loop
+	// start the ConfigOk condition sync loop
 	go utilpanic.HandlePanic(func() {
-		utillog.Infof("starting ConfigOK condition sync loop")
+		utillog.Infof("starting ConfigOk condition sync loop")
 		wait.JitterUntil(func() {
-			cc.configOK.Sync(client, nodeName)
+			cc.configOk.Sync(client, nodeName)
 		}, 10*time.Second, 0.2, true, wait.NeverStop)
 	})()
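StartSync drives the condition sync through wait.JitterUntil from k8s.io/apimachinery. A self-contained sketch of the same loop shape; the loop body here is a placeholder:

    package main

    import (
        "fmt"
        "time"

        "k8s.io/apimachinery/pkg/util/wait"
    )

    func main() {
        // Run roughly every 10s with up to +20% jitter; sliding=true measures the
        // period from the end of each run, and wait.NeverStop never closes, so
        // the loop runs until the process exits.
        wait.JitterUntil(func() {
            fmt.Println("sync the condition to the API server")
        }, 10*time.Second, 0.2, true, wait.NeverStop)
    }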
@@ -97,8 +97,8 @@ const (
 	EmptyReason = "unknown - reason not provided"
 )

-// ConfigOKCondition represents a ConfigOK NodeCondition
-type ConfigOKCondition interface {
+// ConfigOkCondition represents a ConfigOk NodeCondition
+type ConfigOkCondition interface {
 	// Set sets the Message, Reason, and Status of the condition
 	Set(message, reason string, status apiv1.ConditionStatus)
 	// SetFailSyncCondition sets the condition for when syncing Kubelet config fails

@@ -109,30 +109,30 @@ type ConfigOKCondition interface {
 	Sync(client clientset.Interface, nodeName string)
 }

-// configOKCondition implements ConfigOKCondition
-type configOKCondition struct {
+// configOkCondition implements ConfigOkCondition
+type configOkCondition struct {
 	// conditionMux is a mutex on the condition, alternate between setting and syncing the condition
 	conditionMux sync.Mutex
-	// condition is the current ConfigOK node condition, which will be reported in the Node.status.conditions
+	// condition is the current ConfigOk node condition, which will be reported in the Node.status.conditions
 	condition *apiv1.NodeCondition
 	// failedSyncReason is sent in place of the usual reason when the Kubelet is failing to sync the remote config
 	failedSyncReason string
-	// pendingCondition; write to this channel to indicate that ConfigOK needs to be synced to the API server
+	// pendingCondition; write to this channel to indicate that ConfigOk needs to be synced to the API server
 	pendingCondition chan bool
 }

-// NewConfigOKCondition returns a new ConfigOKCondition
-func NewConfigOKCondition() ConfigOKCondition {
-	return &configOKCondition{
+// NewConfigOkCondition returns a new ConfigOkCondition
+func NewConfigOkCondition() ConfigOkCondition {
+	return &configOkCondition{
 		// channels must have capacity at least 1, since we signal with non-blocking writes
 		pendingCondition: make(chan bool, 1),
 	}
 }

 // unsafeSet sets the current state of the condition
-// it does not grab the conditionMux lock, so you should generally use setConfigOK unless you need to grab the lock
+// it does not grab the conditionMux lock, so you should generally use setConfigOk unless you need to grab the lock
 // at a higher level to synchronize additional operations
-func (c *configOKCondition) unsafeSet(message, reason string, status apiv1.ConditionStatus) {
+func (c *configOkCondition) unsafeSet(message, reason string, status apiv1.ConditionStatus) {
 	// We avoid an empty Message, Reason, or Status on the condition. Since we use Patch to update conditions, an empty
 	// field might cause a value from a previous condition to leak through, which can be very confusing.
 	if len(message) == 0 {
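unsafeSet and Set follow a common Go convention: the exported method takes the mutex and delegates to an unexported method that assumes the lock is held, so callers that already hold the lock can compose several mutations atomically. A minimal sketch with invented names:

    package main

    import (
        "fmt"
        "sync"
    )

    type counter struct {
        mu sync.Mutex
        n  int
    }

    // unsafeAdd assumes c.mu is already held by the caller.
    func (c *counter) unsafeAdd(delta int) { c.n += delta }

    // Add is the safe entry point for external callers.
    func (c *counter) Add(delta int) {
        c.mu.Lock()
        defer c.mu.Unlock()
        c.unsafeAdd(delta)
    }

    // AddTwice holds the lock across two unsafe calls so the combined
    // update is atomic, which is the payoff of the convention.
    func (c *counter) AddTwice(delta int) {
        c.mu.Lock()
        defer c.mu.Unlock()
        c.unsafeAdd(delta)
        c.unsafeAdd(delta)
    }

    func main() {
        c := &counter{}
        c.Add(1)
        c.AddTwice(2)
        fmt.Println(c.n) // 5
    }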
@@ -149,21 +149,21 @@ func (c *configOKCondition) unsafeSet(message, reason string, status apiv1.Condi
 		Message: message,
 		Reason:  reason,
 		Status:  status,
-		Type:    apiv1.NodeConfigOK,
+		Type:    apiv1.NodeKubeletConfigOk,
 	}

 	c.pokeSyncWorker()
 }

-func (c *configOKCondition) Set(message, reason string, status apiv1.ConditionStatus) {
+func (c *configOkCondition) Set(message, reason string, status apiv1.ConditionStatus) {
 	c.conditionMux.Lock()
 	defer c.conditionMux.Unlock()
 	c.unsafeSet(message, reason, status)
 }

-// SetFailSyncCondition updates the ConfigOK status to reflect that we failed to sync to the latest config,
+// SetFailSyncCondition updates the ConfigOk status to reflect that we failed to sync to the latest config,
 // e.g. due to a malformed Node.Spec.ConfigSource, a download failure, etc.
-func (c *configOKCondition) SetFailSyncCondition(reason string) {
+func (c *configOkCondition) SetFailSyncCondition(reason string) {
 	c.conditionMux.Lock()
 	defer c.conditionMux.Unlock()
 	// set the reason overlay and poke the sync worker to send the update

@@ -172,7 +172,7 @@ func (c *configOKCondition) SetFailSyncCondition(reason string) {
 }

 // ClearFailSyncCondition removes the "failed to sync" reason overlay
-func (c *configOKCondition) ClearFailSyncCondition() {
+func (c *configOkCondition) ClearFailSyncCondition() {
 	c.conditionMux.Lock()
 	defer c.conditionMux.Unlock()
 	// clear the reason overlay and poke the sync worker to send the update

@@ -180,8 +180,8 @@ func (c *configOKCondition) ClearFailSyncCondition() {
 	c.pokeSyncWorker()
 }

-// pokeSyncWorker notes that the ConfigOK condition needs to be synced to the API server
-func (c *configOKCondition) pokeSyncWorker() {
+// pokeSyncWorker notes that the ConfigOk condition needs to be synced to the API server
+func (c *configOkCondition) pokeSyncWorker() {
 	select {
 	case c.pendingCondition <- true:
 	default:

@@ -190,7 +190,7 @@ func (c *configOKCondition) pokeSyncWorker() {

 // Sync attempts to sync `c.condition` with the Node object for this Kubelet,
 // if syncing fails, an error is logged, and work is queued for retry.
-func (c *configOKCondition) Sync(client clientset.Interface, nodeName string) {
+func (c *configOkCondition) Sync(client clientset.Interface, nodeName string) {
 	select {
 	case <-c.pendingCondition:
 	default:
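The select at the top of Sync is the receive side of the pendingCondition channel: consume a pending signal if one is buffered, otherwise return without doing any work. A runnable sketch with invented names:

    package main

    import "fmt"

    // syncIfPending runs doSync only when a signal has been buffered in pending;
    // otherwise it returns immediately, making the periodic caller cheap.
    func syncIfPending(pending chan bool, doSync func()) {
        select {
        case <-pending: // consume the buffered signal
        default:
            return // nothing pending; skip this tick
        }
        doSync()
    }

    func main() {
        pending := make(chan bool, 1)
        pending <- true
        syncIfPending(pending, func() { fmt.Println("synced") }) // prints "synced"
        syncIfPending(pending, func() { fmt.Println("synced") }) // no-op
    }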
@@ -212,21 +212,21 @@ func (c *configOKCondition) Sync(client clientset.Interface, nodeName string) {
 	}()

 	if c.condition == nil {
-		utillog.Infof("ConfigOK condition is nil, skipping ConfigOK sync")
+		utillog.Infof("ConfigOk condition is nil, skipping ConfigOk sync")
 		return
 	}

 	// get the Node so we can check the current condition
 	node, err := client.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
 	if err != nil {
-		err = fmt.Errorf("could not get Node %q, will not sync ConfigOK condition, error: %v", nodeName, err)
+		err = fmt.Errorf("could not get Node %q, will not sync ConfigOk condition, error: %v", nodeName, err)
 		return
 	}

 	// set timestamps
 	syncTime := metav1.NewTime(time.Now())
 	c.condition.LastHeartbeatTime = syncTime
-	if remote := getConfigOK(node.Status.Conditions); remote == nil || !utilequal.ConfigOKEq(remote, c.condition) {
+	if remote := getKubeletConfigOk(node.Status.Conditions); remote == nil || !utilequal.KubeletConfigOkEq(remote, c.condition) {
 		// update transition time the first time we create the condition,
 		// or if we are semantically changing the condition
 		c.condition.LastTransitionTime = syncTime

@@ -269,7 +269,7 @@ func (c *configOKCondition) Sync(client clientset.Interface, nodeName string) {
 		return
 	}

-	patchConfigOK(node, condition)
+	patchConfigOk(node, condition)
 	after, err := kuberuntime.Encode(encoder, node)
 	if err != nil {
 		err = fmt.Errorf(`failed to encode "after" node while generating patch, error: %v`, err)

@@ -278,36 +278,36 @@ func (c *configOKCondition) Sync(client clientset.Interface, nodeName string) {

 	patch, err := strategicpatch.CreateTwoWayMergePatch(before, after, apiv1.Node{})
 	if err != nil {
-		err = fmt.Errorf("failed to generate patch for updating ConfigOK condition, error: %v", err)
+		err = fmt.Errorf("failed to generate patch for updating ConfigOk condition, error: %v", err)
 		return
 	}

 	// patch the remote Node object
 	_, err = client.CoreV1().Nodes().PatchStatus(nodeName, patch)
 	if err != nil {
-		err = fmt.Errorf("could not update ConfigOK condition, error: %v", err)
+		err = fmt.Errorf("could not update ConfigOk condition, error: %v", err)
 		return
 	}
 }

-// patchConfigOK replaces or adds the ConfigOK condition to the node
-func patchConfigOK(node *apiv1.Node, configOK *apiv1.NodeCondition) {
+// patchConfigOk replaces or adds the ConfigOk condition to the node
+func patchConfigOk(node *apiv1.Node, configOk *apiv1.NodeCondition) {
 	for i := range node.Status.Conditions {
-		if node.Status.Conditions[i].Type == apiv1.NodeConfigOK {
+		if node.Status.Conditions[i].Type == apiv1.NodeKubeletConfigOk {
 			// edit the condition
-			node.Status.Conditions[i] = *configOK
+			node.Status.Conditions[i] = *configOk
 			return
 		}
 	}
 	// append the condition
-	node.Status.Conditions = append(node.Status.Conditions, *configOK)
+	node.Status.Conditions = append(node.Status.Conditions, *configOk)
 }

-// getConfigOK returns the first NodeCondition in `cs` with Type == apiv1.NodeConfigOK,
+// getKubeletConfigOk returns the first NodeCondition in `cs` with Type == apiv1.NodeKubeletConfigOk,
 // or if no such condition exists, returns nil.
-func getConfigOK(cs []apiv1.NodeCondition) *apiv1.NodeCondition {
+func getKubeletConfigOk(cs []apiv1.NodeCondition) *apiv1.NodeCondition {
 	for i := range cs {
-		if cs[i].Type == apiv1.NodeConfigOK {
+		if cs[i].Type == apiv1.NodeKubeletConfigOk {
 			return &cs[i]
 		}
 	}
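The Sync body above shows the standard pattern for updating a single condition without clobbering the rest of the status: snapshot the object, mutate it, diff the snapshots into a strategic merge patch, and PATCH the status subresource. A self-contained sketch using plain JSON encoding for brevity (the real code encodes via the scheme's codec); the apiv1.NodeKubeletConfigOk constant is the one added by this commit:

    package main

    import (
        "encoding/json"
        "fmt"

        apiv1 "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/util/strategicpatch"
    )

    // setCondition mirrors patchConfigOk: replace the condition in place if
    // present, otherwise append it.
    func setCondition(node *apiv1.Node, cond *apiv1.NodeCondition) {
        for i := range node.Status.Conditions {
            if node.Status.Conditions[i].Type == cond.Type {
                node.Status.Conditions[i] = *cond
                return
            }
        }
        node.Status.Conditions = append(node.Status.Conditions, *cond)
    }

    func main() {
        node := &apiv1.Node{}

        before, err := json.Marshal(node) // snapshot "before"
        if err != nil {
            panic(err)
        }
        setCondition(node, &apiv1.NodeCondition{
            Type:   apiv1.NodeKubeletConfigOk,
            Status: apiv1.ConditionTrue,
        })
        after, err := json.Marshal(node) // snapshot "after"
        if err != nil {
            panic(err)
        }

        // Diff the snapshots; the Node type tells the patcher which lists merge by key.
        patch, err := strategicpatch.CreateTwoWayMergePatch(before, after, apiv1.Node{})
        if err != nil {
            panic(err)
        }
        fmt.Printf("patch: %s\n", patch)
        // The controller then sends it with client.CoreV1().Nodes().PatchStatus(nodeName, patch).
    }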
@@ -45,7 +45,7 @@ func ObjectRefEq(a, b *apiv1.ObjectReference) bool {
 	return a.UID == b.UID && a.Namespace == b.Namespace && a.Name == b.Name
 }

-// ConfigOKEq returns true if the two conditions are semantically equivalent in the context of dynamic config
-func ConfigOKEq(a, b *apiv1.NodeCondition) bool {
+// KubeletConfigOkEq returns true if the two conditions are semantically equivalent in the context of dynamic config
+func KubeletConfigOkEq(a, b *apiv1.NodeCondition) bool {
 	return a.Message == b.Message && a.Reason == b.Reason && a.Status == b.Status
 }
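Note that KubeletConfigOkEq compares only Message, Reason, and Status; the timestamps are deliberately left out so that a heartbeat alone never registers as a semantic change, and Sync relies on that to decide when to bump LastTransitionTime. A small sketch of that gate, with an invented helper name:

    package main

    import (
        "fmt"

        apiv1 "k8s.io/api/core/v1"
    )

    // shouldBumpTransitionTime returns true when the remote condition is missing
    // or differs semantically from the local one; heartbeat timestamps are
    // intentionally ignored, matching KubeletConfigOkEq.
    func shouldBumpTransitionTime(remote, local *apiv1.NodeCondition) bool {
        if remote == nil {
            return true
        }
        return remote.Message != local.Message ||
            remote.Reason != local.Reason ||
            remote.Status != local.Status
    }

    func main() {
        a := &apiv1.NodeCondition{Reason: "passing", Status: apiv1.ConditionTrue}
        b := &apiv1.NodeCondition{Reason: "passing", Status: apiv1.ConditionTrue}
        fmt.Println(shouldBumpTransitionTime(a, b)) // false: only timestamps may differ
    }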
@@ -3964,8 +3964,8 @@ const (
 	NodeDiskPressure NodeConditionType = "DiskPressure"
 	// NodeNetworkUnavailable means that network for the node is not correctly configured.
 	NodeNetworkUnavailable NodeConditionType = "NetworkUnavailable"
-	// NodeConfigOK indicates whether the kubelet is correctly configured
-	NodeConfigOK NodeConditionType = "ConfigOK"
+	// NodeKubeletConfigOk indicates whether the kubelet is correctly configured
+	NodeKubeletConfigOk NodeConditionType = "KubeletConfigOk"
 )

 // NodeCondition contains condition information for a node.
@@ -38,7 +38,7 @@ import (
 type configState struct {
 	desc         string
 	configSource *apiv1.NodeConfigSource
-	expectConfigOK *apiv1.NodeCondition
+	expectConfigOk *apiv1.NodeCondition
 	expectConfig *kubeletconfig.KubeletConfiguration
 	// whether the state would cause a config change event as a result of the update to Node.Spec.ConfigSource,
 	// assuming that the current source would have also caused a config change event.
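configState is the row type for a table-driven e2e test: each case bundles a config source with the condition and config the Kubelet should converge to. A minimal self-contained sketch of the same shape, with all types invented for illustration:

    package main

    import "fmt"

    // testCase bundles an input with the state we expect to converge to,
    // mirroring the role configState plays in the e2e test below.
    type testCase struct {
        desc   string
        input  int
        expect int
    }

    func main() {
        double := func(x int) int { return x * 2 }
        cases := []testCase{
            {desc: "zero stays zero", input: 0, expect: 0},
            {desc: "doubles positive values", input: 2, expect: 4},
        }
        for _, c := range cases {
            if got := double(c.input); got != c.expect {
                fmt.Printf("case %q: got %d, want %d\n", c.desc, got, c.expect)
            }
        }
    }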
@@ -85,14 +85,14 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube
 				UID:       originalConfigMap.UID,
 				Namespace: originalConfigMap.Namespace,
 				Name:      originalConfigMap.Name}},
-			expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionTrue,
+			expectConfigOk: &apiv1.NodeCondition{Type: apiv1.NodeKubeletConfigOk, Status: apiv1.ConditionTrue,
 				Message: fmt.Sprintf(status.CurRemoteMessageFmt, configMapAPIPath(originalConfigMap)),
 				Reason:  status.CurRemoteOkayReason},
 			expectConfig: originalKC,
 		}, false)
 	})

-	Context("When setting new NodeConfigSources that cause transitions between ConfigOK conditions", func() {
+	Context("When setting new NodeConfigSources that cause transitions between ConfigOk conditions", func() {
 		It("the Kubelet should report the appropriate status and configz", func() {
 			var err error
 			// we base the "correct" configmap off of the current configuration
@@ -123,7 +123,7 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube
 				// Node.Spec.ConfigSource is nil
 				{desc: "Node.Spec.ConfigSource is nil",
 					configSource: nil,
-					expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionTrue,
+					expectConfigOk: &apiv1.NodeCondition{Type: apiv1.NodeKubeletConfigOk, Status: apiv1.ConditionTrue,
 						Message: status.CurLocalMessage,
 						Reason:  status.CurLocalOkayReason},
 					expectConfig: nil,
@@ -133,7 +133,7 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube
 				// Node.Spec.ConfigSource has all nil subfields
 				{desc: "Node.Spec.ConfigSource has all nil subfields",
 					configSource: &apiv1.NodeConfigSource{ConfigMapRef: nil},
-					expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionFalse,
+					expectConfigOk: &apiv1.NodeCondition{Type: apiv1.NodeKubeletConfigOk, Status: apiv1.ConditionFalse,
 						Message: "",
 						Reason:  fmt.Sprintf(status.FailSyncReasonFmt, status.FailSyncReasonAllNilSubfields)},
 					expectConfig: nil,
@@ -146,7 +146,7 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube
 					configSource: &apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{
 						UID:  "foo",
 						Name: "bar"}}, // missing Namespace
-					expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionFalse,
+					expectConfigOk: &apiv1.NodeCondition{Type: apiv1.NodeKubeletConfigOk, Status: apiv1.ConditionFalse,
 						Message: "",
 						Reason:  fmt.Sprintf(status.FailSyncReasonFmt, status.FailSyncReasonPartialObjectReference)},
 					expectConfig: nil,
@@ -158,7 +158,7 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube
 					configSource: &apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{UID: "foo",
 						Namespace: correctConfigMap.Namespace,
 						Name:      correctConfigMap.Name}},
-					expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionFalse,
+					expectConfigOk: &apiv1.NodeCondition{Type: apiv1.NodeKubeletConfigOk, Status: apiv1.ConditionFalse,
 						Message: "",
 						Reason:  fmt.Sprintf(status.FailSyncReasonFmt, fmt.Sprintf(status.FailSyncReasonUIDMismatchFmt, "foo", configMapAPIPath(correctConfigMap), correctConfigMap.UID))},
 					expectConfig: nil,
@@ -171,7 +171,7 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube
 						UID:       correctConfigMap.UID,
 						Namespace: correctConfigMap.Namespace,
 						Name:      correctConfigMap.Name}},
-					expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionTrue,
+					expectConfigOk: &apiv1.NodeCondition{Type: apiv1.NodeKubeletConfigOk, Status: apiv1.ConditionTrue,
 						Message: fmt.Sprintf(status.CurRemoteMessageFmt, configMapAPIPath(correctConfigMap)),
 						Reason:  status.CurRemoteOkayReason},
 					expectConfig: correctKC,
@@ -184,7 +184,7 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube
 						UID:       failParseConfigMap.UID,
 						Namespace: failParseConfigMap.Namespace,
 						Name:      failParseConfigMap.Name}},
-					expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionFalse,
+					expectConfigOk: &apiv1.NodeCondition{Type: apiv1.NodeKubeletConfigOk, Status: apiv1.ConditionFalse,
 						Message: status.LkgLocalMessage,
 						Reason:  fmt.Sprintf(status.CurFailParseReasonFmt, configMapAPIPath(failParseConfigMap))},
 					expectConfig: nil,
@@ -197,7 +197,7 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube
 						UID:       failValidateConfigMap.UID,
 						Namespace: failValidateConfigMap.Namespace,
 						Name:      failValidateConfigMap.Name}},
-					expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionFalse,
+					expectConfigOk: &apiv1.NodeCondition{Type: apiv1.NodeKubeletConfigOk, Status: apiv1.ConditionFalse,
 						Message: status.LkgLocalMessage,
 						Reason:  fmt.Sprintf(status.CurFailValidateReasonFmt, configMapAPIPath(failValidateConfigMap))},
 					expectConfig: nil,
@@ -239,7 +239,7 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube
 						UID:       lkgConfigMap.UID,
 						Namespace: lkgConfigMap.Namespace,
 						Name:      lkgConfigMap.Name}},
-					expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionTrue,
+					expectConfigOk: &apiv1.NodeCondition{Type: apiv1.NodeKubeletConfigOk, Status: apiv1.ConditionTrue,
 						Message: fmt.Sprintf(status.CurRemoteMessageFmt, configMapAPIPath(lkgConfigMap)),
 						Reason:  status.CurRemoteOkayReason},
 					expectConfig: lkgKC,
@@ -252,7 +252,7 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube
 						UID:       badConfigMap.UID,
 						Namespace: badConfigMap.Namespace,
 						Name:      badConfigMap.Name}},
-					expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionFalse,
+					expectConfigOk: &apiv1.NodeCondition{Type: apiv1.NodeKubeletConfigOk, Status: apiv1.ConditionFalse,
 						Message: fmt.Sprintf(status.LkgRemoteMessageFmt, configMapAPIPath(lkgConfigMap)),
 						Reason:  fmt.Sprintf(status.CurFailParseReasonFmt, configMapAPIPath(badConfigMap))},
 					expectConfig: lkgKC,
@@ -290,7 +290,7 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube
 						UID:       cm1.UID,
 						Namespace: cm1.Namespace,
 						Name:      cm1.Name}},
-					expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionTrue,
+					expectConfigOk: &apiv1.NodeCondition{Type: apiv1.NodeKubeletConfigOk, Status: apiv1.ConditionTrue,
 						Message: fmt.Sprintf(status.CurRemoteMessageFmt, configMapAPIPath(cm1)),
 						Reason:  status.CurRemoteOkayReason},
 					expectConfig: kc1,
@@ -302,7 +302,7 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube
 						UID:       cm2.UID,
 						Namespace: cm2.Namespace,
 						Name:      cm2.Name}},
-					expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionTrue,
+					expectConfigOk: &apiv1.NodeCondition{Type: apiv1.NodeKubeletConfigOk, Status: apiv1.ConditionTrue,
 						Message: fmt.Sprintf(status.CurRemoteMessageFmt, configMapAPIPath(cm2)),
 						Reason:  status.CurRemoteOkayReason},
 					expectConfig: kc2,
@@ -341,7 +341,7 @@ func testBothDirections(f *framework.Framework, first *configState, states []con
 		}
 }

-// setAndTestKubeletConfigState tests that after setting the config source, the ConfigOK condition
+// setAndTestKubeletConfigState tests that after setting the config source, the ConfigOk condition
 // and (if appropriate) configuration exposed via conifgz are as expected.
 // The configuration will be converted to the internal type prior to comparison.
 func setAndTestKubeletConfigState(f *framework.Framework, state *configState, expectEvent bool) {
@@ -355,7 +355,7 @@ func setAndTestKubeletConfigState(f *framework.Framework, state *configState, ex
 	// check that config source actually got set to what we expect
 	checkNodeConfigSource(f, state.desc, state.configSource)
 	// check condition
-	checkConfigOKCondition(f, state.desc, state.expectConfigOK)
+	checkConfigOkCondition(f, state.desc, state.expectConfigOk)
 	// check expectConfig
 	if state.expectConfig != nil {
 		checkConfig(f, state.desc, state.expectConfig)
@@ -385,8 +385,8 @@ func checkNodeConfigSource(f *framework.Framework, desc string, expect *apiv1.No
 	}, timeout, interval).Should(BeNil())
 }

-// make sure the ConfigOK node condition eventually matches what we expect
-func checkConfigOKCondition(f *framework.Framework, desc string, expect *apiv1.NodeCondition) {
+// make sure the ConfigOk node condition eventually matches what we expect
+func checkConfigOkCondition(f *framework.Framework, desc string, expect *apiv1.NodeCondition) {
 	const (
 		timeout  = time.Minute
 		interval = time.Second
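The next hunk shows checkConfigOkCondition polling with Gomega's Eventually until the condition matches or the timeout elapses. A self-contained sketch of that idiom using the real Gomega API; the readiness check here is a stand-in:

    package main

    import (
        "fmt"
        "time"

        . "github.com/onsi/gomega"
    )

    func main() {
        // Outside Ginkgo, Gomega needs a fail handler registered explicitly.
        RegisterFailHandler(func(message string, callerSkip ...int) { panic(message) })

        start := time.Now()
        // Poll every second, for up to a minute, until the func returns nil.
        Eventually(func() error {
            if time.Since(start) < 3*time.Second {
                return fmt.Errorf("not ready yet")
            }
            return nil
        }, time.Minute, time.Second).Should(BeNil())

        fmt.Println("condition matched")
    }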
@@ -395,14 +395,14 @@ func checkConfigOKCondition(f *framework.Framework, desc string, expect *apiv1.N
 	Eventually(func() error {
 		node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
 		if err != nil {
-			return fmt.Errorf("checkConfigOKCondition: case %s: %v", desc, err)
+			return fmt.Errorf("checkConfigOkCondition: case %s: %v", desc, err)
 		}
-		actual := getConfigOKCondition(node.Status.Conditions)
+		actual := getKubeletConfigOkCondition(node.Status.Conditions)
 		if actual == nil {
-			return fmt.Errorf("checkConfigOKCondition: case %s: ConfigOK condition not found on node %q", desc, framework.TestContext.NodeName)
+			return fmt.Errorf("checkConfigOkCondition: case %s: ConfigOk condition not found on node %q", desc, framework.TestContext.NodeName)
 		}
-		if err := expectConfigOK(expect, actual); err != nil {
-			return fmt.Errorf("checkConfigOKCondition: case %s: %v", desc, err)
+		if err := expectConfigOk(expect, actual); err != nil {
+			return fmt.Errorf("checkConfigOkCondition: case %s: %v", desc, err)
 		}
 		return nil
 	}, timeout, interval).Should(BeNil())
@@ -410,7 +410,7 @@ func checkConfigOKCondition(f *framework.Framework, desc string, expect *apiv1.N

 // if the actual matches the expect, return nil, else error explaining the mismatch
 // if a subfield of the expect is the empty string, that check is skipped
-func expectConfigOK(expect, actual *apiv1.NodeCondition) error {
+func expectConfigOk(expect, actual *apiv1.NodeCondition) error {
 	if expect.Status != actual.Status {
 		return fmt.Errorf("expected condition Status %q but got %q", expect.Status, actual.Status)
 	}
@@ -219,11 +219,11 @@ func setNodeConfigSource(f *framework.Framework, source *apiv1.NodeConfigSource)
 	return nil
 }

-// getConfigOK returns the first NodeCondition in `cs` with Type == apiv1.NodeConfigOK,
+// getKubeletConfigOkCondition returns the first NodeCondition in `cs` with Type == apiv1.NodeKubeletConfigOk,
 // or if no such condition exists, returns nil.
-func getConfigOKCondition(cs []apiv1.NodeCondition) *apiv1.NodeCondition {
+func getKubeletConfigOkCondition(cs []apiv1.NodeCondition) *apiv1.NodeCondition {
 	for i := range cs {
-		if cs[i].Type == apiv1.NodeConfigOK {
+		if cs[i].Type == apiv1.NodeKubeletConfigOk {
 			return &cs[i]
 		}
 	}