mirror of https://github.com/k3s-io/k3s
fix some typos
parent 988c9d619e
commit 545aca3d18
@@ -16,6 +16,12 @@ limitations under the License.
 package kubelet
 
+import "errors"
+
 const (
 	NetworkNotReadyErrorMsg = "network is not ready"
 )
+
+var (
+	ErrNetworkUnknown = errors.New("network state unknown")
+)

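The hunk above introduces a package-level sentinel, ErrNetworkUnknown, which a later hunk substitutes for the ad-hoc fmt.Errorf("network state unknown") value in newRuntimeState. Below is a minimal standalone sketch (not part of the diff) of why a shared sentinel is useful: every caller can compare against the one well-known value. The networkState type and main function are hypothetical illustration only.

package main

import (
	"errors"
	"fmt"
)

// ErrNetworkUnknown is a single shared sentinel value.
var ErrNetworkUnknown = errors.New("network state unknown")

// networkState is a hypothetical stand-in for the kubelet's runtimeState.
type networkState struct {
	networkError error
}

func main() {
	s := networkState{networkError: ErrNetworkUnknown}
	// Identity comparison works because every caller sees the same sentinel instance;
	// a fresh fmt.Errorf(...) constructed at each call site would never compare equal.
	if s.networkError == ErrNetworkUnknown {
		fmt.Println("network state is still unknown:", s.networkError)
	}
}
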
@@ -1563,9 +1563,9 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error {
 	}
 
 	// If the network plugin is not ready, only start the pod if it uses the host network
-	if rs := kl.runtimeState.networkErrors(); len(rs) != 0 && !kubecontainer.IsHostNetworkPod(pod) {
-		kl.recorder.Eventf(pod, v1.EventTypeWarning, events.NetworkNotReady, "%s: %v", NetworkNotReadyErrorMsg, rs)
-		return fmt.Errorf("%s: %v", NetworkNotReadyErrorMsg, rs)
+	if err := kl.runtimeState.networkErrors(); err != nil && !kubecontainer.IsHostNetworkPod(pod) {
+		kl.recorder.Eventf(pod, v1.EventTypeWarning, events.NetworkNotReady, "%s: %v", NetworkNotReadyErrorMsg, err)
+		return fmt.Errorf("%s: %v", NetworkNotReadyErrorMsg, err)
 	}
 
 	// Create Cgroups for the pod and apply resource parameters

@@ -1820,8 +1820,8 @@ func (kl *Kubelet) syncLoop(updates <-chan kubetypes.PodUpdate, handler SyncHand
 	)
 	duration := base
 	for {
-		if rs := kl.runtimeState.runtimeErrors(); len(rs) != 0 {
-			klog.Infof("skipping pod synchronization - %v", rs)
+		if err := kl.runtimeState.runtimeErrors(); err != nil {
+			klog.Infof("skipping pod synchronization - %v", err)
 			// exponential backoff
 			time.Sleep(duration)
 			duration = time.Duration(math.Min(float64(max), factor*float64(duration)))

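The backoff lines retained as context above grow the sleep interval by factor on every failed sync and cap it at max. A small self-contained sketch (not part of the diff) of that capped exponential backoff follows; the base, factor, and max values here are assumptions for illustration, not the kubelet's actual constants.

package main

import (
	"fmt"
	"math"
	"time"
)

func main() {
	// Assumed values for illustration; the real constants are defined inside syncLoop.
	const (
		base   = 100 * time.Millisecond
		max    = 5 * time.Second
		factor = 2.0
	)
	duration := base
	for i := 0; i < 8; i++ {
		// Same expression as in the diff: grow by `factor`, but never past `max`.
		duration = time.Duration(math.Min(float64(max), factor*float64(duration)))
		fmt.Printf("retry %d would sleep %v\n", i+1, duration)
	}
}
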
@@ -18,6 +18,7 @@ go_library(
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library",
         "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
         "//staging/src/k8s.io/cloud-provider:go_default_library",

@@ -29,6 +29,7 @@ import (
 	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/errors"
 	utilnet "k8s.io/apimachinery/pkg/util/net"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	cloudprovider "k8s.io/cloud-provider"

@@ -438,8 +439,8 @@ func GoRuntime() Setter {
 // ReadyCondition returns a Setter that updates the v1.NodeReady condition on the node.
 func ReadyCondition(
 	nowFunc func() time.Time, // typically Kubelet.clock.Now
-	runtimeErrorsFunc func() []string, // typically Kubelet.runtimeState.runtimeErrors
-	networkErrorsFunc func() []string, // typically Kubelet.runtimeState.networkErrors
+	runtimeErrorsFunc func() error, // typically Kubelet.runtimeState.runtimeErrors
+	networkErrorsFunc func() error, // typically Kubelet.runtimeState.networkErrors
 	appArmorValidateHostFunc func() error, // typically Kubelet.appArmorValidator.ValidateHost, might be nil depending on whether there was an appArmorValidator
 	cmStatusFunc func() cm.Status, // typically Kubelet.containerManager.Status
 	recordEventFunc func(eventType, event string), // typically Kubelet.recordNodeStatusEvent

@@ -456,7 +457,7 @@ func ReadyCondition(
 			Message:           "kubelet is posting ready status",
 			LastHeartbeatTime: currentTime,
 		}
-		rs := append(runtimeErrorsFunc(), networkErrorsFunc()...)
+		errs := []error{runtimeErrorsFunc(), networkErrorsFunc()}
 		requiredCapacities := []v1.ResourceName{v1.ResourceCPU, v1.ResourceMemory, v1.ResourcePods}
 		if utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) {
 			requiredCapacities = append(requiredCapacities, v1.ResourceEphemeralStorage)

@@ -468,14 +469,14 @@ func ReadyCondition(
 			}
 		}
 		if len(missingCapacities) > 0 {
-			rs = append(rs, fmt.Sprintf("Missing node capacity for resources: %s", strings.Join(missingCapacities, ", ")))
+			errs = append(errs, fmt.Errorf("Missing node capacity for resources: %s", strings.Join(missingCapacities, ", ")))
 		}
-		if len(rs) > 0 {
+		if aggregatedErr := errors.NewAggregate(errs); aggregatedErr != nil {
 			newNodeReadyCondition = v1.NodeCondition{
 				Type:              v1.NodeReady,
 				Status:            v1.ConditionFalse,
 				Reason:            "KubeletNotReady",
-				Message:           strings.Join(rs, ","),
+				Message:           aggregatedErr.Error(),
 				LastHeartbeatTime: currentTime,
 			}
 		}

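The new code above appends runtimeErrorsFunc() and networkErrorsFunc() to errs even when they return nil; errors.NewAggregate from k8s.io/apimachinery/pkg/util/errors filters out nil entries, returns nil when nothing remains (so the node stays Ready), and renders multiple errors as "[a, b]", which is why the test expectation further down changes to "[runtime, network]". A minimal sketch (not part of the diff) of that behaviour:

package main

import (
	"errors"
	"fmt"

	utilerrors "k8s.io/apimachinery/pkg/util/errors"
)

func main() {
	// nil entries are filtered out, so an all-nil slice aggregates to nil (node stays Ready).
	fmt.Println(utilerrors.NewAggregate([]error{nil, nil}) == nil) // true

	// A single error keeps its message as-is.
	one := utilerrors.NewAggregate([]error{errors.New("runtime")})
	fmt.Println(one.Error()) // "runtime"

	// Multiple errors are joined as "[a, b]", matching the updated test expectation.
	both := utilerrors.NewAggregate([]error{errors.New("runtime"), errors.New("network")})
	fmt.Println(both.Error()) // "[runtime, network]"
}
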
@@ -17,6 +17,7 @@ limitations under the License.
 package nodestatus
 
 import (
+	"errors"
 	"fmt"
 	"net"
 	"sort"

@@ -890,8 +891,8 @@ func TestReadyCondition(t *testing.T) {
 	cases := []struct {
 		desc                     string
 		node                     *v1.Node
-		runtimeErrors            []string
-		networkErrors            []string
+		runtimeErrors            error
+		networkErrors            error
 		appArmorValidateHostFunc func() error
 		cmStatus                 cm.Status
 		expectConditions         []v1.NodeCondition

@@ -926,24 +927,12 @@ func TestReadyCondition(t *testing.T) {
 			},
 			expectConditions: []v1.NodeCondition{*makeReadyCondition(true, "kubelet is posting ready status. WARNING: foo", now, now)},
 		},
 		{
-			desc:             "new, not ready: runtime errors",
-			node:             withCapacity.DeepCopy(),
-			runtimeErrors:    []string{"foo", "bar"},
-			expectConditions: []v1.NodeCondition{*makeReadyCondition(false, "foo,bar", now, now)},
-		},
-		{
-			desc:             "new, not ready: network errors",
-			node:             withCapacity.DeepCopy(),
-			networkErrors:    []string{"foo", "bar"},
-			expectConditions: []v1.NodeCondition{*makeReadyCondition(false, "foo,bar", now, now)},
-		},
-		{
 			desc:             "new, not ready: runtime and network errors",
 			node:             withCapacity.DeepCopy(),
-			runtimeErrors:    []string{"runtime"},
-			networkErrors:    []string{"network"},
-			expectConditions: []v1.NodeCondition{*makeReadyCondition(false, "runtime,network", now, now)},
+			runtimeErrors:    errors.New("runtime"),
+			networkErrors:    errors.New("network"),
+			expectConditions: []v1.NodeCondition{*makeReadyCondition(false, "[runtime, network]", now, now)},
 		},
 		{
 			desc: "new, not ready: missing capacities",

@@ -973,7 +962,7 @@ func TestReadyCondition(t *testing.T) {
 				node.Status.Conditions = []v1.NodeCondition{*makeReadyCondition(true, "", before, before)}
 				return node
 			}(),
-			runtimeErrors:    []string{"foo"},
+			runtimeErrors:    errors.New("foo"),
 			expectConditions: []v1.NodeCondition{*makeReadyCondition(false, "foo", now, now)},
 			expectEvents: []testEvent{
 				{

@@ -999,17 +988,17 @@ func TestReadyCondition(t *testing.T) {
 				node.Status.Conditions = []v1.NodeCondition{*makeReadyCondition(false, "", before, before)}
 				return node
 			}(),
-			runtimeErrors:    []string{"foo"},
+			runtimeErrors:    errors.New("foo"),
 			expectConditions: []v1.NodeCondition{*makeReadyCondition(false, "foo", before, now)},
 			expectEvents:     []testEvent{},
 		},
 	}
 	for _, tc := range cases {
 		t.Run(tc.desc, func(t *testing.T) {
-			runtimeErrorsFunc := func() []string {
+			runtimeErrorsFunc := func() error {
 				return tc.runtimeErrors
 			}
-			networkErrorsFunc := func() []string {
+			networkErrorsFunc := func() error {
 				return tc.networkErrors
 			}
 			cmStatusFunc := func() cm.Status {

@@ -17,9 +17,12 @@ limitations under the License.
 package kubelet
 
 import (
+	"errors"
 	"fmt"
 	"sync"
 	"time"
+
+	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 )
 
 type runtimeState struct {

@@ -70,32 +73,32 @@ func (s *runtimeState) podCIDR() string {
 	return s.cidr
 }
 
-func (s *runtimeState) runtimeErrors() []string {
+func (s *runtimeState) runtimeErrors() error {
 	s.RLock()
 	defer s.RUnlock()
-	var ret []string
+	errs := []error{}
 	if s.lastBaseRuntimeSync.IsZero() {
-		ret = append(ret, "container runtime status check may not have completed yet")
+		errs = append(errs, errors.New("container runtime status check may not have completed yet."))
 	} else if !s.lastBaseRuntimeSync.Add(s.baseRuntimeSyncThreshold).After(time.Now()) {
-		ret = append(ret, "container runtime is down")
+		errs = append(errs, errors.New("container runtime is down."))
 	}
 	for _, hc := range s.healthChecks {
 		if ok, err := hc.fn(); !ok {
-			ret = append(ret, fmt.Sprintf("%s is not healthy: %v", hc.name, err))
+			errs = append(errs, fmt.Errorf("%s is not healthy: %v.", hc.name, err))
 		}
 	}
 
-	return ret
+	return utilerrors.NewAggregate(errs)
 }
 
-func (s *runtimeState) networkErrors() []string {
+func (s *runtimeState) networkErrors() error {
 	s.RLock()
 	defer s.RUnlock()
-	var ret []string
+	errs := []error{}
 	if s.networkError != nil {
-		ret = append(ret, s.networkError.Error())
+		errs = append(errs, s.networkError)
 	}
-	return ret
+	return utilerrors.NewAggregate(errs)
 }
 
 func newRuntimeState(

@@ -104,6 +107,6 @@ func newRuntimeState(
 	return &runtimeState{
 		lastBaseRuntimeSync:      time.Time{},
 		baseRuntimeSyncThreshold: runtimeSyncThreshold,
-		networkError:             fmt.Errorf("network state unknown"),
+		networkError:             ErrNetworkUnknown,
 	}
 }

@@ -89,7 +89,7 @@ type Reconciler interface {
 // safely (prevents more than one operation from being triggered on the same
 // volume)
 // mounter - mounter passed in from kubelet, passed down unmount path
-// volumePluginMrg - volume plugin manager passed from kubelet
+// volumePluginMgr - volume plugin manager passed from kubelet
 func NewReconciler(
 	kubeClient clientset.Interface,
 	controllerAttachDetachEnabled bool,