mirror of https://github.com/k3s-io/k3s
Revert "bootstrap: Start hostNetwork pods even if network plugin not ready"
parent 0dc983a4c0
commit 94f580ef03
@@ -91,6 +91,7 @@ EOF
 if [[ ! -z "${KUBELET_APISERVER:-}" ]] && [[ ! -z "${KUBELET_CERT:-}" ]] && [[ ! -z "${KUBELET_KEY:-}" ]]; then
   cat <<EOF >>/etc/salt/minion.d/grains.conf
 kubelet_api_servers: '${KUBELET_APISERVER}'
+cbr-cidr: 10.123.45.0/29
 EOF
 else
   # If the kubelet is running disconnected from a master, give it a fixed
@@ -109,6 +110,7 @@ salt-node-role() {
 grains:
   roles:
     - kubernetes-pool
+  cbr-cidr: 10.123.45.0/29
   cloud: aws
   api_servers: '${API_SERVERS}'
 EOF
@@ -949,6 +949,7 @@ EOF
 if [[ ! -z "${KUBELET_APISERVER:-}" ]] && [[ ! -z "${KUBELET_CERT:-}" ]] && [[ ! -z "${KUBELET_KEY:-}" ]]; then
   cat <<EOF >>/etc/salt/minion.d/grains.conf
 kubelet_api_servers: '${KUBELET_APISERVER}'
+cbr-cidr: 10.123.45.0/29
 EOF
 else
   # If the kubelet is running disconnected from a master, give it a fixed
@@ -967,6 +968,7 @@ function salt-node-role() {
 grains:
   roles:
     - kubernetes-pool
+  cbr-cidr: 10.123.45.0/29
   cloud: gce
   api_servers: '${KUBERNETES_MASTER_NAME}'
 EOF
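The cbr-cidr grain restored in the four hunks above pins the master's bridge/pod CIDR to 10.123.45.0/29; per the comment restored further down, a /29 is the minimum the kubelet accepts (#32844). A quick illustrative Go check (stdlib only, not part of this change) of how small that range is:

package main

import (
	"fmt"
	"net"
)

func main() {
	// The fixed master pod CIDR restored by this revert.
	_, podCIDR, err := net.ParseCIDR("10.123.45.0/29")
	if err != nil {
		panic(err)
	}
	ones, bits := podCIDR.Mask.Size()
	// 32 - 29 = 3 host bits, so the block holds 2^3 = 8 addresses,
	// just enough for the master's handful of static pods.
	fmt.Printf("%s holds %d addresses\n", podCIDR, 1<<(bits-ones))
}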
@@ -486,8 +486,12 @@ function start-kubelet {
   if [[ ! -z "${KUBELET_APISERVER:-}" && ! -z "${KUBELET_CERT:-}" && ! -z "${KUBELET_KEY:-}" ]]; then
     flags+=" --api-servers=https://${KUBELET_APISERVER}"
     flags+=" --register-schedulable=false"
+    # need at least a /29 pod cidr for now due to #32844
+    # TODO: determine if we still allow non-hostnetwork pods to run on master, clean up master pod setup
+    # WARNING: potential ip range collision with 10.123.45.0/29
+    flags+=" --pod-cidr=10.123.45.0/29"
+    reconcile_cidr="false"
   else
     # Standalone mode (not widely used?)
     flags+=" --pod-cidr=${MASTER_IP_RANGE}"
   fi
 else # For nodes
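The restored WARNING notes that the hardcoded 10.123.45.0/29 can collide with a cluster's real address space. A small hypothetical helper (the overlaps function and the example MASTER_IP_RANGE value are illustrative, not from this change) that detects such a collision:

package main

import (
	"fmt"
	"net"
)

// overlaps reports whether two CIDR blocks share addresses; for valid
// parsed CIDRs it suffices to test whether either network contains the
// other's base address.
func overlaps(a, b *net.IPNet) bool {
	return a.Contains(b.IP) || b.Contains(a.IP)
}

func main() {
	_, podCIDR, _ := net.ParseCIDR("10.123.45.0/29")      // hardcoded by the revert
	_, masterRange, _ := net.ParseCIDR("10.123.45.0/24")  // example MASTER_IP_RANGE
	fmt.Println(overlaps(podCIDR, masterRange))           // true: the /29 sits inside the /24
}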
@@ -155,7 +155,7 @@ assemble_kubelet_flags() {
 if [ ! -z "${KUBELET_APISERVER:-}" ] && \
    [ ! -z "${KUBELET_CERT:-}" ] && \
    [ ! -z "${KUBELET_KEY:-}" ]; then
-  KUBELET_CMD_FLAGS="${KUBELET_CMD_FLAGS} --api-servers=https://${KUBELET_APISERVER} --register-schedulable=false"
+  KUBELET_CMD_FLAGS="${KUBELET_CMD_FLAGS} --api-servers=https://${KUBELET_APISERVER} --register-schedulable=false --reconcile-cidr=false --pod-cidr=10.123.45.0/29"
 else
   KUBELET_CMD_FLAGS="${KUBELET_CMD_FLAGS} --pod-cidr=${MASTER_IP_RANGE}"
 fi
@@ -35,6 +35,7 @@
 {% if grains.kubelet_api_servers is defined -%}
   {% set api_servers_with_port = "--api-servers=https://" + grains.kubelet_api_servers -%}
   {% set master_kubelet_args = master_kubelet_args + "--register-schedulable=false" -%}
+  {% set reconcile_cidr_args = "--reconcile-cidr=false" -%}
 {% else -%}
   {% set api_servers_with_port = "" -%}
 {% endif -%}
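Both the trusty script and the salt template above restore --reconcile-cidr=false, which keeps the kubelet from replacing its locally configured --pod-cidr with the CIDR recorded on its Node object, so the master holds on to the fixed /29. A simplified, hypothetical sketch of that switch (the real kubelet logic takes more inputs):

package main

import "fmt"

// choosePodCIDR models --reconcile-cidr: when reconciling, the CIDR
// assigned via the API server wins; otherwise the local flag value is kept.
func choosePodCIDR(reconcile bool, localCIDR, nodeCIDR string) string {
	if reconcile && nodeCIDR != "" {
		return nodeCIDR
	}
	return localCIDR
}

func main() {
	// With --reconcile-cidr=false the hardcoded /29 survives even if the
	// API server has assigned the node a different range.
	fmt.Println(choosePodCIDR(false, "10.123.45.0/29", "10.244.0.0/24")) // 10.123.45.0/29
	fmt.Println(choosePodCIDR(true, "10.123.45.0/29", "10.244.0.0/24"))  // 10.244.0.0/24
}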
@@ -1496,12 +1496,6 @@ func (kl *Kubelet) rejectPod(pod *api.Pod, reason, message string) {
 // can be admitted, a brief single-word reason and a message explaining why
 // the pod cannot be admitted.
 func (kl *Kubelet) canAdmitPod(pods []*api.Pod, pod *api.Pod) (bool, string, string) {
-	if rs := kl.runtimeState.networkErrors(); len(rs) != 0 {
-		if !podUsesHostNetwork(pod) {
-			return false, "NetworkNotReady", fmt.Sprintf("Network is not ready: %v", rs)
-		}
-	}
-
 	// the kubelet will invoke each pod admit handler in sequence
 	// if any handler rejects, the pod is rejected.
 	// TODO: move out of disk check into a pod admitter
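The deleted block was the heart of the original change: while the network plugin reported errors, canAdmitPod admitted only hostNetwork pods and rejected everything else with NetworkNotReady. A stripped-down model of that gate (pod here is a stand-in for the real api.Pod):

package main

import "fmt"

type pod struct {
	name        string
	hostNetwork bool
}

// canAdmit models the admission gate this revert removes: with outstanding
// network errors, only hostNetwork pods may start.
func canAdmit(networkErrors []string, p pod) (bool, string) {
	if len(networkErrors) != 0 && !p.hostNetwork {
		return false, fmt.Sprintf("NetworkNotReady: network is not ready: %v", networkErrors)
	}
	return true, ""
}

func main() {
	errs := []string{"cni plugin not initialized"}
	for _, p := range []pod{{"kube-apiserver", true}, {"ordinary-app", false}} {
		ok, reason := canAdmit(errs, p)
		fmt.Println(p.name, ok, reason)
	}
}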
@@ -1538,7 +1532,7 @@ func (kl *Kubelet) syncLoop(updates <-chan kubetypes.PodUpdate, handler SyncHandler) {
 	defer housekeepingTicker.Stop()
 	plegCh := kl.pleg.Watch()
 	for {
-		if rs := kl.runtimeState.runtimeErrors(); len(rs) != 0 {
+		if rs := kl.runtimeState.errors(); len(rs) != 0 {
 			glog.Infof("skipping pod synchronization - %v", rs)
 			time.Sleep(5 * time.Second)
 			continue
@@ -568,8 +568,7 @@ func (kl *Kubelet) setNodeReadyCondition(node *api.Node) {
 	// ref: https://github.com/kubernetes/kubernetes/issues/16961
 	currentTime := unversioned.NewTime(kl.clock.Now())
 	var newNodeReadyCondition api.NodeCondition
-	rs := append(kl.runtimeState.runtimeErrors(), kl.runtimeState.networkErrors()...)
-	if len(rs) == 0 {
+	if rs := kl.runtimeState.errors(); len(rs) == 0 {
 		newNodeReadyCondition = api.NodeCondition{
 			Type:   api.NodeReady,
 			Status: api.ConditionTrue,
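With the revert, setNodeReadyCondition consults the single merged errors() list again, so an outstanding network error once more marks the whole node NotReady (and, per the syncLoop hunk above, blocks pod synchronization). A minimal model of that condition:

package main

import "fmt"

// nodeReady models the restored behavior: any entry in the merged
// errors() list, network errors included, makes the node report NotReady.
func nodeReady(errs []string) string {
	if len(errs) == 0 {
		return "Ready=True"
	}
	return fmt.Sprintf("Ready=False (%v)", errs)
}

func main() {
	fmt.Println(nodeReady(nil))
	fmt.Println(nodeReady([]string{"network plugin not ready"}))
}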
@@ -83,7 +83,6 @@ func TestRunOnce(t *testing.T) {
 		kubeClient:   &fake.Clientset{},
 		hostname:     testKubeletHostname,
 		nodeName:     testKubeletHostname,
 		runtimeState: newRuntimeState(time.Second),
 	}
 	kb.containerManager = cm.NewStubContainerManager()
@@ -68,13 +68,16 @@ func (s *runtimeState) setInitError(err error) {
 	s.initError = err
 }

-func (s *runtimeState) runtimeErrors() []string {
+func (s *runtimeState) errors() []string {
 	s.RLock()
 	defer s.RUnlock()
 	var ret []string
 	if s.initError != nil {
 		ret = append(ret, s.initError.Error())
 	}
+	if s.networkError != nil {
+		ret = append(ret, s.networkError.Error())
+	}
 	if !s.lastBaseRuntimeSync.Add(s.baseRuntimeSyncThreshold).After(time.Now()) {
 		ret = append(ret, "container runtime is down")
 	}
@@ -84,16 +87,6 @@ func (s *runtimeState) runtimeErrors() []string {
 	return ret
 }

-func (s *runtimeState) networkErrors() []string {
-	s.RLock()
-	defer s.RUnlock()
-	var ret []string
-	if s.networkError != nil {
-		ret = append(ret, s.networkError.Error())
-	}
-	return ret
-}
-
 func newRuntimeState(
 	runtimeSyncThreshold time.Duration,
 ) *runtimeState {
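Together the two hunks above fold runtimeErrors() and networkErrors() back into one errors() method. A self-contained model of the restored type (the setNetworkState setter is assumed to match the real file, which is not shown in this diff):

package main

import (
	"fmt"
	"sync"
	"time"
)

// runtimeState is a trimmed-down model of the struct this revert restores:
// a single errors() call again covers init, network, and runtime-sync problems.
type runtimeState struct {
	sync.RWMutex
	lastBaseRuntimeSync      time.Time
	baseRuntimeSyncThreshold time.Duration
	initError                error
	networkError             error
}

func (s *runtimeState) setNetworkState(err error) {
	s.Lock()
	defer s.Unlock()
	s.networkError = err
}

func (s *runtimeState) errors() []string {
	s.RLock()
	defer s.RUnlock()
	var ret []string
	if s.initError != nil {
		ret = append(ret, s.initError.Error())
	}
	if s.networkError != nil {
		ret = append(ret, s.networkError.Error())
	}
	if !s.lastBaseRuntimeSync.Add(s.baseRuntimeSyncThreshold).After(time.Now()) {
		ret = append(ret, "container runtime is down")
	}
	return ret
}

func main() {
	s := &runtimeState{
		lastBaseRuntimeSync:      time.Now(),
		baseRuntimeSyncThreshold: time.Minute,
	}
	s.setNetworkState(fmt.Errorf("network plugin not ready"))
	fmt.Println(s.errors()) // [network plugin not ready]
	s.setNetworkState(nil)
	fmt.Println(s.errors()) // []
}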