mirror of https://github.com/k3s-io/k3s
Merge pull request #39951 from shyamjvs/fix-kubemark-npd
Automatic merge from submit-queue (batch tested with PRs 40081, 39951)

Passing correct master address to kubemark NPD & authenticating+authorizing it with apiserver

Fixes #39245
Fixes https://github.com/kubernetes/node-problem-detector/issues/50

Added RBAC for the NPD and fixed the issue of the NPD falling back to inClusterConfig.

cc @kubernetes/sig-scalability-misc @wojtek-t @gmarek
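In short: instead of letting the hollow NPD fall back to inClusterConfig (and thereby talk to the real master), the hollow-node template below starts it against the kubemark master with an explicit override URI and a dedicated kubeconfig. Written out as a plain command line purely for readability (in the pod spec these are entries of a JSON command array, and {{master_ip}} is substituted by start-kubemark.sh):

  # Sketch: how the hollow NPD is pointed at the kubemark master.
  /node-problem-detector \
    --kernel-monitor=/config/kernel.monitor \
    --apiserver-override="https://${MASTER_IP}:443?inClusterConfig=false&auth=/kubeconfig/npd.kubeconfig" \
    --alsologtostderr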
commit da7d17c8dd
@@ -27,9 +27,9 @@
}
},
{
"name": "npdconfig-volume",
"name": "kernelmonitorconfig-volume",
"configMap": {
"name": "node-problem-detector-config"
"name": "node-configmap"
}
},
{
@@ -43,6 +43,10 @@
"hostPath": {
"path": "/var/log"
}
},
{
"name": "no-serviceaccount-access-to-real-master",
"emptyDir": {}
}
],
"containers": [
@@ -65,7 +69,7 @@
}
},
{
"name": "MY_POD_NAME",
"name": "NODE_NAME",
"valueFrom": {
"fieldRef": {
"fieldPath": "metadata.name"
@@ -76,12 +80,13 @@
"command": [
"/bin/sh",
"-c",
"./kubemark.sh --morph=kubelet --kubeconfig=/kubeconfig/kubelet.kubeconfig $(CONTENT_TYPE) --v=2 1>>/var/logs/kubelet_$(MY_POD_NAME).log 2>&1"
"./kubemark.sh --morph=kubelet --name=$(NODE_NAME) --kubeconfig=/kubeconfig/kubelet.kubeconfig $(CONTENT_TYPE) --v=2 1>>/var/logs/kubelet_$(MY_POD_NAME).log 2>&1"
],
"volumeMounts": [
{
"name": "kubeconfig-volume",
"mountPath": "/kubeconfig"
"mountPath": "/kubeconfig",
"readOnly": true
},
{
"name": "logs-volume",
@@ -113,7 +118,7 @@
}
},
{
"name": "MY_POD_NAME",
"name": "NODE_NAME",
"valueFrom": {
"fieldRef": {
"fieldPath": "metadata.name"
@@ -124,12 +129,13 @@
"command": [
"/bin/sh",
"-c",
"./kubemark.sh --morph=proxy --kubeconfig=/kubeconfig/kubeproxy.kubeconfig $(CONTENT_TYPE) --v=2 1>>/var/logs/kube_proxy_$(MY_POD_NAME).log 2>&1"
"./kubemark.sh --morph=proxy --name=$(NODE_NAME) --kubeconfig=/kubeconfig/kubeproxy.kubeconfig $(CONTENT_TYPE) --v=2 1>>/var/logs/kube_proxy_$(MY_POD_NAME).log 2>&1"
],
"volumeMounts": [
{
"name": "kubeconfig-volume",
"mountPath": "/kubeconfig"
"mountPath": "/kubeconfig",
"readOnly": true
},
{
"name": "logs-volume",
@@ -145,20 +151,32 @@
},
{
"name": "hollow-node-problem-detector",
"image": "gcr.io/google_containers/node-problem-detector:v0.2",
"image": "gcr.io/google_containers/node-problem-detector:v0.3",
"env": [
{
"name": "NODE_NAME",
"valueFrom": {
"fieldRef": {
"fieldPath": "spec.nodeName"
"fieldPath": "metadata.name"
}
}
}
],
"command": [
"/node-problem-detector",
"--kernel-monitor=/config/kernel.monitor",
"--apiserver-override=https://{{master_ip}}:443?inClusterConfig=false&auth=/kubeconfig/npd.kubeconfig",
"--alsologtostderr",
"1>>/var/logs/npd_$(NODE_NAME).log 2>&1"
],
"volumeMounts": [
{
"name": "npdconfig-volume",
"name": "kubeconfig-volume",
"mountPath": "/kubeconfig",
"readOnly": true
},
{
"name": "kernelmonitorconfig-volume",
"mountPath": "/config",
"readOnly": true
},
@@ -166,6 +184,15 @@
"name": "kernellog-volume",
"mountPath": "/log",
"readOnly": true
},
{
"name": "no-serviceaccount-access-to-real-master",
"mountPath": "/var/run/secrets/kubernetes.io/serviceaccount",
"readOnly": true
},
{
"name": "logs-volume",
"mountPath": "/var/logs"
}
],
"resources": {
@@ -0,0 +1,15 @@
# This is the role binding for the node-problem-detector.
apiVersion: rbac.authorization.k8s.io/v1alpha1
kind: ClusterRoleBinding
metadata:
  name: node-problem-detector-binding
  labels:
    kubernetes.io/cluster-service: "true"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:node-problem-detector
subjects:
- apiVersion: rbac/v1alpha1
  kind: User
  name: system:node-problem-detector
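The subject of this binding is a plain User rather than a ServiceAccount; it is authenticated via the static token that start-kubemark.sh appends to known_tokens.csv on the kubemark master (see the script changes below). Rows in that file follow the static token file format token,user,uid with optional quoted groups, and the user column has to match the subject name above, which can be sanity-checked on the master, e.g.:

  # On the kubemark master: the appended row must name the same user as the
  # ClusterRoleBinding subject for the RBAC grant to apply.
  sudo grep "system:node-problem-detector" /etc/srv/kubernetes/known_tokens.csv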
@@ -51,35 +51,6 @@ EOF

writeEnvironmentFile

MAKE_DIR="${KUBE_ROOT}/cluster/images/kubemark"

KUBEMARK_BIN="$(kube::util::find-binary-for-platform kubemark linux/amd64)"
if [[ -z "${KUBEMARK_BIN}" ]]; then
  echo 'Cannot find cmd/kubemark binary'
  exit 1
fi

echo "Copying kubemark to ${MAKE_DIR}"
cp "${KUBEMARK_BIN}" "${MAKE_DIR}"

CURR_DIR=`pwd`
cd "${MAKE_DIR}"
RETRIES=3
for attempt in $(seq 1 ${RETRIES}); do
  if ! make; then
    if [[ $((attempt)) -eq "${RETRIES}" ]]; then
      echo "${color_red}Make failed. Exiting.${color_norm}"
      exit 1
    fi
    echo -e "${color_yellow}Make attempt $(($attempt)) failed. Retrying.${color_norm}" >& 2
    sleep $(($attempt * 5))
  else
    break
  fi
done
rm kubemark
cd $CURR_DIR

GCLOUD_COMMON_ARGS="--project ${PROJECT} --zone ${ZONE}"

run-gcloud-compute-with-retries disks create "${MASTER_NAME}-pd" \
@@ -134,10 +105,7 @@ create-certs ${MASTER_IP}
KUBELET_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
KUBE_PROXY_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
HEAPSTER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)

echo "${CA_CERT_BASE64}" | base64 --decode > "${RESOURCE_DIRECTORY}/ca.crt"
echo "${KUBECFG_CERT_BASE64}" | base64 --decode > "${RESOURCE_DIRECTORY}/kubecfg.crt"
echo "${KUBECFG_KEY_BASE64}" | base64 --decode > "${RESOURCE_DIRECTORY}/kubecfg.key"
NODE_PROBLEM_DETECTOR_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)

until gcloud compute ssh --zone="${ZONE}" --project="${PROJECT}" "${MASTER_NAME}" --command="ls" &> /dev/null; do
  sleep 1
@@ -156,6 +124,7 @@ run-gcloud-compute-with-retries ssh --zone="${ZONE}" --project="${PROJECT}" "${M
sudo bash -c \"echo \"${KUBELET_TOKEN},system:node:node-name,uid:kubelet,system:nodes\" >> /etc/srv/kubernetes/known_tokens.csv\" && \
sudo bash -c \"echo \"${KUBE_PROXY_TOKEN},system:kube-proxy,uid:kube_proxy\" >> /etc/srv/kubernetes/known_tokens.csv\" && \
sudo bash -c \"echo \"${HEAPSTER_TOKEN},system:heapster,uid:heapster\" >> /etc/srv/kubernetes/known_tokens.csv\" && \
sudo bash -c \"echo \"${NODE_PROBLEM_DETECTOR_TOKEN},system:node-problem-detector,uid:system:node-problem-detector\" >> /etc/srv/kubernetes/known_tokens.csv\" && \
sudo bash -c \"echo ${password},admin,admin > /etc/srv/kubernetes/basic_auth.csv\""

run-gcloud-compute-with-retries copy-files --zone="${ZONE}" --project="${PROJECT}" \
@@ -177,6 +146,34 @@ gcloud compute ssh "${MASTER_NAME}" --zone="${ZONE}" --project="${PROJECT}" \
sudo chmod a+x /home/kubernetes/start-kubemark-master.sh && \
sudo bash /home/kubernetes/start-kubemark-master.sh"

# Setup the docker image for kubemark hollow-node.
MAKE_DIR="${KUBE_ROOT}/cluster/images/kubemark"
KUBEMARK_BIN="$(kube::util::find-binary-for-platform kubemark linux/amd64)"
if [[ -z "${KUBEMARK_BIN}" ]]; then
  echo 'Cannot find cmd/kubemark binary'
  exit 1
fi

echo "Copying kubemark to ${MAKE_DIR}"
cp "${KUBEMARK_BIN}" "${MAKE_DIR}"
CURR_DIR=`pwd`
cd "${MAKE_DIR}"
RETRIES=3
for attempt in $(seq 1 ${RETRIES}); do
  if ! make; then
    if [[ $((attempt)) -eq "${RETRIES}" ]]; then
      echo "${color_red}Make failed. Exiting.${color_norm}"
      exit 1
    fi
    echo -e "${color_yellow}Make attempt $(($attempt)) failed. Retrying.${color_norm}" >& 2
    sleep $(($attempt * 5))
  else
    break
  fi
done
rm kubemark
cd $CURR_DIR

# Create kubeconfig for Kubelet.
KUBELET_KUBECONFIG_CONTENTS=$(echo "apiVersion: v1
kind: Config
@@ -195,7 +192,7 @@ contexts:
    cluster: kubemark
    user: kubelet
  name: kubemark-context
current-context: kubemark-context" | base64 | tr -d "\n\r")
current-context: kubemark-context")

# Create kubeconfig for Kubeproxy.
KUBEPROXY_KUBECONFIG_CONTENTS=$(echo "apiVersion: v1
@@ -214,7 +211,7 @@ contexts:
    cluster: kubemark
    user: kube-proxy
  name: kubemark-context
current-context: kubemark-context" | base64 | tr -d "\n\r")
current-context: kubemark-context")

# Create kubeconfig for Heapster.
HEAPSTER_KUBECONFIG_CONTENTS=$(echo "apiVersion: v1
@@ -233,39 +230,28 @@ contexts:
    cluster: kubemark
    user: heapster
  name: kubemark-context
current-context: kubemark-context" | base64 | tr -d "\n\r")
current-context: kubemark-context")

KUBECONFIG_SECRET="${RESOURCE_DIRECTORY}/kubeconfig_secret.json"
cat > "${KUBECONFIG_SECRET}" << EOF
{
  "apiVersion": "v1",
  "kind": "Secret",
  "metadata": {
    "name": "kubeconfig"
  },
  "type": "Opaque",
  "data": {
    "kubelet.kubeconfig": "${KUBELET_KUBECONFIG_CONTENTS}",
    "kubeproxy.kubeconfig": "${KUBEPROXY_KUBECONFIG_CONTENTS}",
    "heapster.kubeconfig": "${HEAPSTER_KUBECONFIG_CONTENTS}"
  }
}
EOF

NODE_CONFIGMAP="${RESOURCE_DIRECTORY}/node_config_map.json"
cat > "${NODE_CONFIGMAP}" << EOF
{
  "apiVersion": "v1",
  "kind": "ConfigMap",
  "metadata": {
    "name": "node-configmap"
  },
  "data": {
    "content.type": "${TEST_CLUSTER_API_CONTENT_TYPE}"
  }
}
EOF
# Create kubeconfig for NodeProblemDetector.
NPD_KUBECONFIG_CONTENTS=$(echo "apiVersion: v1
kind: Config
users:
- name: node-problem-detector
  user:
    token: ${NODE_PROBLEM_DETECTOR_TOKEN}
clusters:
- name: kubemark
  cluster:
    insecure-skip-tls-verify: true
    server: https://${MASTER_IP}
contexts:
- context:
    cluster: kubemark
    user: node-problem-detector
  name: kubemark-npd-context
current-context: kubemark-npd-context")

# Create kubeconfig for local kubectl.
LOCAL_KUBECONFIG="${RESOURCE_DIRECTORY}/kubeconfig.kubemark"
cat > "${LOCAL_KUBECONFIG}" << EOF
apiVersion: v1
@@ -292,6 +278,7 @@ EOF

sed "s/{{numreplicas}}/${NUM_NODES:-10}/g" "${RESOURCE_DIRECTORY}/hollow-node_template.json" > "${RESOURCE_DIRECTORY}/hollow-node.json"
sed -i'' -e "s/{{project}}/${PROJECT}/g" "${RESOURCE_DIRECTORY}/hollow-node.json"
sed -i'' -e "s/{{master_ip}}/${MASTER_IP}/g" "${RESOURCE_DIRECTORY}/hollow-node.json"

mkdir "${RESOURCE_DIRECTORY}/addons" || true
@@ -303,16 +290,23 @@ eventer_mem_per_node=500
eventer_mem=$((200 * 1024 + ${eventer_mem_per_node}*${NUM_NODES:-10}))
sed -i'' -e "s/{{EVENTER_MEM}}/${eventer_mem}/g" "${RESOURCE_DIRECTORY}/addons/heapster.json"

# Create kubemark namespace.
"${KUBECTL}" create -f "${RESOURCE_DIRECTORY}/kubemark-ns.json"
"${KUBECTL}" create -f "${KUBECONFIG_SECRET}" --namespace="kubemark"
"${KUBECTL}" create -f "${NODE_CONFIGMAP}" --namespace="kubemark"
# Create configmap for configuring hollow- kubelet, proxy and npd.
"${KUBECTL}" create configmap "node-configmap" --namespace="kubemark" \
  --from-literal=content.type="${TEST_CLUSTER_API_CONTENT_TYPE}" \
  --from-file=kernel.monitor="${RESOURCE_DIRECTORY}/kernel-monitor.json"
# Create secret for passing kubeconfigs to kubelet, kubeproxy and npd.
"${KUBECTL}" create secret generic "kubeconfig" --type=Opaque --namespace="kubemark" \
  --from-literal=kubelet.kubeconfig="${KUBELET_KUBECONFIG_CONTENTS}" \
  --from-literal=kubeproxy.kubeconfig="${KUBEPROXY_KUBECONFIG_CONTENTS}" \
  --from-literal=heapster.kubeconfig="${HEAPSTER_KUBECONFIG_CONTENTS}" \
  --from-literal=npd.kubeconfig="${NPD_KUBECONFIG_CONTENTS}"
# Create addon pods.
"${KUBECTL}" create -f "${RESOURCE_DIRECTORY}/addons" --namespace="kubemark"
"${KUBECTL}" create configmap node-problem-detector-config --from-file="${RESOURCE_DIRECTORY}/kernel-monitor.json" --namespace="kubemark"
# Create the replication controller for hollow-nodes.
"${KUBECTL}" create -f "${RESOURCE_DIRECTORY}/hollow-node.json" --namespace="kubemark"

rm "${KUBECONFIG_SECRET}"
rm "${NODE_CONFIGMAP}"

echo "Waiting for all HollowNodes to become Running..."
start=$(date +%s)
nodes=$("${KUBECTL}" --kubeconfig="${LOCAL_KUBECONFIG}" get node 2> /dev/null) || true
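Since the kubeconfigs are now handed to the hollow nodes via kubectl create secret generic --from-literal rather than a hand-assembled JSON secret, kubectl takes care of the base64 encoding. If a hollow node fails to authenticate, one way to inspect what actually landed in the secret is roughly the following (the backslash-escaped dot in the jsonpath is needed because the data key itself contains a period):

  # Decode the NPD kubeconfig back out of the generated secret for inspection.
  "${KUBECTL}" --namespace="kubemark" get secret kubeconfig \
    -o jsonpath="{.data['npd\.kubeconfig']}" | base64 --decode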