Refactored kubemark code into provider-specific and provider-independent parts [Part-1]

pull/6/head
Shyam Jeedigunta 2017-01-19 14:55:16 +01:00
parent 5b75980c32
commit c62e5214c3
9 changed files with 363 additions and 279 deletions

View File

@@ -1,6 +1,6 @@
 #!/bin/bash

-# Copyright 2015 The Kubernetes Authors.
+# Copyright 2017 The Kubernetes Authors.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

View File

@@ -15,5 +15,6 @@
 # limitations under the License.

 KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
-source ${KUBE_ROOT}/cluster/gce/util.sh
-source ${KUBE_ROOT}/cluster/kubemark/config-default.sh
+source ${KUBE_ROOT}/test/kubemark/cloud-provider-config.sh
+source ${KUBE_ROOT}/cluster/${CLOUD_PROVIDER}/util.sh
+source ${KUBE_ROOT}/cluster/kubemark/${CLOUD_PROVIDER}/config-default.sh

View File

@@ -0,0 +1,17 @@
+#!/bin/bash
+
+# Copyright 2017 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+CLOUD_PROVIDER="${CLOUD_PROVIDER:-gce}"
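
Reviewer note: the ${CLOUD_PROVIDER:-gce} expansion keeps GCE as the default while letting callers override the provider from the environment. A minimal usage sketch (the "aws" value is purely illustrative; this commit only adds the plumbing, not other providers):

# Override the provider for a single run; when unset, it falls back to "gce".
CLOUD_PROVIDER=aws ./test/kubemark/start-kubemark.sh

# Downstream, the scripts compose provider-specific paths from the variable:
#   source "${KUBE_ROOT}/cluster/${CLOUD_PROVIDER}/util.sh"
#   source "${KUBE_ROOT}/cluster/kubemark/${CLOUD_PROVIDER}/config-default.sh"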

View File

@@ -14,7 +14,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-source "${KUBE_ROOT}/cluster/kubemark/config-default.sh"
+source "${KUBE_ROOT}/test/kubemark/cloud-provider-config.sh"
+source "${KUBE_ROOT}/cluster/kubemark/${CLOUD_PROVIDER}/config-default.sh"
 source "${KUBE_ROOT}/cluster/kubemark/util.sh"
 source "${KUBE_ROOT}/cluster/lib/util.sh"

View File

@@ -14,9 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-curl https://sdk.cloud.google.com 2> /dev/null | bash
-sudo gcloud components update kubectl -q
-sudo ln -s /usr/local/share/google/google-cloud-sdk/bin/kubectl /bin/
+# This script assumes that kubectl binary is present in PATH.
 kubectl config set-cluster hollow-cluster --server=http://localhost:8080 --insecure-skip-tls-verify=true
 kubectl config set-credentials $(whoami)
 kubectl config set-context hollow-context --cluster=hollow-cluster --user=$(whoami)
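
Reviewer note: with the gcloud SDK bootstrap removed, this script only wires up a kubeconfig entry. A quick way to sanity-check the resulting context (assumes kubectl is already on PATH, as the new comment requires; not part of the commit):

kubectl config use-context hollow-context
kubectl cluster-info   # should answer from http://localhost:8080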

View File

@@ -80,7 +80,7 @@
       "command": [
         "/bin/sh",
         "-c",
-        "./kubemark.sh --morph=kubelet --name=$(NODE_NAME) --kubeconfig=/kubeconfig/kubelet.kubeconfig $(CONTENT_TYPE) --v=2 1>>/var/logs/kubelet_$(MY_POD_NAME).log 2>&1"
+        "./kubemark.sh --morph=kubelet --name=$(NODE_NAME) --kubeconfig=/kubeconfig/kubelet.kubeconfig $(CONTENT_TYPE) --v=2 1>>/var/logs/kubelet_$(NODE_NAME).log 2>&1"
       ],
       "volumeMounts": [
         {
@@ -129,7 +129,7 @@
       "command": [
         "/bin/sh",
         "-c",
-        "./kubemark.sh --morph=proxy --name=$(NODE_NAME) --kubeconfig=/kubeconfig/kubeproxy.kubeconfig $(CONTENT_TYPE) --v=2 1>>/var/logs/kube_proxy_$(MY_POD_NAME).log 2>&1"
+        "./kubemark.sh --morph=proxy --name=$(NODE_NAME) --kubeconfig=/kubeconfig/kubeproxy.kubeconfig $(CONTENT_TYPE) --v=2 1>>/var/logs/kubeproxy_$(NODE_NAME).log 2>&1"
       ],
       "volumeMounts": [
         {

View File

@@ -22,8 +22,9 @@ KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..

 # We need an absolute path to KUBE_ROOT
 ABSOLUTE_ROOT=$(readlink -f ${KUBE_ROOT})
-source ${KUBE_ROOT}/cluster/kubemark/util.sh
-source ${KUBE_ROOT}/cluster/kubemark/config-default.sh
+source "${KUBE_ROOT}/test/kubemark/cloud-provider-config.sh"
+source "${KUBE_ROOT}/cluster/kubemark/util.sh"
+source "${KUBE_ROOT}/cluster/kubemark/${CLOUD_PROVIDER}/config-default.sh"

 echo "Kubemark master name: ${MASTER_NAME}"

View File

@@ -14,15 +14,16 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-# Script that creates a Kubemark cluster with Master running on GCE.
+# Script that creates a Kubemark cluster for any given cloud provider.

-# Hack to make it work for OS X. Ugh...
 TMP_ROOT="$(dirname "${BASH_SOURCE}")/../.."
 KUBE_ROOT=$(readlink -e ${TMP_ROOT} 2> /dev/null || perl -MCwd -e 'print Cwd::abs_path shift' ${TMP_ROOT})

 source "${KUBE_ROOT}/test/kubemark/common.sh"

-function writeEnvironmentFile() {
+# Write all environment variables that we need to pass to the kubemark master,
+# locally to the file ${RESOURCE_DIRECTORY}/kubemark-master-env.sh.
+function create-master-environment-file {
   cat > "${RESOURCE_DIRECTORY}/kubemark-master-env.sh" <<EOF
 # Generic variables.
 INSTANCE_PREFIX="${INSTANCE_PREFIX:-}"
@@ -47,213 +48,134 @@ STORAGE_BACKEND="${STORAGE_BACKEND:-}"
 NUM_NODES="${NUM_NODES:-}"
 CUSTOM_ADMISSION_PLUGINS="${CUSTOM_ADMISSION_PLUGINS:-NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota}"
 EOF
+  echo "Created the environment file for master."
 }
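
Reviewer note: the heredoc delimiter here is unquoted (EOF, not 'EOF'), so every ${VAR:-} expands on the machine running this script; the file shipped to the master contains the already-resolved values. A minimal illustration of that bash behavior (the /tmp path is just for the example):

INSTANCE_PREFIX="demo"
cat > /tmp/env-example.sh <<EOF
INSTANCE_PREFIX="${INSTANCE_PREFIX:-}"
EOF
cat /tmp/env-example.sh   # prints: INSTANCE_PREFIX="demo"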
-writeEnvironmentFile
-
-GCLOUD_COMMON_ARGS="--project ${PROJECT} --zone ${ZONE}"
-run-gcloud-compute-with-retries disks create "${MASTER_NAME}-pd" \
-  ${GCLOUD_COMMON_ARGS} \
-  --type "${MASTER_DISK_TYPE}" \
-  --size "${MASTER_DISK_SIZE}"
-if [ "${EVENT_PD:-false}" == "true" ]; then
-  run-gcloud-compute-with-retries disks create "${MASTER_NAME}-event-pd" \
-    ${GCLOUD_COMMON_ARGS} \
-    --type "${MASTER_DISK_TYPE}" \
-    --size "${MASTER_DISK_SIZE}"
-fi
-run-gcloud-compute-with-retries addresses create "${MASTER_NAME}-ip" \
-  --project "${PROJECT}" \
-  --region "${REGION}" -q
-MASTER_IP=$(gcloud compute addresses describe "${MASTER_NAME}-ip" \
-  --project "${PROJECT}" --region "${REGION}" -q --format='value(address)')
-run-gcloud-compute-with-retries instances create "${MASTER_NAME}" \
-  ${GCLOUD_COMMON_ARGS} \
-  --address "${MASTER_IP}" \
-  --machine-type "${MASTER_SIZE}" \
-  --image-project="${MASTER_IMAGE_PROJECT}" \
-  --image "${MASTER_IMAGE}" \
-  --tags "${MASTER_TAG}" \
-  --network "${NETWORK}" \
-  --scopes "storage-ro,compute-rw,logging-write" \
-  --boot-disk-size "${MASTER_ROOT_DISK_SIZE}" \
-  --disk "name=${MASTER_NAME}-pd,device-name=master-pd,mode=rw,boot=no,auto-delete=no"
-if [ "${EVENT_PD:-false}" == "true" ]; then
-  echo "Attaching ${MASTER_NAME}-event-pd to ${MASTER_NAME}"
-  run-gcloud-compute-with-retries instances attach-disk "${MASTER_NAME}" \
-    ${GCLOUD_COMMON_ARGS} \
-    --disk "${MASTER_NAME}-event-pd" \
-    --device-name="master-event-pd"
-fi
-run-gcloud-compute-with-retries firewall-rules create "${INSTANCE_PREFIX}-kubemark-master-https" \
-  --project "${PROJECT}" \
-  --network "${NETWORK}" \
-  --source-ranges "0.0.0.0/0" \
-  --target-tags "${MASTER_TAG}" \
-  --allow "tcp:443"
-
-ensure-temp-dir
-gen-kube-bearertoken
-create-certs ${MASTER_IP}
-KUBELET_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
-KUBE_PROXY_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
-HEAPSTER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
-NODE_PROBLEM_DETECTOR_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
-
-until gcloud compute ssh --zone="${ZONE}" --project="${PROJECT}" "${MASTER_NAME}" --command="ls" &> /dev/null; do
-  sleep 1
-done
-
-password=$(python -c 'import string,random; print("".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16)))')
-
-run-gcloud-compute-with-retries ssh --zone="${ZONE}" --project="${PROJECT}" "${MASTER_NAME}" \
-  --command="sudo mkdir /home/kubernetes -p && sudo mkdir /etc/srv/kubernetes -p && \
-    sudo bash -c \"echo ${CA_CERT_BASE64} | base64 --decode > /etc/srv/kubernetes/ca.crt\" && \
-    sudo bash -c \"echo ${MASTER_CERT_BASE64} | base64 --decode > /etc/srv/kubernetes/server.cert\" && \
-    sudo bash -c \"echo ${MASTER_KEY_BASE64} | base64 --decode > /etc/srv/kubernetes/server.key\" && \
-    sudo bash -c \"echo ${KUBECFG_CERT_BASE64} | base64 --decode > /etc/srv/kubernetes/kubecfg.crt\" && \
-    sudo bash -c \"echo ${KUBECFG_KEY_BASE64} | base64 --decode > /etc/srv/kubernetes/kubecfg.key\" && \
-    sudo bash -c \"echo \"${KUBE_BEARER_TOKEN},admin,admin\" > /etc/srv/kubernetes/known_tokens.csv\" && \
-    sudo bash -c \"echo \"${KUBELET_TOKEN},system:node:node-name,uid:kubelet,system:nodes\" >> /etc/srv/kubernetes/known_tokens.csv\" && \
-    sudo bash -c \"echo \"${KUBE_PROXY_TOKEN},system:kube-proxy,uid:kube_proxy\" >> /etc/srv/kubernetes/known_tokens.csv\" && \
-    sudo bash -c \"echo \"${HEAPSTER_TOKEN},system:heapster,uid:heapster\" >> /etc/srv/kubernetes/known_tokens.csv\" && \
-    sudo bash -c \"echo \"${NODE_PROBLEM_DETECTOR_TOKEN},system:node-problem-detector,uid:system:node-problem-detector\" >> /etc/srv/kubernetes/known_tokens.csv\" && \
-    sudo bash -c \"echo ${password},admin,admin > /etc/srv/kubernetes/basic_auth.csv\""
-
-run-gcloud-compute-with-retries copy-files --zone="${ZONE}" --project="${PROJECT}" \
-  "${SERVER_BINARY_TAR}" \
-  "${RESOURCE_DIRECTORY}/kubemark-master-env.sh" \
-  "${RESOURCE_DIRECTORY}/start-kubemark-master.sh" \
-  "${KUBEMARK_DIRECTORY}/configure-kubectl.sh" \
-  "${RESOURCE_DIRECTORY}/manifests/etcd.yaml" \
-  "${RESOURCE_DIRECTORY}/manifests/etcd-events.yaml" \
-  "${RESOURCE_DIRECTORY}/manifests/kube-apiserver.yaml" \
-  "${RESOURCE_DIRECTORY}/manifests/kube-scheduler.yaml" \
-  "${RESOURCE_DIRECTORY}/manifests/kube-controller-manager.yaml" \
-  "${RESOURCE_DIRECTORY}/manifests/kube-addon-manager.yaml" \
-  "${RESOURCE_DIRECTORY}/manifests/addons/kubemark-rbac-bindings" \
-  "kubernetes@${MASTER_NAME}":/home/kubernetes/
-
-gcloud compute ssh "${MASTER_NAME}" --zone="${ZONE}" --project="${PROJECT}" \
-  --command="sudo chmod a+x /home/kubernetes/configure-kubectl.sh && \
-    sudo chmod a+x /home/kubernetes/start-kubemark-master.sh && \
-    sudo bash /home/kubernetes/start-kubemark-master.sh"
-
-# Setup the docker image for kubemark hollow-node.
-MAKE_DIR="${KUBE_ROOT}/cluster/images/kubemark"
-KUBEMARK_BIN="$(kube::util::find-binary-for-platform kubemark linux/amd64)"
-if [[ -z "${KUBEMARK_BIN}" ]]; then
-  echo 'Cannot find cmd/kubemark binary'
-  exit 1
-fi
-echo "Copying kubemark to ${MAKE_DIR}"
-cp "${KUBEMARK_BIN}" "${MAKE_DIR}"
-CURR_DIR=`pwd`
-cd "${MAKE_DIR}"
-RETRIES=3
-for attempt in $(seq 1 ${RETRIES}); do
-  if ! make; then
-    if [[ $((attempt)) -eq "${RETRIES}" ]]; then
-      echo "${color_red}Make failed. Exiting.${color_norm}"
-      exit 1
-    fi
-    echo -e "${color_yellow}Make attempt $(($attempt)) failed. Retrying.${color_norm}" >& 2
-    sleep $(($attempt * 5))
-  else
-    break
-  fi
-done
-rm kubemark
-cd $CURR_DIR
+# Create the master instance along with all required network and disk resources.
+function create-master-instance-with-resources {
+  GCLOUD_COMMON_ARGS="--project ${PROJECT} --zone ${ZONE}"
+
+  run-gcloud-compute-with-retries disks create "${MASTER_NAME}-pd" \
+    ${GCLOUD_COMMON_ARGS} \
+    --type "${MASTER_DISK_TYPE}" \
+    --size "${MASTER_DISK_SIZE}"
+
+  if [ "${EVENT_PD:-false}" == "true" ]; then
+    run-gcloud-compute-with-retries disks create "${MASTER_NAME}-event-pd" \
+      ${GCLOUD_COMMON_ARGS} \
+      --type "${MASTER_DISK_TYPE}" \
+      --size "${MASTER_DISK_SIZE}"
+  fi
+
+  run-gcloud-compute-with-retries addresses create "${MASTER_NAME}-ip" \
+    --project "${PROJECT}" \
+    --region "${REGION}" -q
+
+  MASTER_IP=$(gcloud compute addresses describe "${MASTER_NAME}-ip" \
+    --project "${PROJECT}" --region "${REGION}" -q --format='value(address)')
+
+  run-gcloud-compute-with-retries instances create "${MASTER_NAME}" \
+    ${GCLOUD_COMMON_ARGS} \
+    --address "${MASTER_IP}" \
+    --machine-type "${MASTER_SIZE}" \
+    --image-project="${MASTER_IMAGE_PROJECT}" \
+    --image "${MASTER_IMAGE}" \
+    --tags "${MASTER_TAG}" \
+    --network "${NETWORK}" \
+    --scopes "storage-ro,compute-rw,logging-write" \
+    --boot-disk-size "${MASTER_ROOT_DISK_SIZE}" \
+    --disk "name=${MASTER_NAME}-pd,device-name=master-pd,mode=rw,boot=no,auto-delete=no"
+
+  if [ "${EVENT_PD:-false}" == "true" ]; then
+    echo "Attaching ${MASTER_NAME}-event-pd to ${MASTER_NAME}"
+    run-gcloud-compute-with-retries instances attach-disk "${MASTER_NAME}" \
+      ${GCLOUD_COMMON_ARGS} \
+      --disk "${MASTER_NAME}-event-pd" \
+      --device-name="master-event-pd"
+  fi
+
+  run-gcloud-compute-with-retries firewall-rules create "${INSTANCE_PREFIX}-kubemark-master-https" \
+    --project "${PROJECT}" \
+    --network "${NETWORK}" \
+    --source-ranges "0.0.0.0/0" \
+    --target-tags "${MASTER_TAG}" \
+    --allow "tcp:443"
+}
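
Reviewer note: run-gcloud-compute-with-retries is defined elsewhere in the tree; for readers following along, the pattern it relies on is roughly the following (an illustrative approximation in bash, not the actual helper):

# Retry a gcloud compute subcommand a few times with growing backoff.
function retry-gcloud-compute {
  local attempt
  for attempt in 1 2 3; do
    gcloud compute "$@" && return 0
    echo "Attempt ${attempt} of 'gcloud compute $*' failed, retrying." >&2
    sleep $((attempt * 5))
  done
  echo "Giving up on 'gcloud compute $*'." >&2
  return 1
}
# e.g. retry-gcloud-compute disks create "example-disk" --size "20GB"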
-# Create kubeconfig for Kubelet.
-KUBELET_KUBECONFIG_CONTENTS=$(echo "apiVersion: v1
-kind: Config
-users:
-- name: kubelet
-  user:
-    client-certificate-data: "${KUBELET_CERT_BASE64}"
-    client-key-data: "${KUBELET_KEY_BASE64}"
-clusters:
-- name: kubemark
-  cluster:
-    certificate-authority-data: "${CA_CERT_BASE64}"
-    server: https://${MASTER_IP}
-contexts:
-- context:
-    cluster: kubemark
-    user: kubelet
-  name: kubemark-context
-current-context: kubemark-context")
-
-# Create kubeconfig for Kubeproxy.
-KUBEPROXY_KUBECONFIG_CONTENTS=$(echo "apiVersion: v1
-kind: Config
-users:
-- name: kube-proxy
-  user:
-    token: ${KUBE_PROXY_TOKEN}
-clusters:
-- name: kubemark
-  cluster:
-    insecure-skip-tls-verify: true
-    server: https://${MASTER_IP}
-contexts:
-- context:
-    cluster: kubemark
-    user: kube-proxy
-  name: kubemark-context
-current-context: kubemark-context")
-
-# Create kubeconfig for Heapster.
-HEAPSTER_KUBECONFIG_CONTENTS=$(echo "apiVersion: v1
-kind: Config
-users:
-- name: heapster
-  user:
-    token: ${HEAPSTER_TOKEN}
-clusters:
-- name: kubemark
-  cluster:
-    insecure-skip-tls-verify: true
-    server: https://${MASTER_IP}
-contexts:
-- context:
-    cluster: kubemark
-    user: heapster
-  name: kubemark-context
-current-context: kubemark-context")
-
-# Create kubeconfig for NodeProblemDetector.
-NPD_KUBECONFIG_CONTENTS=$(echo "apiVersion: v1
-kind: Config
-users:
-- name: node-problem-detector
-  user:
-    token: ${NODE_PROBLEM_DETECTOR_TOKEN}
-clusters:
-- name: kubemark
-  cluster:
-    insecure-skip-tls-verify: true
-    server: https://${MASTER_IP}
-contexts:
-- context:
-    cluster: kubemark
-    user: node-problem-detector
-  name: kubemark-npd-context
-current-context: kubemark-npd-context")
-
-# Create kubeconfig for local kubectl.
-LOCAL_KUBECONFIG="${RESOURCE_DIRECTORY}/kubeconfig.kubemark"
-cat > "${LOCAL_KUBECONFIG}" << EOF
+# Generate certs/keys for CA, master, kubelet and kubecfg, and tokens for kubelet
+# and kubeproxy.
+function generate-pki-config {
+  ensure-temp-dir
+  gen-kube-bearertoken
+  gen-kube-basicauth
+  create-certs ${MASTER_IP}
+  KUBELET_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
+  KUBE_PROXY_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
+  NODE_PROBLEM_DETECTOR_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
+  HEAPSTER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
+  echo "Generated PKI authentication data for kubemark."
+}
+
+# Wait for the master to be reachable for executing commands on it. We do this by
+# trying to run the bash noop(:) on the master, with 10 retries.
+function wait-for-master-reachability {
+  until gcloud compute ssh --zone="${ZONE}" --project="${PROJECT}" "${MASTER_NAME}" --command=":" &> /dev/null; do
+    sleep 1
+  done
+  echo "Checked master reachability for remote command execution."
+}
+
+# Write all the relevant certs/keys/tokens to the master.
+function write-pki-config-to-master {
+  run-gcloud-compute-with-retries ssh --zone="${ZONE}" --project="${PROJECT}" "${MASTER_NAME}" \
+    --command="sudo mkdir /home/kubernetes -p && sudo mkdir /etc/srv/kubernetes -p && \
+      sudo bash -c \"echo ${CA_CERT_BASE64} | base64 --decode > /etc/srv/kubernetes/ca.crt\" && \
+      sudo bash -c \"echo ${MASTER_CERT_BASE64} | base64 --decode > /etc/srv/kubernetes/server.cert\" && \
+      sudo bash -c \"echo ${MASTER_KEY_BASE64} | base64 --decode > /etc/srv/kubernetes/server.key\" && \
+      sudo bash -c \"echo ${KUBECFG_CERT_BASE64} | base64 --decode > /etc/srv/kubernetes/kubecfg.crt\" && \
+      sudo bash -c \"echo ${KUBECFG_KEY_BASE64} | base64 --decode > /etc/srv/kubernetes/kubecfg.key\" && \
+      sudo bash -c \"echo \"${KUBE_BEARER_TOKEN},admin,admin\" > /etc/srv/kubernetes/known_tokens.csv\" && \
+      sudo bash -c \"echo \"${KUBELET_TOKEN},system:node:node-name,uid:kubelet,system:nodes\" >> /etc/srv/kubernetes/known_tokens.csv\" && \
+      sudo bash -c \"echo \"${KUBE_PROXY_TOKEN},system:kube-proxy,uid:kube_proxy\" >> /etc/srv/kubernetes/known_tokens.csv\" && \
+      sudo bash -c \"echo \"${HEAPSTER_TOKEN},system:heapster,uid:heapster\" >> /etc/srv/kubernetes/known_tokens.csv\" && \
+      sudo bash -c \"echo \"${NODE_PROBLEM_DETECTOR_TOKEN},system:node-problem-detector,uid:system:node-problem-detector\" >> /etc/srv/kubernetes/known_tokens.csv\" && \
+      sudo bash -c \"echo ${KUBE_PASSWORD},admin,admin > /etc/srv/kubernetes/basic_auth.csv\""
+  echo "Wrote PKI certs, keys, tokens and admin password to master."
+}
+
+# Copy all the necessary resource files (scripts/configs/manifests) to the master.
+function copy-resource-files-to-master {
+  run-gcloud-compute-with-retries copy-files --zone="${ZONE}" --project="${PROJECT}" \
+    "${SERVER_BINARY_TAR}" \
+    "${RESOURCE_DIRECTORY}/kubemark-master-env.sh" \
+    "${RESOURCE_DIRECTORY}/start-kubemark-master.sh" \
+    "${KUBEMARK_DIRECTORY}/configure-kubectl.sh" \
+    "${RESOURCE_DIRECTORY}/manifests/etcd.yaml" \
+    "${RESOURCE_DIRECTORY}/manifests/etcd-events.yaml" \
+    "${RESOURCE_DIRECTORY}/manifests/kube-apiserver.yaml" \
+    "${RESOURCE_DIRECTORY}/manifests/kube-scheduler.yaml" \
+    "${RESOURCE_DIRECTORY}/manifests/kube-controller-manager.yaml" \
+    "${RESOURCE_DIRECTORY}/manifests/kube-addon-manager.yaml" \
+    "${RESOURCE_DIRECTORY}/manifests/addons/kubemark-rbac-bindings" \
+    "kubernetes@${MASTER_NAME}":/home/kubernetes/
+  echo "Copied server binary, master startup scripts, configs and resource manifests to master."
+}
+
+# Make startup scripts executable and run start-kubemark-master.sh.
+function start-master-components {
+  echo ""
+  gcloud compute ssh "${MASTER_NAME}" --zone="${ZONE}" --project="${PROJECT}" \
+    --command="sudo chmod a+x /home/kubernetes/configure-kubectl.sh && \
+      sudo chmod a+x /home/kubernetes/start-kubemark-master.sh && \
+      sudo bash /home/kubernetes/start-kubemark-master.sh"
+  echo "The master has started and is now live."
+}
+
+# Write kubeconfig to ${RESOURCE_DIRECTORY}/kubeconfig.kubemark in order to
+# use kubectl locally.
+function write-local-kubeconfig {
+  LOCAL_KUBECONFIG="${RESOURCE_DIRECTORY}/kubeconfig.kubemark"
+  cat > "${LOCAL_KUBECONFIG}" << EOF
 apiVersion: v1
 kind: Config
 users:
@@ -275,70 +197,216 @@ contexts:
   name: kubemark-context
 current-context: kubemark-context
 EOF
+  echo "Kubeconfig file for kubemark master written to ${LOCAL_KUBECONFIG}."
+}
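
Reviewer note: the token pipeline in generate-pki-config reads 128 random bytes, base64-encodes them, strips the non-alphanumeric base64 characters (=, +, /), and truncates to 32 characters. Broken out step by step for clarity (same commands as the diff, just decomposed):

random_bytes=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64)  # ~172 base64 chars
alnum_only=$(echo "${random_bytes}" | tr -d "=+/")                      # drop padding and symbols
token=$(echo "${alnum_only}" | dd bs=32 count=1 2>/dev/null)            # keep the first 32 chars
echo "${token}"   # a 32-character alphanumeric bearer token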
sed "s/{{numreplicas}}/${NUM_NODES:-10}/g" "${RESOURCE_DIRECTORY}/hollow-node_template.json" > "${RESOURCE_DIRECTORY}/hollow-node.json" # Finds the right kubemark binary for 'linux/amd64' platform and uses it to
sed -i'' -e "s/{{project}}/${PROJECT}/g" "${RESOURCE_DIRECTORY}/hollow-node.json" # create a docker image for hollow-node and upload it to the appropriate
sed -i'' -e "s/{{master_ip}}/${MASTER_IP}/g" "${RESOURCE_DIRECTORY}/hollow-node.json" # docker container registry for the cloud provider.
# TODO(shyamjvs): Make the image upload URL and makefile variable w.r.t. provider.
mkdir "${RESOURCE_DIRECTORY}/addons" || true function create-and-upload-hollow-node-image {
MAKE_DIR="${KUBE_ROOT}/cluster/images/kubemark"
sed "s/{{MASTER_IP}}/${MASTER_IP}/g" "${RESOURCE_DIRECTORY}/heapster_template.json" > "${RESOURCE_DIRECTORY}/addons/heapster.json" KUBEMARK_BIN="$(kube::util::find-binary-for-platform kubemark linux/amd64)"
metrics_mem_per_node=4 if [[ -z "${KUBEMARK_BIN}" ]]; then
metrics_mem=$((200 + ${metrics_mem_per_node}*${NUM_NODES:-10})) echo 'Cannot find cmd/kubemark binary'
sed -i'' -e "s/{{METRICS_MEM}}/${metrics_mem}/g" "${RESOURCE_DIRECTORY}/addons/heapster.json"
eventer_mem_per_node=500
eventer_mem=$((200 * 1024 + ${eventer_mem_per_node}*${NUM_NODES:-10}))
sed -i'' -e "s/{{EVENTER_MEM}}/${eventer_mem}/g" "${RESOURCE_DIRECTORY}/addons/heapster.json"
# Create kubemark namespace.
"${KUBECTL}" create -f "${RESOURCE_DIRECTORY}/kubemark-ns.json"
# Create configmap for configuring hollow- kubelet, proxy and npd.
"${KUBECTL}" create configmap "node-configmap" --namespace="kubemark" \
--from-literal=content.type="${TEST_CLUSTER_API_CONTENT_TYPE}" \
--from-file=kernel.monitor="${RESOURCE_DIRECTORY}/kernel-monitor.json"
# Create secret for passing kubeconfigs to kubelet, kubeproxy and npd.
"${KUBECTL}" create secret generic "kubeconfig" --type=Opaque --namespace="kubemark" \
--from-literal=kubelet.kubeconfig="${KUBELET_KUBECONFIG_CONTENTS}" \
--from-literal=kubeproxy.kubeconfig="${KUBEPROXY_KUBECONFIG_CONTENTS}" \
--from-literal=heapster.kubeconfig="${HEAPSTER_KUBECONFIG_CONTENTS}" \
--from-literal=npd.kubeconfig="${NPD_KUBECONFIG_CONTENTS}"
# Create addon pods.
"${KUBECTL}" create -f "${RESOURCE_DIRECTORY}/addons" --namespace="kubemark"
# Create the replication controller for hollow-nodes.
"${KUBECTL}" create -f "${RESOURCE_DIRECTORY}/hollow-node.json" --namespace="kubemark"
echo "Waiting for all HollowNodes to become Running..."
start=$(date +%s)
nodes=$("${KUBECTL}" --kubeconfig="${LOCAL_KUBECONFIG}" get node 2> /dev/null) || true
ready=$(($(echo "${nodes}" | grep -v "NotReady" | wc -l) - 1))
until [[ "${ready}" -ge "${NUM_NODES}" ]]; do
echo -n .
sleep 1
now=$(date +%s)
# Fail it if it already took more than 30 minutes.
if [ $((now - start)) -gt 1800 ]; then
echo ""
echo "Timeout waiting for all HollowNodes to become Running"
# Try listing nodes again - if it fails it means that API server is not responding
if "${KUBECTL}" --kubeconfig="${LOCAL_KUBECONFIG}" get node &> /dev/null; then
echo "Found only ${ready} ready Nodes while waiting for ${NUM_NODES}."
else
echo "Got error while trying to list Nodes. Probably API server is down."
fi
pods=$("${KUBECTL}" get pods --namespace=kubemark) || true
running=$(($(echo "${pods}" | grep "Running" | wc -l)))
echo "${running} HollowNode pods are reported as 'Running'"
not_running=$(($(echo "${pods}" | grep -v "Running" | wc -l) - 1))
echo "${not_running} HollowNode pods are reported as NOT 'Running'"
echo $(echo "${pods}" | grep -v "Running")
exit 1 exit 1
fi fi
echo "Copying kubemark binary to ${MAKE_DIR}"
cp "${KUBEMARK_BIN}" "${MAKE_DIR}"
CURR_DIR=`pwd`
cd "${MAKE_DIR}"
RETRIES=3
for attempt in $(seq 1 ${RETRIES}); do
if ! make; then
if [[ $((attempt)) -eq "${RETRIES}" ]]; then
echo "${color_red}Make failed. Exiting.${color_norm}"
exit 1
fi
echo -e "${color_yellow}Make attempt $(($attempt)) failed. Retrying.${color_norm}" >& 2
sleep $(($attempt * 5))
else
break
fi
done
rm kubemark
cd $CURR_DIR
echo "Created and uploaded the kubemark hollow-node image to docker registry."
}
# Generate secret and configMap for the hollow-node pods to work, prepare
# manifests of the hollow-node and heapster replication controllers from
# templates, and finally create these resources through kubectl.
function create-kube-hollow-node-resources {
# Create kubeconfig for Kubelet.
KUBELET_KUBECONFIG_CONTENTS=$(echo "apiVersion: v1
kind: Config
users:
- name: kubelet
user:
client-certificate-data: "${KUBELET_CERT_BASE64}"
client-key-data: "${KUBELET_KEY_BASE64}"
clusters:
- name: kubemark
cluster:
certificate-authority-data: "${CA_CERT_BASE64}"
server: https://${MASTER_IP}
contexts:
- context:
cluster: kubemark
user: kubelet
name: kubemark-context
current-context: kubemark-context")
# Create kubeconfig for Kubeproxy.
KUBEPROXY_KUBECONFIG_CONTENTS=$(echo "apiVersion: v1
kind: Config
users:
- name: kube-proxy
user:
token: ${KUBE_PROXY_TOKEN}
clusters:
- name: kubemark
cluster:
insecure-skip-tls-verify: true
server: https://${MASTER_IP}
contexts:
- context:
cluster: kubemark
user: kube-proxy
name: kubemark-context
current-context: kubemark-context")
# Create kubeconfig for Heapster.
HEAPSTER_KUBECONFIG_CONTENTS=$(echo "apiVersion: v1
kind: Config
users:
- name: heapster
user:
token: ${HEAPSTER_TOKEN}
clusters:
- name: kubemark
cluster:
insecure-skip-tls-verify: true
server: https://${MASTER_IP}
contexts:
- context:
cluster: kubemark
user: heapster
name: kubemark-context
current-context: kubemark-context")
# Create kubeconfig for NodeProblemDetector.
NPD_KUBECONFIG_CONTENTS=$(echo "apiVersion: v1
kind: Config
users:
- name: node-problem-detector
user:
token: ${NODE_PROBLEM_DETECTOR_TOKEN}
clusters:
- name: kubemark
cluster:
insecure-skip-tls-verify: true
server: https://${MASTER_IP}
contexts:
- context:
cluster: kubemark
user: node-problem-detector
name: kubemark-context
current-context: kubemark-context")
# Create kubemark namespace.
"${KUBECTL}" create -f "${RESOURCE_DIRECTORY}/kubemark-ns.json"
# Create configmap for configuring hollow- kubelet, proxy and npd.
"${KUBECTL}" create configmap "node-configmap" --namespace="kubemark" \
--from-literal=content.type="${TEST_CLUSTER_API_CONTENT_TYPE}" \
--from-file=kernel.monitor="${RESOURCE_DIRECTORY}/kernel-monitor.json"
# Create secret for passing kubeconfigs to kubelet, kubeproxy and npd.
"${KUBECTL}" create secret generic "kubeconfig" --type=Opaque --namespace="kubemark" \
--from-literal=kubelet.kubeconfig="${KUBELET_KUBECONFIG_CONTENTS}" \
--from-literal=kubeproxy.kubeconfig="${KUBEPROXY_KUBECONFIG_CONTENTS}" \
--from-literal=heapster.kubeconfig="${HEAPSTER_KUBECONFIG_CONTENTS}" \
--from-literal=npd.kubeconfig="${NPD_KUBECONFIG_CONTENTS}"
# Create addon pods.
# TODO(shyamjvs): Make path to docker image variable in heapster_template.json.
mkdir -p "${RESOURCE_DIRECTORY}/addons"
sed "s/{{MASTER_IP}}/${MASTER_IP}/g" "${RESOURCE_DIRECTORY}/heapster_template.json" > "${RESOURCE_DIRECTORY}/addons/heapster.json"
metrics_mem_per_node=4
metrics_mem=$((200 + ${metrics_mem_per_node}*${NUM_NODES:-10}))
sed -i'' -e "s/{{METRICS_MEM}}/${metrics_mem}/g" "${RESOURCE_DIRECTORY}/addons/heapster.json"
eventer_mem_per_node=500
eventer_mem=$((200 * 1024 + ${eventer_mem_per_node}*${NUM_NODES:-10}))
sed -i'' -e "s/{{EVENTER_MEM}}/${eventer_mem}/g" "${RESOURCE_DIRECTORY}/addons/heapster.json"
"${KUBECTL}" create -f "${RESOURCE_DIRECTORY}/addons" --namespace="kubemark"
# Create the replication controller for hollow-nodes.
# TODO(shyamjvs): Make path to docker image variable in hollow-node_template.json.
sed "s/{{numreplicas}}/${NUM_NODES:-10}/g" "${RESOURCE_DIRECTORY}/hollow-node_template.json" > "${RESOURCE_DIRECTORY}/hollow-node.json"
sed -i'' -e "s/{{project}}/${PROJECT}/g" "${RESOURCE_DIRECTORY}/hollow-node.json"
sed -i'' -e "s/{{master_ip}}/${MASTER_IP}/g" "${RESOURCE_DIRECTORY}/hollow-node.json"
"${KUBECTL}" create -f "${RESOURCE_DIRECTORY}/hollow-node.json" --namespace="kubemark"
echo "Created secrets, configMaps, replication-controllers required for hollow-nodes."
}
# Wait until all hollow-nodes are running or there is a timeout.
function wait-for-hollow-nodes-to-run-or-timeout {
echo -n "Waiting for all hollow-nodes to become Running"
start=$(date +%s)
nodes=$("${KUBECTL}" --kubeconfig="${LOCAL_KUBECONFIG}" get node 2> /dev/null) || true nodes=$("${KUBECTL}" --kubeconfig="${LOCAL_KUBECONFIG}" get node 2> /dev/null) || true
ready=$(($(echo "${nodes}" | grep -v "NotReady" | wc -l) - 1)) ready=$(($(echo "${nodes}" | grep -v "NotReady" | wc -l) - 1))
done
echo "" until [[ "${ready}" -ge "${NUM_NODES}" ]]; do
echo -n "."
sleep 1
now=$(date +%s)
# Fail it if it already took more than 30 minutes.
if [ $((now - start)) -gt 1800 ]; then
echo ""
echo -e "${color_red} Timeout waiting for all hollow-nodes to become Running. ${color_norm}"
# Try listing nodes again - if it fails it means that API server is not responding
if "${KUBECTL}" --kubeconfig="${LOCAL_KUBECONFIG}" get node &> /dev/null; then
echo "Found only ${ready} ready hollow-nodes while waiting for ${NUM_NODES}."
else
echo "Got error while trying to list hollow-nodes. Probably API server is down."
fi
pods=$("${KUBECTL}" get pods --namespace=kubemark) || true
running=$(($(echo "${pods}" | grep "Running" | wc -l)))
echo "${running} hollow-nodes are reported as 'Running'"
not_running=$(($(echo "${pods}" | grep -v "Running" | wc -l) - 1))
echo "${not_running} hollow-nodes are reported as NOT 'Running'"
echo $(echo "${pods}" | grep -v "Running")
exit 1
fi
nodes=$("${KUBECTL}" --kubeconfig="${LOCAL_KUBECONFIG}" get node 2> /dev/null) || true
ready=$(($(echo "${nodes}" | grep -v "NotReady" | wc -l) - 1))
done
echo -e "${color_green} Done!${color_norm}"
}
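
Reviewer note: the "- 1" in the ready count compensates for the header row that kubectl get node prints; without it the count would be one too high and the loop would exit one node early. Illustrative check using the same expressions as the function above:

nodes=$("${KUBECTL}" --kubeconfig="${LOCAL_KUBECONFIG}" get node 2> /dev/null) || true
ready=$(($(echo "${nodes}" | grep -v "NotReady" | wc -l) - 1))   # minus the header line
echo "${ready} hollow-nodes Ready"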
+############################### Main Function ########################################
+
+# Setup for master.
+echo -e "${color_yellow}STARTING SETUP FOR MASTER${color_norm}"
+create-master-environment-file
+create-master-instance-with-resources
+generate-pki-config
+wait-for-master-reachability
+write-pki-config-to-master
+copy-resource-files-to-master
+start-master-components
+
+# Setup for hollow-nodes.
+echo ""
+echo -e "${color_yellow}STARTING SETUP FOR HOLLOW-NODES${color_norm}"
+write-local-kubeconfig
+create-and-upload-hollow-node-image
+create-kube-hollow-node-resources
+wait-for-hollow-nodes-to-run-or-timeout
+
+echo ""
 echo "Master IP: ${MASTER_IP}"
-echo "Password to kubemark master: ${password}"
+echo "Password to kubemark master: ${KUBE_PASSWORD}"
 echo "Kubeconfig for kubemark master is written in ${LOCAL_KUBECONFIG}"

View File

@@ -14,15 +14,20 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-# Script that destroys Kubemark clusters and deletes all GCE resources created for Master
+# Script that destroys Kubemark cluster and deletes all master resources.

 KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
 source "${KUBE_ROOT}/test/kubemark/common.sh"

-"${KUBECTL}" delete -f "${RESOURCE_DIRECTORY}/hollow-kubelet.json" &> /dev/null || true
 "${KUBECTL}" delete -f "${RESOURCE_DIRECTORY}/addons" &> /dev/null || true
+"${KUBECTL}" delete -f "${RESOURCE_DIRECTORY}/hollow-node.json" &> /dev/null || true
 "${KUBECTL}" delete -f "${RESOURCE_DIRECTORY}/kubemark-ns.json" &> /dev/null || true

-rm -rf "${RESOURCE_DIRECTORY}/addons"
+rm -rf "${RESOURCE_DIRECTORY}/addons" \
+  "${RESOURCE_DIRECTORY}/kubeconfig.kubemark" \
+  "${RESOURCE_DIRECTORY}/hollow-node.json" \
+  "${RESOURCE_DIRECTORY}/kubemark-master-env.sh" &> /dev/null || true

 GCLOUD_COMMON_ARGS="--project ${PROJECT} --zone ${ZONE} --quiet"

@@ -51,10 +56,3 @@ if [ "${SEPARATE_EVENT_MACHINE:-false}" == "true" ]; then
   gcloud compute disks delete "${EVENT_STORE_NAME}-pd" \
     ${GCLOUD_COMMON_ARGS} || true
 fi
-
-rm -rf "${RESOURCE_DIRECTORY}/addons" "${RESOURCE_DIRECTORY}/kubeconfig.kubemark" &> /dev/null || true
-rm "${RESOURCE_DIRECTORY}/ca.crt" \
-  "${RESOURCE_DIRECTORY}/kubecfg.crt" \
-  "${RESOURCE_DIRECTORY}/kubecfg.key" \
-  "${RESOURCE_DIRECTORY}/hollow-node.json" \
-  "${RESOURCE_DIRECTORY}/kubemark-master-env.sh" &> /dev/null || true