From 86468cd29d0dd9f1724815e22ea9b3e6bec835bc Mon Sep 17 00:00:00 2001
From: Zach Loafman
Date: Wed, 22 Apr 2015 10:55:08 -0700
Subject: [PATCH] Revert "Added kube-proxy token."

---
 .../templates/create-dynamic-salt-files.sh |  9 ++-------
 cluster/gce/configure-vm.sh                | 19 ++++++-------------
 cluster/gce/util.sh                        | 13 +++++--------
 cluster/vagrant/provision-master.sh        |  5 +----
 4 files changed, 14 insertions(+), 32 deletions(-)

diff --git a/cluster/aws/templates/create-dynamic-salt-files.sh b/cluster/aws/templates/create-dynamic-salt-files.sh
index 031834d112..33050b3591 100644
--- a/cluster/aws/templates/create-dynamic-salt-files.sh
+++ b/cluster/aws/templates/create-dynamic-salt-files.sh
@@ -40,19 +40,14 @@ mkdir -p /srv/salt-overlay/salt/nginx
 echo $MASTER_HTPASSWD > /srv/salt-overlay/salt/nginx/htpasswd
 
 # Generate and distribute a shared secret (bearer token) to
-# apiserver and nodes so that kubelet/kube-proxy can authenticate to
+# apiserver and kubelet so that kubelet can authenticate to
 # apiserver to send events.
 # This works on CoreOS, so it should work on a lot of distros.
 kubelet_token=$(cat /dev/urandom | base64 | tr -d "=+/" | dd bs=32 count=1 2> /dev/null)
-# Same thing for kube-proxy.
-kube_proxy_token=$(cat /dev/urandom | base64 | tr -d "=+/" | dd bs=32 count=1 2> /dev/null)
 
-# Make a list of tokens and usernames to be pushed to the apiserver
 mkdir -p /srv/salt-overlay/salt/kube-apiserver
 known_tokens_file="/srv/salt-overlay/salt/kube-apiserver/known_tokens.csv"
-(umask u=rw,go= ; echo "" > $known_tokens_file)
-echo "$kubelet_token,kubelet,kubelet" >> $known_tokens_file ;
-echo "$kube_proxy_token,kube_proxy,kube_proxy" >> $known_tokens_file
+(umask u=rw,go= ; echo "$kubelet_token,kubelet,kubelet" > $known_tokens_file)
 
 mkdir -p /srv/salt-overlay/salt/kubelet
 kubelet_auth_file="/srv/salt-overlay/salt/kubelet/kubernetes_auth"
diff --git a/cluster/gce/configure-vm.sh b/cluster/gce/configure-vm.sh
index b894f842a8..7424c445b6 100644
--- a/cluster/gce/configure-vm.sh
+++ b/cluster/gce/configure-vm.sh
@@ -73,23 +73,17 @@ for k,v in yaml.load(sys.stdin).iteritems():
   fi
 }
 
-function ensure-kube-tokens() {
+function ensure-kube-token() {
   # We bake the KUBELET_TOKEN in separately to avoid auth information
   # having to be re-communicated on kube-push. (Otherwise the client
   # has to keep the bearer token around to handle generating a valid
   # kube-env.)
   if [[ -z "${KUBELET_TOKEN:-}" ]] && [[ ! -e "${KNOWN_TOKENS_FILE}" ]]; then
-    until KUBELET_TOKEN=$(curl-metadata kubelet-token); do
+    until KUBELET_TOKEN=$(curl-metadata kube-token); do
       echo 'Waiting for metadata KUBELET_TOKEN...'
       sleep 3
     done
   fi
-  if [[ -z "${KUBE_PROXY_TOKEN:-}" ]] && [[ ! -e "${KNOWN_TOKENS_FILE}" ]]; then
-    until KUBE_PROXY_TOKEN=$(curl-metadata kube-proxy-token); do
-      echo 'Waiting for metadata KUBE_PROXY_TOKEN...'
-      sleep 3
-    done
-  fi
 }
 
 function remove-docker-artifacts() {
@@ -258,7 +252,7 @@ EOF
 
 # This should only happen on cluster initialization. Uses
 # MASTER_HTPASSWORD to generate the nginx/htpasswd file, and the
-# KUBELET_TOKEN and KUBE_PROXY_TOKEN, to generate known_tokens.csv
+# KUBELET_TOKEN, plus /dev/urandom, to generate known_tokens.csv
 # (KNOWN_TOKENS_FILE). After the first boot and on upgrade, these
 # files exist on the master-pd and should never be touched again
 # (except perhaps an additional service account, see NB below.)
@@ -272,9 +266,8 @@ function create-salt-auth() {
   if [ ! -e "${KNOWN_TOKENS_FILE}" ]; then
     mkdir -p /srv/salt-overlay/salt/kube-apiserver
-    (umask 077; echo "" > "${KNOWN_TOKENS_FILE}")
-    echo "${KUBELET_TOKEN},kubelet,kubelet" >> "${KNOWN_TOKENS_FILE}"
-    echo "${KUBE_PROXY_TOKEN},kube_proxy,kube_proxy" >> "${KNOWN_TOKENS_FILE}"
+    (umask 077;
+      echo "${KUBELET_TOKEN},kubelet,kubelet" > "${KNOWN_TOKENS_FILE}")
 
     mkdir -p /srv/salt-overlay/salt/kubelet
     kubelet_auth_file="/srv/salt-overlay/salt/kubelet/kubernetes_auth"
@@ -429,7 +422,7 @@ if [[ -z "${is_push}" ]]; then
   ensure-install-dir
   set-kube-env
   [[ "${KUBERNETES_MASTER}" == "true" ]] && mount-master-pd
-  ensure-kube-tokens
+  ensure-kube-token
   create-salt-pillar
   create-salt-auth
   download-release
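Both shell variants of the token generator kept by this patch follow the same pipeline: read raw bytes from /dev/urandom, base64-encode them, delete the three base64 characters that are awkward in CSV fields and URLs ("=", "+", "/"), and truncate to 32 characters. Extracted as a standalone helper it would look roughly like this sketch (the function name is illustrative; the patch defines no such helper):

    # Emit a 32-character alphanumeric bearer token.
    # base64 expands the random bytes into [A-Za-z0-9+/=] plus newlines;
    # tr strips "=", "+" and "/"; the final dd keeps the first 32 bytes,
    # all of which precede base64's first line wrap at column 76.
    gen-token() {
      dd if=/dev/urandom bs=128 count=1 2>/dev/null \
        | base64 | tr -d "=+/" \
        | dd bs=32 count=1 2>/dev/null
    }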
-e "${KNOWN_TOKENS_FILE}" ]; then mkdir -p /srv/salt-overlay/salt/kube-apiserver - (umask 077; echo "" > "${KNOWN_TOKENS_FILE}") - echo "${KUBELET_TOKEN},kubelet,kubelet" >> "${KNOWN_TOKENS_FILE}" - echo "${KUBE_PROXY_TOKEN},kube_proxy,kube_proxy" >> "${KNOWN_TOKENS_FILE}" + (umask 077; + echo "${KUBELET_TOKEN},kubelet,kubelet" > "${KNOWN_TOKENS_FILE}") mkdir -p /srv/salt-overlay/salt/kubelet kubelet_auth_file="/srv/salt-overlay/salt/kubelet/kubernetes_auth" @@ -429,7 +422,7 @@ if [[ -z "${is_push}" ]]; then ensure-install-dir set-kube-env [[ "${KUBERNETES_MASTER}" == "true" ]] && mount-master-pd - ensure-kube-tokens + ensure-kube-token create-salt-pillar create-salt-auth download-release diff --git a/cluster/gce/util.sh b/cluster/gce/util.sh index 25c7d5f8a4..33c59c4692 100755 --- a/cluster/gce/util.sh +++ b/cluster/gce/util.sh @@ -594,12 +594,11 @@ function kube-up { --zone "${ZONE}" \ --size "10GB" - # Generate a bearer token for kubelets in this cluster. We push this - # separately from the other cluster variables so that the client (this + # Generate a bearer token for this cluster. We push this separately + # from the other cluster variables so that the client (this # computer) can forget it later. This should disappear with # https://github.com/GoogleCloudPlatform/kubernetes/issues/3168 KUBELET_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null) - KUBE_PROXY_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null) # Reserve the master's IP so that it can later be transferred to another VM # without disrupting the kubelets. IPs are associated with regions, not zones, @@ -626,8 +625,7 @@ function kube-up { # Wait for last batch of jobs wait-for-jobs - add-instance-metadata "${MASTER_NAME}" "kubelet-token=${KUBELET_TOKEN}" - add-instance-metadata "${MASTER_NAME}" "kube-proxy-token=${KUBE_PROXY_TOKEN}" + add-instance-metadata "${MASTER_NAME}" "kube-token=${KUBELET_TOKEN}" echo "Creating minions." @@ -642,8 +640,7 @@ function kube-up { create-node-template "${NODE_INSTANCE_PREFIX}-template" "${scope_flags[*]}" \ "startup-script=${KUBE_ROOT}/cluster/gce/configure-vm.sh" \ "kube-env=${KUBE_TEMP}/node-kube-env.yaml" \ - "kubelet-token=${KUBELET_TOKEN}" \ - "kube-proxy-token=${KUBE_PROXY_TOKEN}" + "kube-token=${KUBELET_TOKEN}" gcloud preview managed-instance-groups --zone "${ZONE}" \ create "${NODE_INSTANCE_PREFIX}-group" \ @@ -881,7 +878,7 @@ function kube-push { # TODO(zmerlynn): Re-create instance-template with the new # node-kube-env. This isn't important until the node-ip-range issue # is solved (because that's blocking automatic dynamic nodes from - # working). The node-kube-env has to be composed with the kube*-token + # working). The node-kube-env has to be composed with the kube-token # metadata. Ideally we would have # https://github.com/GoogleCloudPlatform/kubernetes/issues/3168 # implemented before then, though, so avoiding this mess until then. diff --git a/cluster/vagrant/provision-master.sh b/cluster/vagrant/provision-master.sh index 3f58779796..0175cf70ed 100755 --- a/cluster/vagrant/provision-master.sh +++ b/cluster/vagrant/provision-master.sh @@ -137,13 +137,10 @@ EOF known_tokens_file="/srv/salt-overlay/salt/kube-apiserver/known_tokens.csv" if [[ ! 
-f "${known_tokens_file}" ]]; then kubelet_token=$(cat /dev/urandom | base64 | tr -d "=+/" | dd bs=32 count=1 2> /dev/null) - kube_proxy_token=$(cat /dev/urandom | base64 | tr -d "=+/" | dd bs=32 count=1 2> /dev/null) mkdir -p /srv/salt-overlay/salt/kube-apiserver known_tokens_file="/srv/salt-overlay/salt/kube-apiserver/known_tokens.csv" - (umask u=rw,go= ; echo "" > $known_tokens_file) - echo "$kubelet_token,kubelet,kubelet" >> $known_tokens_file - echo "$kube_proxy_token,kube-proxy,kube-proxy" >> $known_tokens_file + (umask u=rw,go= ; echo "$kubelet_token,kubelet,kubelet" > $known_tokens_file) mkdir -p /srv/salt-overlay/salt/kubelet kubelet_auth_file="/srv/salt-overlay/salt/kubelet/kubernetes_auth"