Merge pull request #2608 from jbeda/gcloud

Convert GCE scripts from gcutil to 'gcloud compute'
Eric Tune 2014-11-25 15:26:45 -08:00
commit bea2a506c1
11 changed files with 174 additions and 194 deletions

View File

@@ -20,7 +20,7 @@ MASTER_SIZE=n1-standard-1
 MINION_SIZE=n1-standard-1
 NUM_MINIONS=4
 # TODO(dchen1107): Filed an internal issue to create an alias
-# for containervm image, so that gcloud/gcutil will expand this
+# for containervm image, so that gcloud will expand this
 # to the latest supported image.
 IMAGE=container-vm-v20141016
 IMAGE_PROJECT=google-containers
@@ -31,7 +31,7 @@ MASTER_TAG="${INSTANCE_PREFIX}-master"
 MINION_TAG="${INSTANCE_PREFIX}-minion"
 MINION_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${NUM_MINIONS}}))
 MINION_IP_RANGES=($(eval echo "10.244.{1..${NUM_MINIONS}}.0/24"))
-MINION_SCOPES="storage-ro,compute-rw"
+MINION_SCOPES=("storage-ro" "compute-rw")
 # Increase the sleep interval value if concerned about API rate limits. 3, in seconds, is the default.
 POLL_SLEEP_INTERVAL=3
 PORTAL_NET="10.0.0.0/16"
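
Note (not part of the diff): the switch from a comma-separated `MINION_SCOPES` string to a bash array matters because `gcloud compute instances create` takes scopes as separate arguments. A minimal sketch of how kube-up expands the array later in this PR (the `example-minion` name is illustrative):

```bash
MINION_SCOPES=("storage-ro" "compute-rw")

# Build the flag list the same way kube-up does: pass --scopes with one
# argument per scope, or --no-scopes when the array is empty.
scope_flags=()
if (( ${#MINION_SCOPES[@]} > 0 )); then
  scope_flags=("--scopes" "${MINION_SCOPES[@]}")
else
  scope_flags=("--no-scopes")
fi

# Expands to: gcloud compute instances create example-minion --scopes storage-ro compute-rw
echo gcloud compute instances create example-minion "${scope_flags[@]}"
```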

View File

@@ -20,7 +20,7 @@ MASTER_SIZE=g1-small
 MINION_SIZE=g1-small
 NUM_MINIONS=2
 # TODO(dchen1107): Filed an internal issue to create an alias
-# for containervm image, so that gcloud/gcutil will expand this
+# for containervm image, so that gcloud will expand this
 # to the latest supported image.
 IMAGE=container-vm-v20141016
 IMAGE_PROJECT=google-containers
@@ -31,7 +31,7 @@ MASTER_TAG="${INSTANCE_PREFIX}-master"
 MINION_TAG="${INSTANCE_PREFIX}-minion"
 MINION_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${NUM_MINIONS}}))
 MINION_IP_RANGES=($(eval echo "10.245.{1..${NUM_MINIONS}}.0/24"))
-MINION_SCOPES="storage-ro,compute-rw"
+MINION_SCOPES=("storage-ro" "compute-rw")
 # Increase the sleep interval value if concerned about API rate limits. 3, in seconds, is the default.
 POLL_SLEEP_INTERVAL=3
 PORTAL_NET="10.0.0.0/16"

View File

@@ -24,7 +24,7 @@ source "${KUBE_ROOT}/cluster/gce/${KUBE_CONFIG_FILE-"config-default.sh"}"
 # Verify prereqs
 function verify-prereqs {
   local cmd
-  for cmd in gcloud gcutil gsutil; do
+  for cmd in gcloud gsutil; do
     which "${cmd}" >/dev/null || {
       echo "Can't find ${cmd} in PATH, please fix and retry. The Google Cloud "
       echo "SDK can be downloaded from https://cloud.google.com/sdk/."
@@ -143,10 +143,9 @@ function upload-server-tars() {
 function detect-minions () {
   KUBE_MINION_IP_ADDRESSES=()
   for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
-    # gcutil will print the "external-ip" column header even if no instances are found
-    local minion_ip=$(gcutil listinstances --format=csv --sort=external-ip \
-      --columns=external-ip --zone ${ZONE} --filter="name eq ${MINION_NAMES[$i]}" \
-      | tail -n '+2' | tail -n 1)
+    local minion_ip=$(gcloud compute instances describe --zone "${ZONE}" \
+      "${MINION_NAMES[$i]}" --fields networkInterfaces[0].accessConfigs[0].natIP \
+      --format=text | awk '{ print $2 }')
     if [[ -z "${minion_ip-}" ]] ; then
       echo "Did not find ${MINION_NAMES[$i]}" >&2
     else
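
Note (not part of the diff): the new lookup relies on the key/value layout of gcloud's `--format=text` output. Assuming it prints a line such as `networkInterfaces[0].accessConfigs[0].natIP: <address>` (the exact key text may vary by gcloud version), `awk '{ print $2 }'` keeps only the address:

```bash
# Illustrative only; the instance name and address below are made up.
gcloud compute instances describe --zone "${ZONE}" kubernetes-minion-1 \
  --fields networkInterfaces[0].accessConfigs[0].natIP --format=text
# networkInterfaces[0].accessConfigs[0].natIP: 146.148.60.103
```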
@@ -171,10 +170,9 @@ function detect-minions () {
 function detect-master () {
   KUBE_MASTER=${MASTER_NAME}
   if [[ -z "${KUBE_MASTER_IP-}" ]]; then
-    # gcutil will print the "external-ip" column header even if no instances are found
-    KUBE_MASTER_IP=$(gcutil listinstances --format=csv --sort=external-ip \
-      --columns=external-ip --zone ${ZONE} --filter="name eq ${MASTER_NAME}" \
-      | tail -n '+2' | tail -n 1)
+    KUBE_MASTER_IP=$(gcloud compute instances describe --zone "${ZONE}" \
+      "${MASTER_NAME}" --fields networkInterfaces[0].accessConfigs[0].natIP \
+      --format=text | awk '{ print $2 }')
   fi
   if [[ -z "${KUBE_MASTER_IP-}" ]]; then
     echo "Could not detect Kubernetes master node. Make sure you've launched a cluster with 'kube-up.sh'" >&2
@@ -245,41 +243,35 @@ function kube-up {
   local htpasswd
   htpasswd=$(cat "${KUBE_TEMP}/htpasswd")

-  if ! gcutil getnetwork "${NETWORK}" >/dev/null 2>&1; then
-    echo "Creating new network for: ${NETWORK}"
+  if ! gcloud compute networks describe "${NETWORK}" &>/dev/null; then
+    echo "Creating new network: ${NETWORK}"
     # The network needs to be created synchronously or we have a race. The
     # firewalls can be added concurrent with instance creation.
-    gcutil addnetwork "${NETWORK}" --range "10.240.0.0/16"
+    gcloud compute networks create "${NETWORK}" --range "10.240.0.0/16"
   fi

-  if ! gcutil getfirewall "${NETWORK}-default-internal" >/dev/null 2>&1; then
-    gcutil addfirewall "${NETWORK}-default-internal" \
+  if ! gcloud compute firewall-rules describe "${NETWORK}-default-internal" &>/dev/null; then
+    gcloud compute firewall-rules create "${NETWORK}-default-internal" \
       --project "${PROJECT}" \
-      --norespect_terminal_width \
-      --sleep_between_polls "${POLL_SLEEP_INTERVAL}" \
       --network "${NETWORK}" \
-      --allowed_ip_sources "10.0.0.0/8" \
-      --allowed "tcp:1-65535,udp:1-65535,icmp" &
+      --source-ranges "10.0.0.0/8" \
+      --allow "tcp:1-65535" "udp:1-65535" "icmp" &
   fi

-  if ! gcutil getfirewall "${NETWORK}-default-ssh" >/dev/null 2>&1; then
-    gcutil addfirewall "${NETWORK}-default-ssh" \
+  if ! gcloud compute firewall-rules describe "${NETWORK}-default-ssh" &>/dev/null; then
+    gcloud compute firewall-rules create "${NETWORK}-default-ssh" \
      --project "${PROJECT}" \
-      --norespect_terminal_width \
-      --sleep_between_polls "${POLL_SLEEP_INTERVAL}" \
      --network "${NETWORK}" \
-      --allowed_ip_sources "0.0.0.0/0" \
-      --allowed "tcp:22" &
+      --source-ranges "0.0.0.0/0" \
+      --allow "tcp:22" &
   fi

   echo "Starting VMs and configuring firewalls"
-  gcutil addfirewall "${MASTER_NAME}-https" \
+  gcloud compute firewall-rules create "${MASTER_NAME}-https" \
     --project "${PROJECT}" \
-    --norespect_terminal_width \
-    --sleep_between_polls "${POLL_SLEEP_INTERVAL}" \
     --network "${NETWORK}" \
-    --target_tags "${MASTER_TAG}" \
-    --allowed tcp:443 &
+    --target-tags "${MASTER_TAG}" \
+    --allow tcp:443 &

   (
     echo "#! /bin/bash"
@@ -312,18 +304,16 @@ function kube-up {
     MINION_SCOPES="${MINION_SCOPES}, https://www.googleapis.com/auth/logging.write"
   fi

-  gcutil addinstance "${MASTER_NAME}" \
+  gcloud compute instances create "${MASTER_NAME}" \
     --project "${PROJECT}" \
-    --norespect_terminal_width \
-    --sleep_between_polls "${POLL_SLEEP_INTERVAL}" \
     --zone "${ZONE}" \
-    --machine_type "${MASTER_SIZE}" \
-    --image "projects/${IMAGE_PROJECT}/global/images/${IMAGE}" \
+    --machine-type "${MASTER_SIZE}" \
+    --image-project="${IMAGE_PROJECT}" \
+    --image "${IMAGE}" \
     --tags "${MASTER_TAG}" \
     --network "${NETWORK}" \
-    --service_account_scopes="storage-ro,compute-rw" \
-    --automatic_restart \
-    --metadata_from_file "startup-script:${KUBE_TEMP}/master-start.sh" &
+    --scopes "storage-ro" "compute-rw" \
+    --metadata-from-file "startup-script=${KUBE_TEMP}/master-start.sh" &

   for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
     (
@@ -336,34 +326,36 @@ function kube-up {
       grep -v "^#" "${KUBE_ROOT}/cluster/gce/templates/salt-minion.sh"
     ) > "${KUBE_TEMP}/minion-start-${i}.sh"

-    gcutil addfirewall "${MINION_NAMES[$i]}-all" \
+    gcloud compute firewall-rules create "${MINION_NAMES[$i]}-all" \
       --project "${PROJECT}" \
-      --norespect_terminal_width \
-      --sleep_between_polls "${POLL_SLEEP_INTERVAL}" \
       --network "${NETWORK}" \
-      --allowed_ip_sources "${MINION_IP_RANGES[$i]}" \
-      --allowed "tcp,udp,icmp,esp,ah,sctp" &
+      --source-ranges "${MINION_IP_RANGES[$i]}" \
+      --allow tcp udp icmp esp ah sctp &

-    gcutil addinstance ${MINION_NAMES[$i]} \
+    local -a scope_flags=()
+    if (( "${#MINION_SCOPES[@]}" > 0 )); then
+      scope_flags=("--scopes" "${MINION_SCOPES[@]}")
+    else
+      scope_flags=("--no-scopes")
+    fi
+
+    gcloud compute instances create ${MINION_NAMES[$i]} \
       --project "${PROJECT}" \
-      --norespect_terminal_width \
-      --sleep_between_polls "${POLL_SLEEP_INTERVAL}" \
       --zone "${ZONE}" \
-      --machine_type "${MINION_SIZE}" \
-      --image "projects/${IMAGE_PROJECT}/global/images/${IMAGE}" \
+      --machine-type "${MINION_SIZE}" \
+      --image-project="${IMAGE_PROJECT}" \
+      --image "${IMAGE}" \
       --tags "${MINION_TAG}" \
       --network "${NETWORK}" \
-      --service_account_scopes "${MINION_SCOPES}" \
-      --automatic_restart \
-      --can_ip_forward \
-      --metadata_from_file "startup-script:${KUBE_TEMP}/minion-start-${i}.sh" &
+      "${scope_flags[@]}" \
+      --can-ip-forward \
+      --metadata-from-file "startup-script=${KUBE_TEMP}/minion-start-${i}.sh" &

-    gcutil addroute "${MINION_NAMES[$i]}" "${MINION_IP_RANGES[$i]}" \
+    gcloud compute routes create "${MINION_NAMES[$i]}" \
       --project "${PROJECT}" \
-      --norespect_terminal_width \
-      --sleep_between_polls "${POLL_SLEEP_INTERVAL}" \
+      --destination-range "${MINION_IP_RANGES[$i]}" \
       --network "${NETWORK}" \
-      --next_hop_instance "${ZONE}/instances/${MINION_NAMES[$i]}" &
+      --next-hop-instance "${MINION_NAMES[$i]}" \
+      --next-hop-instance-zone "${ZONE}" &
   done

   local fail=0
@@ -376,7 +368,7 @@ function kube-up {
     exit 2
   fi

-  detect-master > /dev/null
+  detect-master

   echo "Waiting for cluster initialization."
   echo
@@ -401,7 +393,7 @@ function kube-up {
   local rc # Capture return code without exiting because of errexit bash option
   for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
     # Make sure docker is installed
-    gcutil ssh "${MINION_NAMES[$i]}" which docker >/dev/null || {
+    gcloud compute ssh --zone "$ZONE" "${MINION_NAMES[$i]}" --command "which docker" >/dev/null || {
       echo "Docker failed to install on ${MINION_NAMES[$i]}. Your cluster is unlikely" >&2
       echo "to work correctly. Please run ./cluster/kube-down.sh and re-create the" >&2
       echo "cluster. (sorry!)" >&2
@@ -424,9 +416,9 @@ function kube-up {
   # TODO: generate ADMIN (and KUBELET) tokens and put those in the master's
   # config file. Distribute the same way the htpasswd is done.
   (umask 077
-   gcutil ssh "${MASTER_NAME}" sudo cat /srv/kubernetes/kubecfg.crt >"${HOME}/${kube_cert}" 2>/dev/null
-   gcutil ssh "${MASTER_NAME}" sudo cat /srv/kubernetes/kubecfg.key >"${HOME}/${kube_key}" 2>/dev/null
-   gcutil ssh "${MASTER_NAME}" sudo cat /srv/kubernetes/ca.crt >"${HOME}/${ca_cert}" 2>/dev/null
+   gcloud compute ssh --zone "$ZONE" "${MASTER_NAME}" --command "sudo cat /srv/kubernetes/kubecfg.crt" >"${HOME}/${kube_cert}" 2>/dev/null
+   gcloud compute ssh --zone "$ZONE" "${MASTER_NAME}" --command "sudo cat /srv/kubernetes/kubecfg.key" >"${HOME}/${kube_key}" 2>/dev/null
+   gcloud compute ssh --zone "$ZONE" "${MASTER_NAME}" --command "sudo cat /srv/kubernetes/ca.crt" >"${HOME}/${ca_cert}" 2>/dev/null

   cat << EOF > ~/.kubernetes_auth
   {
@@ -449,47 +441,34 @@ function kube-down {
   detect-project

   echo "Bringing down cluster"
-  gcutil deletefirewall \
+  gcloud compute firewall-rules delete \
     --project "${PROJECT}" \
-    --norespect_terminal_width \
-    --sleep_between_polls "${POLL_SLEEP_INTERVAL}" \
-    --force \
+    --quiet \
     "${MASTER_NAME}-https" &

-  gcutil deleteinstance \
-    --project "${PROJECT}" \
-    --norespect_terminal_width \
-    --sleep_between_polls "${POLL_SLEEP_INTERVAL}" \
-    --force \
-    --delete_boot_pd \
-    --zone "${ZONE}" \
-    "${MASTER_NAME}" &
-
-  gcutil deletefirewall \
-    --project "${PROJECT}" \
-    --norespect_terminal_width \
-    --sleep_between_polls "${POLL_SLEEP_INTERVAL}" \
-    --force \
-    "${MINION_NAMES[@]/%/-all}" &
-
-  gcutil deleteinstance \
-    --project "${PROJECT}" \
-    --norespect_terminal_width \
-    --sleep_between_polls "${POLL_SLEEP_INTERVAL}" \
-    --force \
-    --delete_boot_pd \
-    --zone "${ZONE}" \
-    "${MINION_NAMES[@]}" &
-
-  gcutil deleteroute \
-    --project "${PROJECT}" \
-    --norespect_terminal_width \
-    --sleep_between_polls "${POLL_SLEEP_INTERVAL}" \
-    --force \
-    "${MINION_NAMES[@]}" &
+  local minion
+  for minion in "${MINION_NAMES[@]}"; do
+    gcloud compute firewall-rules delete \
+      --project "${PROJECT}" \
+      --quiet \
+      "${minion}-all" &
+
+    gcloud compute routes delete \
+      --project "${PROJECT}" \
+      --quiet \
+      "${minion}" &
+  done
+
+  for minion in "${MASTER_NAME}" "${MINION_NAMES[@]}"; do
+    gcloud compute instances delete \
+      --project "${PROJECT}" \
+      --quiet \
+      --delete-disks all \
+      --zone "${ZONE}" \
+      "${minion}" &
+  done

   wait
 }

 # Update a kubernetes cluster with latest source
@@ -512,7 +491,7 @@ function kube-push {
     echo "echo Executing configuration"
     echo "sudo salt '*' mine.update"
     echo "sudo salt --force-color '*' state.highstate"
-  ) | gcutil ssh --project "$PROJECT" --zone "$ZONE" "$KUBE_MASTER" sudo bash
+  ) | gcloud compute ssh --project "$PROJECT" --zone "$ZONE" "$KUBE_MASTER" --command "sudo bash"

   get-password
@@ -551,12 +530,10 @@ function test-setup {
   detect-project

   # Open up port 80 & 8080 so common containers on minions can be reached
-  gcutil addfirewall \
+  gcloud compute firewall-rules create \
     --project "${PROJECT}" \
-    --norespect_terminal_width \
-    --sleep_between_polls "${POLL_SLEEP_INTERVAL}" \
-    --target_tags "${MINION_TAG}" \
-    --allowed tcp:80,tcp:8080 \
+    --target-tags "${MINION_TAG}" \
+    --allow tcp:80 tcp:8080 \
     --network "${NETWORK}" \
     "${MINION_TAG}-${INSTANCE_PREFIX}-http-alt"
 }
@@ -568,20 +545,18 @@ function test-setup {
 # PROJECT
 function test-teardown {
   echo "Shutting down test cluster in background."
-  gcutil deletefirewall \
+  gcloud compute firewall-rules delete \
     --project "${PROJECT}" \
-    --norespect_terminal_width \
-    --sleep_between_polls "${POLL_SLEEP_INTERVAL}" \
-    --force \
-    "${MINION_TAG}-${INSTANCE_PREFIX}-http-alt" || true > /dev/null
-  "${KUBE_ROOT}/cluster/kube-down.sh" > /dev/null
+    --quiet \
+    "${MINION_TAG}-${INSTANCE_PREFIX}-http-alt" || true
+  "${KUBE_ROOT}/cluster/kube-down.sh"
 }

 # SSH to a node by name ($1) and run a command ($2).
 function ssh-to-node {
   local node="$1"
   local cmd="$2"
-  gcutil --log_level=WARNING ssh --ssh_arg "-o LogLevel=quiet" "${node}" "${cmd}"
+  gcloud compute ssh --ssh-flag="-o LogLevel=quiet" --zone="${ZONE}" "${node}" --command "${cmd}"
 }

 # Restart the kube-proxy on a node ($1)
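
A brief usage sketch for the rewritten helper above (the node index and command are illustrative, not taken from the PR):

```bash
# Runs `sudo docker ps` on the first minion through gcloud compute ssh.
ssh-to-node "${MINION_NAMES[0]}" "sudo docker ps"
```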
@@ -592,41 +567,39 @@ function restart-kube-proxy {
 # Setup monitoring using heapster and InfluxDB
 function setup-monitoring {
   if [[ "${MONITORING}" == "true" ]]; then
     echo "Setting up cluster monitoring using Heapster."
-    if ! gcutil getfirewall monitoring-heapster &> /dev/null; then
-      if ! gcutil addfirewall monitoring-heapster \
+    if ! gcloud compute firewall-rules describe monitoring-heapster &>/dev/null; then
+      if ! gcloud compute firewall-rules create monitoring-heapster \
           --project "${PROJECT}" \
-          --norespect_terminal_width \
-          --sleep_between_polls "${POLL_SLEEP_INTERVAL}" \
-          --target_tags="${MINION_TAG}" \
-          --allowed "tcp:80,tcp:8083,tcp:8086,tcp:9200" &> /dev/null; then
+          --target-tags="${MINION_TAG}" \
+          --allow tcp:80 tcp:8083 tcp:8086 tcp:9200; then
         echo "Failed to set up firewall for monitoring" && false
       fi
     fi

     # Re-use master auth for Grafana
     get-password
     ensure-temp-dir

     cp "${KUBE_ROOT}/examples/monitoring/influx-grafana-pod.json" "${KUBE_TEMP}/influx-grafana-pod.0.json"
     sed "s/HTTP_USER, \"value\": \"[^\"]*\"/HTTP_USER, \"value\": \"$KUBE_USER\"/g" \
       "${KUBE_TEMP}/influx-grafana-pod.0.json" > "${KUBE_TEMP}/influx-grafana-pod.1.json"
     sed "s/HTTP_PASS, \"value\": \"[^\"]*\"/HTTP_PASS, \"value\": \"$KUBE_PASSWORD\"/g" \
       "${KUBE_TEMP}/influx-grafana-pod.1.json" > "${KUBE_TEMP}/influx-grafana-pod.2.json"

     local kubectl="${KUBE_ROOT}/cluster/kubectl.sh"
     if "${kubectl}" create -f "${KUBE_TEMP}/influx-grafana-pod.2.json" &> /dev/null \
       && "${kubectl}" create -f "${KUBE_ROOT}/examples/monitoring/influx-grafana-service.json" &> /dev/null \
       && "${kubectl}" create -f "${KUBE_ROOT}/examples/monitoring/heapster-pod.json" &> /dev/null; then
       local dashboard_url="http://$(${kubectl} get -o json pod influx-grafana | grep hostIP | awk '{print $2}' | sed 's/[,|\"]//g')"
       echo
       echo "Grafana dashboard will be available at $dashboard_url. Wait for the monitoring dashboard to be online."
       echo "Use the master user name and password for the dashboard."
       echo
     else
       echo "Failed to Setup Monitoring"
       teardown-monitoring
     fi
   fi
 }
@@ -638,13 +611,11 @@ function teardown-monitoring {
     "${kubectl}" delete pods heapster &> /dev/null || true
     "${kubectl}" delete pods influx-grafana &> /dev/null || true
     "${kubectl}" delete services influx-master &> /dev/null || true
-    if gcutil getfirewall monitoring-heapster &> /dev/null; then
-      gcutil deletefirewall \
+    if gcloud compute firewall-rules describe monitoring-heapster &> /dev/null; then
+      gcloud compute firewall-rules delete \
         --project "${PROJECT}" \
-        --norespect_terminal_width \
-        --sleep_between_polls "${POLL_SLEEP_INTERVAL}" \
-        --force \
-        monitoring-heapster &> /dev/null || true
+        --quiet \
+        monitoring-heapster &> /dev/null || true
     fi
   fi
 }

View File

@@ -40,11 +40,12 @@ We set up this bridge on each node with SaltStack, in [container_bridge.py](clus
 We make these addresses routable in GCE:

-    gcutil addroute ${MINION_NAMES[$i]} ${MINION_IP_RANGES[$i]} \
-      --norespect_terminal_width \
-      --project ${PROJECT} \
-      --network ${NETWORK} \
-      --next_hop_instance ${ZONE}/instances/${MINION_NAMES[$i]} &
+    gcloud compute routes add "${MINION_NAMES[$i]}" \
+      --project "${PROJECT}" \
+      --destination-range "${MINION_IP_RANGES[$i]}" \
+      --network "${NETWORK}" \
+      --next-hop-instance "${MINION_NAMES[$i]}" \
+      --next-hop-instance-zone "${ZONE}" &

 The minion IP ranges are /24s in the 10-dot space.
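
Not part of the diff: once kube-up has created these per-minion routes, they can be checked with the standard routes listing command (the project variable is assumed to be set as in the scripts above):

```bash
# Lists the routes in the project, including the per-minion 10.x.y.0/24 entries.
gcloud compute routes list --project "${PROJECT}"
```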

View File

@@ -9,14 +9,14 @@ The example below creates a Kubernetes cluster with 4 worker node Virtual Machin
 2. Make sure you can start up a GCE VM. At least make sure you can do the [Create an instance](https://developers.google.com/compute/docs/quickstart#addvm) part of the GCE Quickstart.
 3. Make sure you can ssh into the VM without interactive prompts.
   * Your GCE SSH key must either have no passcode or you need to be using `ssh-agent`.
-  * Ensure the GCE firewall isn't blocking port 22 to your VMs. By default, this should work but if you have edited firewall rules or created a new non-default network, you'll need to expose it: `gcutil addfirewall --network=<network-name> --description "SSH allowed from anywhere" --allowed=tcp:22 default-ssh`
+  * Ensure the GCE firewall isn't blocking port 22 to your VMs. By default, this should work but if you have edited firewall rules or created a new non-default network, you'll need to expose it: `gcloud compute firewall-rules create --network=<network-name> --description "SSH allowed from anywhere" --allow tcp:22 default-ssh`
 4. You need to have the Google Cloud Storage API, and the Google Cloud Storage JSON API enabled. This can be done in the Google Cloud Console.

 ### Prerequisites for your workstation

 1. Be running a Linux or Mac OS X.
-2. You must have the [Google Cloud SDK](https://developers.google.com/cloud/sdk/) installed. This will get you `gcloud`, `gcutil` and `gsutil`.
+2. You must have the [Google Cloud SDK](https://developers.google.com/cloud/sdk/) installed. This will get you `gcloud` and `gsutil`.
 3. Ensure that your `gcloud` components are up-to-date by running `gcloud components update`.
 4. If you want to build your own release, you need to have [Docker
    installed](https://docs.docker.com/installation/). On Mac OS X you can use

View File

@@ -59,10 +59,15 @@ Before you can use a GCE PD with a pod, you need to create it and format it.

 __We are actively working on making this more streamlined.__

 ```sh
-gcutil adddisk --size_gb=<size> --zone=<zone> <name>
-gcutil attachdisk --disk <name> kubernetes-master
-gcutil ssh kubernetes-master sudo /usr/share/google/safe_format_and_mount /dev/disk/by-id/google-test2 /mnt/tmp
-gcutil detachdisk --device_name google-<name> kubernetes-master
+DISK_NAME=my-data-disk
+DISK_SIZE=500GB
+ZONE=us-central1-a
+
+gcloud compute disks create --size=$DISK_SIZE --zone=$ZONE $DISK_NAME
+gcloud compute instances attach-disk --zone=$ZONE --disk=$DISK_NAME --device-name temp-data kubernetes-master
+gcloud compute ssh --zone=$ZONE kubernetes-master \
+  --command "sudo /usr/share/google/safe_format_and_mount /dev/disk/by-id/google-temp-data /mnt/tmp"
+gcloud compute instances detach-disk --zone=$ZONE --disk $DISK_NAME kubernetes-master
 ```
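
Note (not part of the diff): the `--device-name temp-data` flag is what makes the disk appear inside the VM as `/dev/disk/by-id/google-temp-data`, the path passed to `safe_format_and_mount` above. One way to confirm this after attaching, assuming the variables from the snippet above:

```bash
# Lists the by-id symlink that GCE creates for the attached device name.
gcloud compute ssh --zone=$ZONE kubernetes-master \
  --command "ls -l /dev/disk/by-id/ | grep google-temp-data"
```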
#### GCE PD Example configuration: #### GCE PD Example configuration:

View File

@@ -39,8 +39,7 @@ redis-master-pod gurpartap/redis kubernetes-minion-3.c.thockin-dev.intern
 If you ssh to that machine, you can run `docker ps` to see the actual pod:

 ```shell
-$ gcutil ssh --zone us-central1-b kubernetes-minion-3
-$ sudo docker ps
+me@workstation$ gcloud compute ssh --zone us-central1-b kubernetes-minion-3

 me@kubernetes-minion-3:~$ sudo docker ps
 CONTAINER ID  IMAGE  COMMAND  CREATED  STATUS
@@ -165,33 +164,33 @@ redis-slave name=redis,role=slave name=redis,role=slave 10.0.0.2
 guestbook  name=guestbook  10.0.0.3  3000
 ```

-To play with the service itself, find the external IP of the load balancer from the [Google Cloud Console][cloud-console] or the `gcutil` tool, and visit `http://<ip>:3000`.
+To play with the service itself, find the external IP of the load balancer from the [Google Cloud Console][cloud-console] or the `gcloud` tool, and visit `http://<ip>:3000`.

 ```shell
-$ gcutil getforwardingrule guestbook
-+---------------+-----------------------------------+
-| name          | guestbook                         |
-| description   |                                   |
-| creation-time | 2014-10-15T19:07:24.837-07:00     |
-| region        | us-central1                       |
-| ip            | 12.34.56.78                       |
-| protocol      | TCP                               |
-| port-range    | 3000-3000                         |
-| target        | us-central1/targetPools/guestbook |
-+---------------+-----------------------------------+
+$ gcloud compute forwarding-rules describe --region=us-central1 guestbook
+IPAddress: 11.22.33.44
+IPProtocol: TCP
+creationTimestamp: '2014-11-24T16:08:15.327-08:00'
+id: '17594840560849468061'
+kind: compute#forwardingRule
+name: guestbook
+portRange: 1-65535
+region: https://www.googleapis.com/compute/v1/projects/jbeda-prod/regions/us-central1
+selfLink: https://www.googleapis.com/compute/v1/projects/jbeda-prod/regions/us-central1/forwardingRules/guestbook
+target: https://www.googleapis.com/compute/v1/projects/jbeda-prod/regions/us-central1/targetPools/guestbook
 ```

-You may need to open the firewall for port 3000 using the [console][cloud-console] or the `gcutil` tool. The following command will allow traffic from any source to instances tagged `kubernetes-minion`:
+You may need to open the firewall for port 3000 using the [console][cloud-console] or the `gcloud` tool. The following command will allow traffic from any source to instances tagged `kubernetes-minion`:

 ```shell
-$ gcutil addfirewall --allowed=tcp:3000 --target_tags=kubernetes-minion kubernetes-minion-3000
+$ gcloud compute firewall-rules create --allow=tcp:3000 --target-tags=kubernetes-minion kubernetes-minion-3000
 ```

 If you are running Kubernetes locally, you can just visit http://localhost:3000

-For details about limiting traffic to specific sources, see the [gcutil documentation][gcutil-docs]
+For details about limiting traffic to specific sources, see the [GCE firewall documentation][gce-firewall-docs].

 [cloud-console]: https://console.developer.google.com
-[gcutil-docs]: https://developers.google.com/compute/docs/gcutil/reference/firewall#addfirewall
+[gce-firewall-docs]: https://cloud.google.com/compute/docs/networking#firewalls

 ### Step Seven: Cleanup

View File

@@ -66,8 +66,7 @@ redis-master dockerfile/redis kubernetes-minion-3.c.briandpe-api.inter
 If you ssh to that machine, you can run `docker ps` to see the actual pod:

 ```shell
-$ gcutil ssh --zone us-central1-b kubernetes-minion-3
-$ sudo docker ps
+me@workstation$ gcloud compute ssh --zone us-central1-b kubernetes-minion-3

 me@kubernetes-minion-3:~$ sudo docker ps
 CONTAINER ID  IMAGE  COMMAND  CREATED  STATUS  PORTS  NAMES
@@ -308,23 +307,23 @@ if (isset($_GET['cmd']) === true) {
 } ?>
 ```

-To play with the service itself, find the name of a frontend, grab the external IP of that host from the [Google Cloud Console][cloud-console] or the `gcutil` tool, and visit `http://<host-ip>:8000`.
+To play with the service itself, find the name of a frontend, grab the external IP of that host from the [Google Cloud Console][cloud-console] or the `gcloud` tool, and visit `http://<host-ip>:8000`.

 ```shell
-$ gcutil listinstances
+$ gcloud compute instances list
 ```

-You may need to open the firewall for port 8000 using the [console][cloud-console] or the `gcutil` tool. The following command will allow traffic from any source to instances tagged `kubernetes-minion`:
+You may need to open the firewall for port 8000 using the [console][cloud-console] or the `gcloud` tool. The following command will allow traffic from any source to instances tagged `kubernetes-minion`:

 ```shell
-$ gcutil addfirewall --allowed=tcp:8000 --target_tags=kubernetes-minion kubernetes-minion-8000
+$ gcloud compute firewall-rules create --allow=tcp:8000 --target-tags=kubernetes-minion kubernetes-minion-8000
 ```

 If you are running Kubernetes locally, you can just visit http://localhost:8000.

-For details about limiting traffic to specific sources, see the [gcutil documentation][gcutil-docs].
+For details about limiting traffic to specific sources, see the [GCE firewall documentation][gce-firewall-docs].

 [cloud-console]: https://console.developer.google.com
-[gcutil-docs]: https://developers.google.com/compute/docs/gcutil/reference/firewall#addfirewall
+[gce-firewall-docs]: https://cloud.google.com/compute/docs/networking#firewalls

 ### Step Six: Cleanup

View File

@@ -33,10 +33,12 @@ It also assumes that `$DOCKER_HUB_USER` is set to your Docker user id. We use t
 $ export DOCKER_HUB_USER=my-docker-id
 ```

-You may need to open the firewall for port 8080 using the [console][cloud-console] or the `gcutil` tool. The following command will allow traffic from any source to instances tagged `kubernetes-minion`:
+You may need to open the firewall for port 8080 using the [console][cloud-console] or the `gcloud` tool. The following command will allow traffic from any source to instances tagged `kubernetes-minion`:

 ```bash
-$ gcutil addfirewall --allowed=tcp:8080 --target_tags=kubernetes-minion kubernetes-minion-8080
+$ gcloud compute firewall-rules create \
+    --allow tcp:8080 --target-tags=kubernetes-minion \
+    --zone=us-central1-a kubernetes-minion-8080
 ```

 ### Step Zero: Build the Docker images

View File

@@ -17,8 +17,6 @@
 # Launches a container and verifies it can be reached. Assumes that
 # we're being called by hack/e2e-test.sh (we use some env vars it sets up).

-# TODO: Convert uses of gcutil to gcloud
-
 set -o errexit
 set -o nounset
 set -o pipefail
@@ -41,7 +39,7 @@ function teardown() {
   rm -rf ${config}
   echo "Waiting for disk to become unmounted"
   sleep 20
-  gcutil deletedisk -f --zone=${ZONE} ${disk_name}
+  gcloud compute disks delete --quiet --zone="${ZONE}" "${disk_name}"
 }

 trap "teardown" EXIT
@@ -49,13 +47,14 @@ trap "teardown" EXIT
 perl -p -e "s/%.*%/${disk_name}/g" ${KUBE_ROOT}/examples/gce-pd/testpd.yaml > ${config}

 # Create and mount the disk.
-gcutil adddisk --size_gb=10 --zone=${ZONE} ${disk_name}
-gcutil attachdisk --disk ${disk_name} ${MASTER_NAME}
-gcutil ssh ${MASTER_NAME} sudo rm -rf /mnt/tmp
-gcutil ssh ${MASTER_NAME} sudo mkdir /mnt/tmp
-gcutil ssh ${MASTER_NAME} sudo /usr/share/google/safe_format_and_mount /dev/disk/by-id/google-${disk_name} /mnt/tmp
-gcutil ssh ${MASTER_NAME} sudo umount /mnt/tmp
-gcloud compute instances detach-disk --disk ${disk_name} --zone ${ZONE} ${MASTER_NAME}
+gcloud compute disks create --zone="${ZONE}" --size=10GB "${disk_name}"
+gcloud compute instances attach-disk --zone="${ZONE}" --disk="${disk_name}" \
+  --device-name temp-data "${MASTER_NAME}"
+gcloud compute ssh --zone="${ZONE}" "${MASTER_NAME}" --command "sudo rm -rf /mnt/tmp"
+gcloud compute ssh --zone="${ZONE}" "${MASTER_NAME}" --command "sudo mkdir -p /mnt/tmp"
+gcloud compute ssh --zone="${ZONE}" "${MASTER_NAME}" --command "sudo /usr/share/google/safe_format_and_mount /dev/disk/by-id/google-temp-data /mnt/tmp"
+gcloud compute ssh --zone="${ZONE}" "${MASTER_NAME}" --command "sudo umount /mnt/tmp"
+gcloud compute instances detach-disk --zone="${ZONE}" --disk "${disk_name}" "${MASTER_NAME}"

 ${KUBECFG} -c ${config} create pods

View File

@@ -40,6 +40,7 @@ var (
 	tests = flag.String("tests", "", "Run only tests in hack/e2e-suite matching this glob. Ignored if -test is set.")
 	root = flag.String("root", absOrDie(filepath.Clean(filepath.Join(path.Base(os.Args[0]), ".."))), "Root directory of kubernetes repository.")
 	verbose = flag.Bool("v", false, "If true, print all command output.")
+	trace_bash = flag.Bool("trace-bash", false, "If true, pass -x to bash to trace all bash commands")
 	checkVersionSkew = flag.Bool("check_version_skew", true, ""+
 		"By default, verify that client and server have exact version match. "+
 		"You can explicitly set to false if you're, e.g., testing client changes "+
@@ -172,6 +173,9 @@ func Test() (failed, passed []string) {

 func runBash(stepName, bashFragment string) bool {
 	cmd := exec.Command("bash", "-s")
+	if *trace_bash {
+		cmd.Args = append(cmd.Args, "-x")
+	}
 	cmd.Stdin = strings.NewReader(bashWrap(bashFragment))
 	return finishRunning(stepName, cmd)
 }
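
A hedged usage note: with the new flag, a run of the e2e driver can echo every bash command it executes. The flag name comes from the diff; the exact invocation below is an assumption about how hack/e2e.go is normally run:

```bash
# Illustrative invocation; adjust to however the e2e driver is launched locally.
go run hack/e2e.go -v -trace-bash
```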