Set shell options for reliability.

Tweak a few other small things in our shell scripts.
pull/6/head
Joe Beda 2014-10-06 13:25:27 -07:00
parent d43a6ec5a3
commit 96c1bc17ca
37 changed files with 304 additions and 198 deletions
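
The unifying change: every script drops the terse "set -e" in favor of the
three long-form options. A minimal sketch of what each one buys (illustrative
script, not from the repo):

  #!/bin/bash
  set -o errexit   # exit as soon as any command fails (same as set -e)
  set -o nounset   # expanding an unset variable is a fatal error (set -u)
  set -o pipefail  # a pipeline fails if any stage fails, not just the last
  grep needle /no/such/file | sort   # pipefail surfaces grep's failure here
  echo "never reached under errexit"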

View File

@@ -46,18 +46,20 @@ function kube::build::make_binary() {
}
function kube::build::make_binaries() {
if [[ ${#targets[@]} -eq 0 ]]; then
targets=(
cmd/proxy
cmd/apiserver
cmd/controller-manager
cmd/kubelet
cmd/kubecfg
plugin/cmd/scheduler
)
local -a targets=(
cmd/proxy
cmd/apiserver
cmd/controller-manager
cmd/kubelet
cmd/kubecfg
plugin/cmd/scheduler
)
if [[ -n "${1-}" ]]; then
targets=("$1")
fi
binaries=()
local -a binaries=()
local target
for target in "${targets[@]}"; do
binaries+=("${KUBE_GO_PACKAGE}/${target}")
@@ -66,11 +68,6 @@ function kube::build::make_binaries() {
ARCH_TARGET="${KUBE_TARGET}/${GOOS}/${GOARCH}"
mkdir -p "${ARCH_TARGET}"
if [[ -n "$1" ]]; then
kube::build::make_binary "$1"
exit 0
fi
local b
for b in "${binaries[@]}"; do
kube::build::make_binary "$b"
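
A pattern repeated throughout the commit: with nounset on, referencing "$1"
when no argument was passed kills the script, so tests move to "${1-}", which
substitutes an empty string for an unset parameter. A hypothetical sketch:

  #!/bin/bash
  set -o nounset
  # "$1" here would abort with "$1: unbound variable" when run with no args;
  # "${1-}" quietly expands to "" instead.
  if [[ -n "${1-}" ]]; then
    echo "building only $1"
  else
    echo "building the default target list"
  fi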

View File

@@ -14,7 +14,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/build/build-image/common.sh"

View File

@@ -14,7 +14,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/build/build-image/common.sh"

View File

@@ -14,7 +14,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/build/build-image/common.sh"

View File

@@ -14,14 +14,16 @@
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/build/build-image/common.sh"
echo "+++ Running unit tests"
if [[ -n "$1" ]]; then
if [[ -n "${1-}" ]]; then
godep go test -cover -coverprofile=tmp.out "$KUBE_GO_PACKAGE/$1"
exit 0
fi

View File

@@ -53,7 +53,7 @@ readonly LOCAL_OUTPUT_BUILD="${LOCAL_OUTPUT_ROOT}/build"
readonly REMOTE_OUTPUT_ROOT="/go/src/${KUBE_GO_PACKAGE}/_output"
readonly REMOTE_OUTPUT_DIR="${REMOTE_OUTPUT_ROOT}/build"
readonly DOCKER_CONTAINER_NAME=kube-build
readonly DOCKER_MOUNT="-v ${LOCAL_OUTPUT_BUILD}:${REMOTE_OUTPUT_DIR}"
readonly DOCKER_MOUNT_ARGS=(--volume "${LOCAL_OUTPUT_BUILD}:${REMOTE_OUTPUT_DIR}")
readonly KUBE_CLIENT_BINARIES=(
kubecfg
@@ -114,6 +114,7 @@ function kube::build::verify_prereqs() {
echo " - On Mac OS X, boot2docker VM isn't started" >&2
echo " - On Mac OS X, DOCKER_HOST env variable isn't set approriately" >&2
echo " - On Linux, user isn't in 'docker' group. Add and relogin." >&2
echo " Something like 'sudo usermod -a -G docker ${USER-user}'" >&2
echo " - On Linux, Docker daemon hasn't been started or has crashed" >&2
return 1
fi
@@ -207,25 +208,23 @@ function kube::build::run_image() {
function kube::build::docker_build() {
local -r image=$1
local -r context_dir=$2
local -r build_cmd="docker build -t ${image} ${context_dir}"
local -ra build_cmd=(docker build -t "${image}" "${context_dir}")
echo "+++ Building Docker image ${image}. This can take a while."
set +e # We are handling the error here manually
local docker_output
docker_output=$(${build_cmd} 2>&1)
if [[ $? -ne 0 ]]; then
set -e
echo "+++ Docker build command failed for ${image}" >&2
echo >&2
echo "${docker_output}" >&2
echo >&2
echo "To retry manually, run:" >&2
echo >&2
echo " ${build_cmd}" >&2
echo >&2
docker_output=$("${build_cmd[@]}" 2>&1) || {
cat <<EOF >&2
+++ Docker build command failed for ${image}
${docker_output}
To retry manually, run:
${build_cmd[*]}
EOF
return 1
fi
set -e
}
}
function kube::build::clean_image() {
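
The docker_build rewrite above replaces a command held in a string with a
command held in an array. A short sketch of why (hypothetical image and path
names); note how "${cmd[*]}" still gives a readable one-line form for logging,
as in the heredoc:

  # String form: unquoted expansion re-splits on whitespace, so an argument
  # containing a space silently becomes two arguments.
  cmd_str="docker build -t my-image /tmp/context dir"
  # Array form: each element stays exactly one argument.
  cmd=(docker build -t "my-image" "/tmp/context dir")
  "${cmd[@]}"        # run it: docker sees one context path
  echo "${cmd[*]}"   # join with spaces for a log message
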
@@ -252,18 +251,21 @@ function kube::build::clean_images() {
# Run a command in the kube-build image. This assumes that the image has
# already been built. This will sync out all output data from the build.
function kube::build::run_build_command() {
[[ -n "$@" ]] || { echo "Invalid input." >&2; return 4; }
[[ $# != 0 ]] || { echo "Invalid input." >&2; return 4; }
local -r docker="docker run --name=${DOCKER_CONTAINER_NAME} --attach=stdout --attach=stderr --attach=stdin --tty ${DOCKER_MOUNT} ${KUBE_BUILD_IMAGE}"
local -ra docker_cmd=(
docker run "--name=${DOCKER_CONTAINER_NAME}"
--interactive --tty
"${DOCKER_MOUNT_ARGS[@]}" "${KUBE_BUILD_IMAGE}")
# Remove the container if it is left over from some previous aborted run
docker rm ${DOCKER_CONTAINER_NAME} >/dev/null 2>&1 || true
${docker} "$@"
docker rm "${DOCKER_CONTAINER_NAME}" >/dev/null 2>&1 || true
"${docker_cmd[@]}" "$@"
# Remove the container after we run. '--rm' might be appropriate but it
# appears that sometimes it fails. See
# https://github.com/docker/docker/issues/3968
docker rm ${DOCKER_CONTAINER_NAME} >/dev/null 2>&1 || true
docker rm "${DOCKER_CONTAINER_NAME}" >/dev/null 2>&1 || true
}
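
The repeated "docker rm ... || true" lines use the standard errexit escape
hatch: a command that is allowed to fail gets an explicit fallback, since
errexit skips commands whose status is consumed by || or &&. Sketch
(hypothetical container name):

  set -o errexit
  docker rm my-container >/dev/null 2>&1 || true  # fine if it doesn't exist yet
  docker rm my-container                          # a failure here kills the script
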
# If the Docker server is remote, copy the results back out.
@@ -278,21 +280,23 @@ function kube::build::copy_output() {
# The easiest thing I (jbeda) could figure out was to launch another
# container pointed at the same volume, tar the output directory and ship
# that tar over stdout.
local -r docker="docker run -a stdout --name=${DOCKER_CONTAINER_NAME} ${DOCKER_MOUNT} ${KUBE_BUILD_IMAGE}"
local -ra docker_cmd=(
docker run -a stdout "--name=${DOCKER_CONTAINER_NAME}"
"${DOCKER_MOUNT_ARGS[@]}" "${KUBE_BUILD_IMAGE}")
# Kill any leftover container
docker rm ${DOCKER_CONTAINER_NAME} >/dev/null 2>&1 || true
docker rm "${DOCKER_CONTAINER_NAME}" >/dev/null 2>&1 || true
echo "+++ Syncing back _output directory from boot2docker VM"
rm -rf "${LOCAL_OUTPUT_BUILD}"
mkdir -p "${LOCAL_OUTPUT_BUILD}"
${docker} sh -c "tar c -C ${REMOTE_OUTPUT_DIR} . ; sleep 1" \
"${docker_cmd[@]}" sh -c "tar c -C ${REMOTE_OUTPUT_DIR} . ; sleep 1" \
| tar xv -C "${LOCAL_OUTPUT_BUILD}"
# Remove the container after we run. '--rm' might be appropriate but it
# appears that sometimes it fails. See
# https://github.com/docker/docker/issues/3968
docker rm ${DOCKER_CONTAINER_NAME} >/dev/null 2>&1 || true
docker rm "${DOCKER_CONTAINER_NAME}" >/dev/null 2>&1 || true
# I (jbeda) also tried getting rsync working using 'docker run' as the
# 'remote shell'. This mostly worked but there was a hang when
@@ -440,7 +444,7 @@ function kube::release::gcs::verify_prereqs() {
if [[ -z "${GCLOUD_ACCOUNT-}" ]]; then
GCLOUD_ACCOUNT=$(gcloud auth list 2>/dev/null | awk '/(active)/ { print $2 }')
fi
if [[ -z "${GCLOUD_ACCOUNT}" ]]; then
if [[ -z "${GCLOUD_ACCOUNT-}" ]]; then
echo "No account authorized through gcloud. Please fix with:"
echo
echo " gcloud auth login"
@@ -450,7 +454,7 @@ function kube::release::gcs::verify_prereqs() {
if [[ -z "${GCLOUD_PROJECT-}" ]]; then
GCLOUD_PROJECT=$(gcloud config list project | awk '{project = $3} END {print project}')
fi
if [[ -z "${GCLOUD_PROJECT}" ]]; then
if [[ -z "${GCLOUD_PROJECT-}" ]]; then
echo "No account authorized through gcloud. Please fix with:"
echo
echo " gcloud config set project <project id>"
@@ -471,9 +475,9 @@ function kube::release::gcs::ensure_release_bucket() {
KUBE_GCS_RELEASE_PREFIX=${KUBE_GCS_RELEASE_PREFIX-devel/}
KUBE_GCS_DOCKER_REG_PREFIX=${KUBE_GCS_DOCKER_REG_PREFIX-docker-reg/}
if ! gsutil ls gs://${KUBE_GCS_RELEASE_BUCKET} >/dev/null 2>&1 ; then
if ! gsutil ls "gs://${KUBE_GCS_RELEASE_BUCKET}" >/dev/null 2>&1 ; then
echo "Creating Google Cloud Storage bucket: $RELEASE_BUCKET"
gsutil mb gs://${KUBE_GCS_RELEASE_BUCKET}
gsutil mb "gs://${KUBE_GCS_RELEASE_BUCKET}"
fi
}
@@ -487,7 +491,8 @@ function kube::release::gcs::ensure_docker_registry() {
# Grovel around and find the OAuth token in the gcloud config
local -r boto=~/.config/gcloud/legacy_credentials/${GCLOUD_ACCOUNT}/.boto
local -r refresh_token=$(grep 'gs_oauth2_refresh_token =' $boto | awk '{ print $3 }')
local refresh_token
refresh_token=$(grep 'gs_oauth2_refresh_token =' "$boto" | awk '{ print $3 }')
if [[ -z "$refresh_token" ]]; then
echo "Couldn't find OAuth 2 refresh token in ${boto}" >&2
@@ -498,14 +503,16 @@ function kube::release::gcs::ensure_docker_registry() {
docker rm ${reg_container_name} >/dev/null 2>&1 || true
echo "+++ Starting GCS backed Docker registry"
local docker="docker run -d --name=${reg_container_name} "
docker+="-e GCS_BUCKET=${KUBE_GCS_RELEASE_BUCKET} "
docker+="-e STORAGE_PATH=${KUBE_GCS_DOCKER_REG_PREFIX} "
docker+="-e GCP_OAUTH2_REFRESH_TOKEN=${refresh_token} "
docker+="-p 127.0.0.1:5000:5000 "
docker+="google/docker-registry"
local -ra docker_cmd=(
docker run -d "--name=${reg_container_name}"
-e "GCS_BUCKET=${KUBE_GCS_RELEASE_BUCKET}"
-e "STORAGE_PATH=${KUBE_GCS_DOCKER_REG_PREFIX}"
-e "GCP_OAUTH2_REFRESH_TOKEN=${refresh_token}"
-p 127.0.0.1:5000:5000
google/docker-registry
)
${docker}
"${docker[@]}"
# Give it time to spin up before we start throwing stuff at it
sleep 5

View File

@@ -18,6 +18,9 @@
#
# This is a no-op on Linux when the Docker daemon is local. This is only
# necessary on Mac OS X with boot2docker.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
source "$KUBE_ROOT/build/common.sh"

View File

@@ -18,6 +18,9 @@
#
# This makes the docker build image, builds the binaries and copies them out
# of the docker container.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
source "$KUBE_ROOT/build/common.sh"

View File

@@ -20,6 +20,9 @@
# Kubernetes into a tar file and put it in the right place in the output
# directory. It will then copy over the Dockerfile and build the kube-build
# image.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT="$(dirname "${BASH_SOURCE}")/.."
source "$KUBE_ROOT/build/common.sh"

View File

@@ -15,6 +15,9 @@
# limitations under the License.
# Clean out the output directory on the docker host.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
source "$KUBE_ROOT/build/common.sh"

View File

@@ -19,6 +19,10 @@
# This makes the docker build image, builds the cross binaries and copies them
# out of the docker container.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
source "$KUBE_ROOT/build/common.sh"

View File

@@ -19,6 +19,10 @@
# This script will make the 'run image' after building all of the necessary
# binaries.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
source "$KUBE_ROOT/build/common.sh"

View File

@@ -18,6 +18,10 @@
# images and other build artifacts. All intermediate artifacts will be hosted
# publicly on Google Cloud Storage currently.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
source "$KUBE_ROOT/build/common.sh"

View File

@@ -16,6 +16,10 @@
# Run the integration test.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
source "$KUBE_ROOT/build/common.sh"

View File

@@ -16,6 +16,10 @@
# Run all of the golang unit tests.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
source "$KUBE_ROOT/build/common.sh"

View File

@@ -18,6 +18,10 @@
#
# This container will have a snapshot of the current sources.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
source "$KUBE_ROOT/build/common.sh"

View File

@@ -23,14 +23,27 @@ source "${KUBE_ROOT}/cluster/gce/${KUBE_CONFIG_FILE-"config-default.sh"}"
# Verify prereqs
function verify-prereqs {
for x in gcloud gcutil gsutil; do
if [ "$(which $x)" == "" ]; then
echo "Can't find $x in PATH, please fix and retry."
local cmd
for cmd in gcloud gcutil gsutil; do
which "${cmd}" >/dev/null || {
echo "Can't find ${cmd} in PATH, please fix and retry. The Google Cloud "
echo "SDK can be downloaded from https://cloud.google.com/sdk/."
exit 1
fi
}
done
}
# Create a temp dir that'll be deleted at the end of this bash session.
#
# Vars set:
# KUBE_TEMP
function ensure-temp-dir {
if [[ -z ${KUBE_TEMP-} ]]; then
KUBE_TEMP=$(mktemp -d -t kubernetes.XXXXXX)
trap 'rm -rf "${KUBE_TEMP}"' EXIT
fi
}
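
ensure-temp-dir also shows the safe trap idiom (a later hunk applies the same
fix to an install script): single quotes defer expansion until the trap fires,
so the handler removes whatever KUBE_TEMP holds at exit, properly quoted.
Sketch with a hypothetical variable:

  tmp=$(mktemp -d -t example.XXXXXX)
  trap 'rm -rf "${tmp}"' EXIT   # expanded at exit time, quoted
  # trap "rm -rf ${tmp}" EXIT   # expanded now: bakes in the value, unquoted
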
# Verify and find the various tar files that we are going to use on the server.
#
# Vars set:
@@ -62,12 +75,13 @@ function find-release-tars {
# Vars set:
# PROJECT
function detect-project () {
if [ -z "$PROJECT" ]; then
if [[ -z "${PROJECT-}" ]]; then
PROJECT=$(gcloud config list project | tail -n 1 | cut -f 3 -d ' ')
fi
if [ -z "$PROJECT" ]; then
echo "Could not detect Google Cloud Platform project. Set the default project using 'gcloud config set project <PROJECT>'" 1>&2
if [[ -z "${PROJECT-}" ]]; then
echo "Could not detect Google Cloud Platform project. Set the default project using " >&2
echo "'gcloud config set project <PROJECT>'" >&2
exit 1
fi
echo "Project: $PROJECT (autodetected from gcloud config)"
@@ -126,15 +140,15 @@ function detect-minions () {
local minion_ip=$(gcutil listinstances --format=csv --sort=external-ip \
--columns=external-ip --zone ${ZONE} --filter="name eq ${MINION_NAMES[$i]}" \
| tail -n '+2' | tail -n 1)
if [ -z "$minion_ip" ] ; then
echo "Did not find ${MINION_NAMES[$i]}" 1>&2
if [[ -z "${minion_ip-}" ]] ; then
echo "Did not find ${MINION_NAMES[$i]}" >&2
else
echo "Found ${MINION_NAMES[$i]} at ${minion_ip}"
KUBE_MINION_IP_ADDRESSES+=("${minion_ip}")
fi
done
if [ -z "$KUBE_MINION_IP_ADDRESSES" ]; then
echo "Could not detect Kubernetes minion nodes. Make sure you've launched a cluster with 'kube-up.sh'" 1>&2
if [[ -z "${KUBE_MINION_IP_ADDRESSES-}" ]]; then
echo "Could not detect Kubernetes minion nodes. Make sure you've launched a cluster with 'kube-up.sh'" >&2
exit 1
fi
}
@@ -149,14 +163,14 @@ function detect-minions () {
# KUBE_MASTER_IP
function detect-master () {
KUBE_MASTER=${MASTER_NAME}
if [ -z "$KUBE_MASTER_IP" ]; then
if [[ -z "${KUBE_MASTER_IP-}" ]]; then
# gcutil will print the "external-ip" column header even if no instances are found
KUBE_MASTER_IP=$(gcutil listinstances --format=csv --sort=external-ip \
--columns=external-ip --zone ${ZONE} --filter="name eq ${MASTER_NAME}" \
| tail -n '+2' | tail -n 1)
fi
if [ -z "$KUBE_MASTER_IP" ]; then
echo "Could not detect Kubernetes master node. Make sure you've launched a cluster with 'kube-up.sh'" 1>&2
if [[ -z "${KUBE_MASTER_IP-}" ]]; then
echo "Could not detect Kubernetes master node. Make sure you've launched a cluster with 'kube-up.sh'" >&2
exit 1
fi
echo "Using master: $KUBE_MASTER (external IP: $KUBE_MASTER_IP)"
@@ -201,16 +215,15 @@ function kube-up {
find-release-tars
upload-server-tars
# Build up start up script for master
local kube_temp=$(mktemp -d -t kubernetes.XXXXXX)
trap 'rm -rf "${kube_temp}"' EXIT
ensure-temp-dir
get-password
python "${KUBE_ROOT}/third_party/htpasswd/htpasswd.py" \
-b -c "${kube_temp}/htpasswd" "$KUBE_USER" "$KUBE_PASSWORD"
local htpasswd=$(cat "${kube_temp}/htpasswd")
-b -c "${KUBE_TEMP}/htpasswd" "$KUBE_USER" "$KUBE_PASSWORD"
local htpasswd
htpasswd=$(cat "${KUBE_TEMP}/htpasswd")
if ! gcutil getnetwork "${NETWORK}"; then
if ! gcutil getnetwork "${NETWORK}" >/dev/null 2>&1; then
echo "Creating new network for: ${NETWORK}"
# The network needs to be created synchronously or we have a race. The
# firewalls can be added concurrent with instance creation.
@@ -232,12 +245,12 @@ function kube-up {
fi
echo "Starting VMs and configuring firewalls"
gcutil addfirewall ${MASTER_NAME}-https \
--project ${PROJECT} \
gcutil addfirewall "${MASTER_NAME}-https" \
--project "${PROJECT}" \
--norespect_terminal_width \
--sleep_between_polls "${POLL_SLEEP_INTERVAL}" \
--network ${NETWORK} \
--target_tags ${MASTER_TAG} \
--network "${NETWORK}" \
--target_tags "${MASTER_TAG}" \
--allowed tcp:443 &
(
@@ -251,67 +264,66 @@ function kube-up {
echo "readonly MASTER_HTPASSWD='${htpasswd}'"
grep -v "^#" "${KUBE_ROOT}/cluster/gce/templates/download-release.sh"
grep -v "^#" "${KUBE_ROOT}/cluster/gce/templates/salt-master.sh"
) > "${kube_temp}/master-start.sh"
) > "${KUBE_TEMP}/master-start.sh"
gcutil addinstance ${MASTER_NAME}\
--project ${PROJECT} \
gcutil addinstance "${MASTER_NAME}" \
--project "${PROJECT}" \
--norespect_terminal_width \
--sleep_between_polls "${POLL_SLEEP_INTERVAL}" \
--zone ${ZONE} \
--machine_type ${MASTER_SIZE} \
--image ${IMAGE} \
--tags ${MASTER_TAG} \
--network ${NETWORK} \
--zone "${ZONE}" \
--machine_type "${MASTER_SIZE}" \
--image "${IMAGE}" \
--tags "${MASTER_TAG}" \
--network "${NETWORK}" \
--service_account_scopes="storage-ro,compute-rw" \
--automatic_restart \
--metadata_from_file "startup-script:${kube_temp}/master-start.sh" &
--metadata_from_file "startup-script:${KUBE_TEMP}/master-start.sh" &
for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
(
echo "#! /bin/bash"
echo "MASTER_NAME='${MASTER_NAME}'"
echo "MINION_IP_RANGE=${MINION_IP_RANGES[$i]}"
echo "MINION_IP_RANGE='${MINION_IP_RANGES[$i]}'"
grep -v "^#" "${KUBE_ROOT}/cluster/gce/templates/salt-minion.sh"
) > "${kube_temp}/minion-start-${i}.sh"
) > "${KUBE_TEMP}/minion-start-${i}.sh"
gcutil addfirewall ${MINION_NAMES[$i]}-all \
--project ${PROJECT} \
gcutil addfirewall "${MINION_NAMES[$i]}-all" \
--project "${PROJECT}" \
--norespect_terminal_width \
--sleep_between_polls "${POLL_SLEEP_INTERVAL}" \
--network ${NETWORK} \
--allowed_ip_sources ${MINION_IP_RANGES[$i]} \
--network "${NETWORK}" \
--allowed_ip_sources "${MINION_IP_RANGES[$i]}" \
--allowed "tcp,udp,icmp,esp,ah,sctp" &
gcutil addinstance ${MINION_NAMES[$i]} \
--project ${PROJECT} \
--project "${PROJECT}" \
--norespect_terminal_width \
--sleep_between_polls "${POLL_SLEEP_INTERVAL}" \
--zone ${ZONE} \
--machine_type ${MINION_SIZE} \
--image ${IMAGE} \
--tags ${MINION_TAG} \
--network ${NETWORK} \
--service_account_scopes=${MINION_SCOPES} \
--zone "${ZONE}" \
--machine_type "${MINION_SIZE}" \
--image "${IMAGE}" \
--tags "${MINION_TAG}" \
--network "${NETWORK}" \
--service_account_scopes "${MINION_SCOPES}" \
--automatic_restart \
--can_ip_forward \
--metadata_from_file "startup-script:${kube_temp}/minion-start-${i}.sh" &
--metadata_from_file "startup-script:${KUBE_TEMP}/minion-start-${i}.sh" &
gcutil addroute ${MINION_NAMES[$i]} ${MINION_IP_RANGES[$i]} \
--project ${PROJECT} \
gcutil addroute "${MINION_NAMES[$i]}" "${MINION_IP_RANGES[$i]}" \
--project "${PROJECT}" \
--norespect_terminal_width \
--sleep_between_polls "${POLL_SLEEP_INTERVAL}" \
--network ${NETWORK} \
--next_hop_instance ${ZONE}/instances/${MINION_NAMES[$i]} &
--network "${NETWORK}" \
--next_hop_instance "${ZONE}/instances/${MINION_NAMES[$i]}" &
done
local fail=0
local job
for job in `jobs -p`
do
wait $job || let "fail+=1"
for job in $(jobs -p); do
wait "${job}" || fail=$((fail + 1))
done
if (( $fail != 0 )); then
echo "${fail} commands failed. Exiting."
echo "${fail} commands failed. Exiting." >&2
exit 2
fi
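
The loop above reaps the backgrounded gcutil calls and counts failures rather
than dying on the first one; the "wait job || ..." form matters because
errexit ignores failures whose status is consumed by ||. A condensed sketch:

  fail=0
  sleep 1 & false & sleep 1 &   # three background jobs; 'false' will fail
  for job in $(jobs -p); do
    wait "${job}" || fail=$((fail + 1))
  done
  echo "${fail} job(s) failed"  # prints: 1 job(s) failed
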
@@ -324,8 +336,8 @@ function kube-up {
echo " up."
echo
until $(curl --insecure --user ${KUBE_USER}:${KUBE_PASSWORD} --max-time 5 \
--fail --output /dev/null --silent https://${KUBE_MASTER_IP}/api/v1beta1/pods); do
until curl --insecure --user "${KUBE_USER}:${KUBE_PASSWORD}" --max-time 5 \
--fail --output /dev/null --silent "https://${KUBE_MASTER_IP}/api/v1beta1/pods"; do
printf "."
sleep 2
done
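
Dropping the "$( ... )" wrapper around curl fixes a genuine trap, not just
style: "until $(cmd)" re-parses cmd's output as a command. It only behaved
here because --output /dev/null kept the output empty (an empty command
inherits the substitution's exit status); any stray output would have been
executed. Sketch (hypothetical URL):

  until $(echo true); do sleep 1; done   # each test actually runs 'true'
  until curl --fail --silent --max-time 5 "https://example.com/healthz" \
      >/dev/null; do
    sleep 2   # tests curl's own exit status, as intended
  done
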
@@ -340,12 +352,12 @@ function kube-up {
local rc # Capture return code without exiting because of errexit bash option
for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
# Make sure docker is installed
gcutil ssh ${MINION_NAMES[$i]} which docker >/dev/null && rc=$? || rc=$?
if [[ "$rc" != "0" ]]; then
echo "Docker failed to install on ${MINION_NAMES[$i]}. Your cluster is unlikely to work correctly."
echo "Please run ./cluster/kube-down.sh and re-create the cluster. (sorry!)"
exit 1
fi
gcutil ssh "${MINION_NAMES[$i]}" which docker >/dev/null || {
echo "Docker failed to install on ${MINION_NAMES[$i]}. Your cluster is unlikely" >&2
echo "to work correctly. Please run ./cluster/kube-down.sh and re-create the" >&2
echo "cluster. (sorry!)" >&2
exit 1
}
done
echo
@@ -387,43 +399,43 @@ function kube-down {
echo "Bringing down cluster"
gcutil deletefirewall \
--project ${PROJECT} \
--project "${PROJECT}" \
--norespect_terminal_width \
--sleep_between_polls "${POLL_SLEEP_INTERVAL}" \
--force \
${MASTER_NAME}-https &
"${MASTER_NAME}-https" &
gcutil deleteinstance \
--project ${PROJECT} \
--project "${PROJECT}" \
--norespect_terminal_width \
--sleep_between_polls "${POLL_SLEEP_INTERVAL}" \
--force \
--delete_boot_pd \
--zone ${ZONE} \
${MASTER_NAME} &
--zone "${ZONE}" \
"${MASTER_NAME}" &
gcutil deletefirewall \
--project ${PROJECT} \
--project "${PROJECT}" \
--norespect_terminal_width \
--sleep_between_polls "${POLL_SLEEP_INTERVAL}" \
--force \
${MINION_NAMES[*]/%/-all} &
"${MINION_NAMES[@]/%/-all}" &
gcutil deleteinstance \
--project ${PROJECT} \
--project "${PROJECT}" \
--norespect_terminal_width \
--sleep_between_polls "${POLL_SLEEP_INTERVAL}" \
--force \
--delete_boot_pd \
--zone ${ZONE} \
${MINION_NAMES[*]} &
--zone "${ZONE}" \
"${MINION_NAMES[@]}" &
gcutil deleteroute \
--project ${PROJECT} \
--project "${PROJECT}" \
--norespect_terminal_width \
--sleep_between_polls "${POLL_SLEEP_INTERVAL}" \
--force \
${MINION_NAMES[*]} &
"${MINION_NAMES[@]}" &
wait
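
Note the switch from ${MINION_NAMES[*]} to "${MINION_NAMES[@]}" in kube-down:
quoted [@] yields one word per element, while [*] joins the elements into a
single word, and either form unquoted is subject to word splitting and
globbing. Sketch:

  names=("minion-1" "minion-2")
  printf '<%s>\n' "${names[@]}"         # <minion-1> <minion-2>  (two args)
  printf '<%s>\n' "${names[*]}"         # <minion-1 minion-2>    (one arg)
  printf '<%s>\n' "${names[@]/%/-all}"  # <minion-1-all> <minion-2-all>
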
@@ -442,13 +454,13 @@ function kube-push {
echo "#! /bin/bash"
echo "mkdir -p /var/cache/kubernetes-install"
echo "cd /var/cache/kubernetes-install"
echo "readonly SERVER_BINARY_TAR_URL=${SERVER_BINARY_TAR_URL}"
echo "readonly SALT_TAR_URL=${SALT_TAR_URL}"
echo "readonly SERVER_BINARY_TAR_URL='${SERVER_BINARY_TAR_URL}'"
echo "readonly SALT_TAR_URL='${SALT_TAR_URL}'"
grep -v "^#" "${KUBE_ROOT}/cluster/gce/templates/download-release.sh"
echo "echo Executing configuration"
echo "sudo salt '*' mine.update"
echo "sudo salt --force-color '*' state.highstate"
) | gcutil ssh --project $PROJECT --zone $ZONE $KUBE_MASTER sudo bash
) | gcutil ssh --project "$PROJECT" --zone "$ZONE" "$KUBE_MASTER" sudo bash
get-password
@@ -470,10 +482,8 @@ function kube-push {
# Assumed Vars:
# KUBE_ROOT
function test-build-release {
# Build source
"${KUBE_ROOT}/hack/build-go.sh"
# Make a release
"${KUBE_ROOT}/release/release.sh"
"${KUBE_ROOT}/build/release.sh"
}
# Execute prior to running tests to initialize required structure. This is
@@ -492,13 +502,13 @@ function test-setup {
if [[ ${ALREADY_UP} -ne 1 ]]; then
# Open up port 80 & 8080 so common containers on minions can be reached
gcutil addfirewall \
--project ${PROJECT} \
--project "${PROJECT}" \
--norespect_terminal_width \
--sleep_between_polls "${POLL_SLEEP_INTERVAL}" \
--target_tags ${MINION_TAG} \
--target_tags "${MINION_TAG}" \
--allowed tcp:80,tcp:8080 \
--network ${NETWORK} \
${MINION_TAG}-${INSTANCE_PREFIX}-http-alt
--network "${NETWORK}" \
"${MINION_TAG}-${INSTANCE_PREFIX}-http-alt"
fi
}
@@ -511,10 +521,10 @@ function test-setup {
function test-teardown {
echo "Shutting down test cluster in background."
gcutil deletefirewall \
--project ${PROJECT} \
--project "${PROJECT}" \
--norespect_terminal_width \
--sleep_between_polls "${POLL_SLEEP_INTERVAL}" \
--force \
${MINION_TAG}-${INSTANCE_PREFIX}-http-alt || true > /dev/null
"${MINION_TAG}-${INSTANCE_PREFIX}-http-alt" || true > /dev/null
"${KUBE_ROOT}/cluster/kube-down.sh" > /dev/null
}

View File

@@ -16,8 +16,9 @@
# Tear down a Kubernetes cluster.
# exit on any error
set -e
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
source "${KUBE_ROOT}/cluster/kube-env.sh"

View File

@@ -19,8 +19,9 @@
# This will find the release tar, cause it to be downloaded, unpacked, installed
# and enacted.
# exit on any error
set -e
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
source "${KUBE_ROOT}/cluster/kube-env.sh"

View File

@@ -20,8 +20,9 @@
# that directly. If not then we assume we are doing development stuff and take
# the defaults in the release config.
# exit on any error
set -e
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
source "${KUBE_ROOT}/cluster/kube-env.sh"

View File

@@ -14,6 +14,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
source "${KUBE_ROOT}/cluster/kube-env.sh"
source "${KUBE_ROOT}/cluster/${KUBERNETES_PROVIDER}/util.sh"
@@ -22,34 +26,36 @@ source "${KUBE_ROOT}/cluster/${KUBERNETES_PROVIDER}/util.sh"
case "$(uname -s)" in
Darwin)
host_os=darwin
;;
Linux)
host_os=linux
;;
*)
echo "Unsupported host OS. Must be Linux or Mac OS X." >&2
exit 1
;;
esac
case "$(uname -m)" in
x86_64*)
host_arch=amd64
;;
i?86_64*)
host_arch=amd64
;;
amd64*)
host_arch=amd64
;;
arm*)
host_arch=arm
;;
i?86*)
host_arch=x86
;;
*)
echo "Unsupported host arch. Must be x86_64, 386 or arm." >&2
exit 1
echo "Unsupported host arch. Must be x86_64, 386 or arm." >&2
exit 1
;;
esac
kubecfg="${KUBE_ROOT}/_output/build/${host_os}/${host_arch}/kubecfg"
@@ -84,8 +90,8 @@ else
fi
detect-master > /dev/null
if [[ "$KUBE_MASTER_IP" != "" ]] && [[ "$KUBERNETES_MASTER" == "" ]]; then
if [[ -n "${KUBE_MASTER_IP-}" && -z "${KUBERNETES_MASTER-}" ]]; then
export KUBERNETES_MASTER=https://${KUBE_MASTER_IP}
fi
"$kubecfg" "${auth_config[@]}" "$@"
"$kubecfg" "${auth_config[@]:+${auth_config[@]}}" "$@"

View File

@@ -21,7 +21,8 @@ set -o errexit
set -o nounset
set -o pipefail
readonly SALT_ROOT=$(dirname "${BASH_SOURCE}")
SALT_ROOT=$(dirname "${BASH_SOURCE}")
readonly SALT_ROOT
readonly SERVER_BIN_TAR=${1-}
if [[ -z "$SERVER_BIN_TAR" ]]; then
@@ -31,7 +32,7 @@ fi
# Create a temp dir for untaring
KUBE_TEMP=$(mktemp -d -t kubernetes.XXXXXX)
trap "rm -rf ${KUBE_TEMP}" EXIT
trap 'rm -rf "${KUBE_TEMP}"' EXIT
# This file is meant to run on the master. It will install the salt configs
# into the appropriate place on the master.

View File

@@ -20,8 +20,9 @@
# that directly. If not then we assume we are doing development stuff and take
# the defaults in the release config.
# exit on any error
set -e
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
source "${KUBE_ROOT}/cluster/kube-env.sh"
@@ -44,13 +45,15 @@ fi
for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
# Grep returns an exit status of 1 when line is not found, so we need the : to always return a 0 exit status
count=$(grep -c ${MINION_NAMES[i]} ${MINIONS_FILE}) || :
if [ "$count" == "0" ]; then
if [[ "$count" == "0" ]]; then
echo "Failed to find ${MINION_NAMES[i]}, cluster is probably broken."
exit 1
fi
# Make sure the kubelet is healthy
if [ "$(curl -s --insecure --user ${KUBE_USER}:${KUBE_PASSWORD} https://${KUBE_MASTER_IP}/proxy/minion/${MINION_NAMES[$i]}/healthz)" != "ok" ]; then
curl_output=$(curl -s --insecure --user "${KUBE_USER}:${KUBE_PASSWORD}" \
"https://${KUBE_MASTER_IP}/proxy/minion/${MINION_NAMES[$i]}/healthz")
if [[ "${curl_output}" != "ok" ]]; then
echo "Kubelet failed to install on ${MINION_NAMES[$i]}. Your cluster is unlikely to work correctly."
echo "Please run ./cluster/kube-down.sh and re-create the cluster. (sorry!)"
exit 1

View File

@@ -14,6 +14,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
set -o errexit
set -o nounset
set -o pipefail
KUBE_COVER=" " KUBE_RACE=" " hack/test-go.sh "" -test.run="^X" -benchtime=1s -bench=. -benchmem

View File

@@ -199,6 +199,7 @@ export KUBE_GO_PACKAGE
set -o errexit
set -o nounset
set -o pipefail
go_pkg_dir="${KUBE_TARGET}/src/${KUBE_GO_PACKAGE}"
go_pkg_basedir=$(dirname "${go_pkg_dir}")
mkdir -p "${go_pkg_basedir}"

View File

@@ -16,6 +16,10 @@
# This script will build a dev release and push it to an existing cluster.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
# Build a release

View File

@@ -17,7 +17,9 @@
# This script will build a dev release and bring up a new cluster with that
# release.
set -e
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..

View File

@@ -17,9 +17,11 @@
# Launches an nginx container and verifies it can be reached. Assumes that
# we're being called by hack/e2e-test.sh (we use some env vars it sets up).
# Exit on error
set -e
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/cluster/kube-env.sh"
source "${KUBE_ROOT}/cluster/$KUBERNETES_PROVIDER/util.sh"
@@ -41,17 +43,17 @@ function teardown() {
trap "teardown" EXIT
POD_ID_LIST=$($KUBECFG '-template={{range.Items}}{{.ID}} {{end}}' -l replicationController=myNginx list pods)
pod_id_list=$($KUBECFG '-template={{range.Items}}{{.ID}} {{end}}' -l replicationController=myNginx list pods)
# Container turn up on a clean cluster can take a while for the docker image pull.
ALL_RUNNING=0
while [ $ALL_RUNNING -ne 1 ]; do
all_running=0
while [[ $all_running -ne 1 ]]; do
echo "Waiting for all containers in pod to come up."
sleep 5
ALL_RUNNING=1
for id in $POD_ID_LIST; do
CURRENT_STATUS=$($KUBECFG -template '{{and .CurrentState.Info.mynginx.State.Running .CurrentState.Info.net.State.Running}}' get pods/$id)
if [ "$CURRENT_STATUS" != "{0001-01-01 00:00:00 +0000 UTC}" ]; then
ALL_RUNNING=0
all_running=1
for id in $pod_id_list; do
current_status=$($KUBECFG -template '{{and .CurrentState.Info.mynginx.State.Running .CurrentState.Info.net.State.Running}}' get pods/$id) || true
if [[ "$current_status" != "{0001-01-01 00:00:00 +0000 UTC}" ]]; then
all_running=0
fi
done
done
@@ -65,9 +67,9 @@ sleep 5
# Verify that something is listening (nginx should give us a 404)
for (( i=0; i<${#KUBE_MINION_IP_ADDRESSES[@]}; i++)); do
IP_ADDRESS=${KUBE_MINION_IP_ADDRESSES[$i]}
echo "Trying to reach nginx instance that should be running at ${IP_ADDRESS}:8080..."
curl "http://${IP_ADDRESS}:8080"
ip_address=${KUBE_MINION_IP_ADDRESSES[$i]}
echo "Trying to reach nginx instance that should be running at ${ip_address}:8080..."
curl "http://${ip_address}:8080"
done
exit 0

View File

@@ -18,8 +18,11 @@
# works. Assumes that we're being called by hack/e2e-test.sh (we use some env
# vars it sets up).
set -e
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/cluster/kube-env.sh"
source "${KUBE_ROOT}/cluster/$KUBERNETES_PROVIDER/util.sh"

View File

@@ -20,6 +20,7 @@ set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/cluster/kube-env.sh"
source "${KUBE_ROOT}/cluster/$KUBERNETES_PROVIDER/util.sh"

View File

@@ -17,6 +17,10 @@
# Starts a Kubernetes cluster, runs the e2e test suite, and shuts it
# down.
set -o errexit
set -o nounset
set -o pipefail
# Use testing config
export KUBE_CONFIG_FILE="config-test.sh"
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
@@ -34,9 +38,6 @@ ALREADY_UP=${1:-0}
LEAVE_UP=${2:-0}
TEAR_DOWN=${3:-0}
# Exit on error
set -e
if [[ $TEAR_DOWN -ne 0 ]]; then
detect-project
trap test-teardown EXIT

View File

@@ -14,9 +14,13 @@
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
cd third_party
git clone https://github.com/coreos/etcd.git
cd etcd
git checkout ab4bcc18694644d12f0c038339d8d039072502b1
./build

View File

@@ -16,7 +16,9 @@
# This script installs std -race on Travis (see https://code.google.com/p/go/issues/detail?id=6479)
set -e
set -o errexit
set -o nounset
set -o pipefail
if [ "${TRAVIS}" == "true" ]; then
GO_VERSION=($(go version))

View File

@@ -17,18 +17,21 @@
# This command checks that the built commands can function together for
# simple scenarios. It does not require Docker so it can run in travis.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
source "${KUBE_ROOT}/hack/util.sh"
source "${KUBE_ROOT}/hack/config-go.sh"
function cleanup()
{
set +e
kill ${APISERVER_PID} 1>&2 2>/dev/null
kill ${CTLRMGR_PID} 1>&2 2>/dev/null
kill ${KUBELET_PID} 1>&2 2>/dev/null
kill ${PROXY_PID} 1>&2 2>/dev/null
kill ${ETCD_PID} 1>&2 2>/dev/null
[[ -n ${APISERVER_PID-} ]] && kill ${APISERVER_PID} 1>&2 2>/dev/null
[[ -n ${CTLRMGR_PID-} ]] && kill ${CTLRMGR_PID} 1>&2 2>/dev/null
[[ -n ${KUBELET_PID-} ]] && kill ${KUBELET_PID} 1>&2 2>/dev/null
[[ -n ${PROXY_PID-} ]] && kill ${PROXY_PID} 1>&2 2>/dev/null
[[ -n ${ETCD_PID-} ]] && kill ${ETCD_PID} 1>&2 2>/dev/null
rm -rf ${ETCD_DIR} 1>&2 2>/dev/null
echo
echo "Complete"

View File

@@ -14,8 +14,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
source "${KUBE_ROOT}/hack/config-go.sh"
@@ -138,7 +139,7 @@ if [[ "${iterations}" -gt 1 ]]; then
fi
fi
if [[ -n "$1" ]]; then
if [[ -n "${1-}" ]]; then
covdir="/tmp/k8s_coverage/$(date "+%s")"
echo saving coverage output in "${covdir}"
for arg; do

View File

@@ -14,6 +14,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
result=0

View File

@@ -16,6 +16,11 @@
# GoFmt apparently is changing @ head...
set -o errexit
set -o nounset
set -o pipefail
GO_VERSION=($(go version))
if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.2|go1.3') ]]; then