#!/bin/bash

# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Common utilities, variables and checks for all build scripts.
set -o errexit
set -o nounset
set -o pipefail

KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
cd "${KUBE_ROOT}"

# This'll canonicalize the path
KUBE_ROOT=$PWD

source hack/config-go.sh

# Incoming options
#
readonly KUBE_SKIP_CONFIRMATIONS="${KUBE_SKIP_CONFIRMATIONS:-n}"
readonly KUBE_BUILD_RUN_IMAGES="${KUBE_BUILD_RUN_IMAGES:-n}"
readonly KUBE_GCS_UPLOAD_RELEASE="${KUBE_GCS_UPLOAD_RELEASE:-n}"
readonly KUBE_GCS_NO_CACHING="${KUBE_GCS_NO_CACHING:-y}"
readonly KUBE_GCS_MAKE_PUBLIC="${KUBE_GCS_MAKE_PUBLIC:-y}"
# KUBE_GCS_RELEASE_BUCKET default: kubernetes-releases-${project_hash}
# KUBE_GCS_RELEASE_PREFIX default: devel/
# KUBE_GCS_DOCKER_REG_PREFIX default: docker-reg/
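#
# Example (illustrative; the bucket name is hypothetical): these options are
# meant to be overridden from the environment when invoking the build scripts,
# e.g.:
#   KUBE_GCS_UPLOAD_RELEASE=y KUBE_GCS_RELEASE_BUCKET=my-kube-releases build/release.sh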

# Constants
readonly KUBE_BUILD_IMAGE_REPO=kube-build
# These get set in verify_prereqs with a unique hash based on KUBE_ROOT
#   KUBE_BUILD_IMAGE_TAG=<hash>
#   KUBE_BUILD_IMAGE="${KUBE_BUILD_IMAGE_REPO}:${KUBE_BUILD_IMAGE_TAG}"
#   KUBE_BUILD_CONTAINER_NAME=kube-build-<hash>
readonly KUBE_BUILD_IMAGE_CROSS_TAG=cross
readonly KUBE_BUILD_IMAGE_CROSS="${KUBE_BUILD_IMAGE_REPO}:${KUBE_BUILD_IMAGE_CROSS_TAG}"
readonly KUBE_BUILD_GOLANG_VERSION=1.3

readonly KUBE_GO_PACKAGE="github.com/GoogleCloudPlatform/kubernetes"

# We set up a volume so that we have the same _output directory from one run of
# the container to the next.
#
# Note that here "LOCAL" is local to the docker daemon. In the boot2docker case
# this is still inside the VM. We use the same directory in both cases though.
readonly LOCAL_OUTPUT_ROOT="${KUBE_ROOT}/_output"
readonly LOCAL_OUTPUT_BUILD="${LOCAL_OUTPUT_ROOT}/build"
readonly LOCAL_OUTPUT_IMAGE_STAGING="${LOCAL_OUTPUT_ROOT}/images"
readonly REMOTE_OUTPUT_ROOT="/go/src/${KUBE_GO_PACKAGE}/_output"
readonly REMOTE_OUTPUT_DIR="${REMOTE_OUTPUT_ROOT}/build"
readonly DOCKER_MOUNT_ARGS=(--volume "${LOCAL_OUTPUT_BUILD}:${REMOTE_OUTPUT_DIR}")
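# Example (illustrative; the host path is hypothetical): with
# KUBE_ROOT=/home/user/kubernetes, DOCKER_MOUNT_ARGS mounts
#   /home/user/kubernetes/_output/build
# at
#   /go/src/github.com/GoogleCloudPlatform/kubernetes/_output/build
# inside the build container.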

readonly KUBE_CLIENT_BINARIES=(
  kubecfg
  kubectl
)

readonly KUBE_SERVER_BINARIES=(
  apiserver
  controller-manager
  kubelet
  proxy
  scheduler
)

readonly KUBE_SERVER_PLATFORMS=(
  linux/amd64
)

readonly KUBE_RUN_IMAGE_BASE="kubernetes"
readonly KUBE_RUN_IMAGES=(
  apiserver
  controller-manager
  proxy
  scheduler
  kubelet
  bootstrap
)

# This is where the final release artifacts are created locally
readonly RELEASE_DIR="${LOCAL_OUTPUT_ROOT}/release-tars"

# ---------------------------------------------------------------------------
# Basic setup functions

# Verify that the right utilities and such are installed for building Kube. Set
# up some dynamic constants.
#
# Vars set:
#   KUBE_BUILD_IMAGE_TAG
#   KUBE_BUILD_IMAGE
#   KUBE_BUILD_CONTAINER_NAME
#   KUBE_ROOT_HASH
function kube::build::verify_prereqs() {
  if [[ -z "$(which docker)" ]]; then
    echo "Can't find 'docker' in PATH, please fix and retry." >&2
    echo "See https://docs.docker.com/installation/#installation for installation instructions." >&2
    return 1
  fi

  if kube::build::is_osx; then
    if [[ -z "$(which boot2docker)" ]]; then
      echo "It looks like you are running on Mac OS X and boot2docker can't be found." >&2
      echo "See: https://docs.docker.com/installation/mac/" >&2
      return 1
    fi
    if [[ $(boot2docker status) != "running" ]]; then
      echo "boot2docker VM isn't started. Please run 'boot2docker start'" >&2
      return 1
    fi
  fi

  if ! docker info > /dev/null 2>&1 ; then
    {
      echo "Can't connect to 'docker' daemon. Please fix and retry."
      echo
      echo "Possible causes:"
      echo "  - On Mac OS X, boot2docker VM isn't started"
      echo "  - On Mac OS X, DOCKER_HOST env variable isn't set appropriately"
      echo "  - On Linux, user isn't in 'docker' group. Add and relogin."
      echo "    - Something like 'sudo usermod -a -G docker ${USER-user}'"
      echo "    - RHEL7 bug and workaround: https://bugzilla.redhat.com/show_bug.cgi?id=1119282#c8"
      echo "  - On Linux, Docker daemon hasn't been started or has crashed"
    } >&2
    return 1
  fi

  KUBE_ROOT_HASH=$(kube::build::short_hash "$KUBE_ROOT")
  KUBE_BUILD_IMAGE_TAG="build-${KUBE_ROOT_HASH}"
  KUBE_BUILD_IMAGE="${KUBE_BUILD_IMAGE_REPO}:${KUBE_BUILD_IMAGE_TAG}"
  KUBE_BUILD_CONTAINER_NAME="kube-build-${KUBE_ROOT_HASH}"
}

# ---------------------------------------------------------------------------
# Utility functions

function kube::build::is_osx() {
  [[ "$(uname)" == "Darwin" ]]
}

function kube::build::clean_output() {
  # Clean out the output directory if it exists.
  if kube::build::build_image_built ; then
    echo "+++ Cleaning out _output/build via docker build image"
    kube::build::run_build_command bash -c "rm -rf '${REMOTE_OUTPUT_DIR}'/*"
  else
    echo "!!! Build image not built. Cannot clean via docker build image."
  fi

  echo "+++ Cleaning out local _output directory"
  rm -rf "${LOCAL_OUTPUT_ROOT}"
}

# Make sure the _output directory is created and mountable by docker
function kube::build::prepare_output() {
  mkdir -p "${LOCAL_OUTPUT_ROOT}"

  # On RHEL/Fedora SELinux is enabled by default and currently breaks docker
  # volume mounts. We can work around this by explicitly adding a security
  # context to the _output directory.
  # Details: https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux/7/html/Resource_Management_and_Linux_Containers_Guide/sec-Sharing_Data_Across_Containers.html#sec-Mounting_a_Host_Directory_to_a_Container
  if which selinuxenabled >/dev/null && \
      selinuxenabled && \
      which chcon >/dev/null ; then
    if [[ ! $(ls -Zd "${LOCAL_OUTPUT_ROOT}") =~ svirt_sandbox_file_t ]] ; then
      echo "+++ Applying SELinux policy to '_output' directory. If this fails it may be"
      echo "    because you have root owned files under _output. Delete those and continue"
      chcon -Rt svirt_sandbox_file_t "${LOCAL_OUTPUT_ROOT}"
    fi
  fi
}

# Detect if a specific image exists
#
# $1 - image repo name
# $2 - image tag
function kube::build::docker_image_exists() {
  [[ -n $1 && -n $2 ]] || {
    echo "!!! Internal error. Image not specified in docker_image_exists." >&2
    exit 2
  }

  # We cannot just specify the IMAGE here as `docker images` doesn't behave as
  # expected. See: https://github.com/docker/docker/issues/8048
  docker images | grep -Eq "^${1}\s+${2}\s+"
}
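
# Example usage (illustrative; the repo and tag shown are hypothetical):
#   if kube::build::docker_image_exists kube-build build-1b2f0; then
#     echo "build image already present"
#   fi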

# Takes $1 and computes a short hash for it. Useful for unique tag generation
function kube::build::short_hash() {
  [[ $# -eq 1 ]] || {
    echo "!!! Internal error. No data passed to short_hash." >&2
    exit 2
  }

  local short_hash
  if which md5 >/dev/null 2>&1; then
    short_hash=$(md5 -q -s "$1")
  else
    short_hash=$(echo -n "$1" | md5sum)
  fi
  echo "${short_hash:0:5}"
}
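
# The hash is deterministic for a given input, so the same checkout path always
# maps to the same build image tag. Example (illustrative; the output shown is
# hypothetical):
#   kube::build::short_hash "/home/user/kubernetes"   # => "1b2f0"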

# ---------------------------------------------------------------------------
# Building

function kube::build::build_image_built() {
  kube::build::docker_image_exists "${KUBE_BUILD_IMAGE_REPO}" "${KUBE_BUILD_IMAGE_TAG}"
}

function kube::build::ensure_golang() {
  kube::build::docker_image_exists golang "${KUBE_BUILD_GOLANG_VERSION}" || {
    [[ ${KUBE_SKIP_CONFIRMATIONS} =~ ^[yY]$ ]] || {
      echo "You don't have a local copy of the golang docker image. This image is 450MB."
      read -p "Download it now? [y/n] " -n 1 -r
      echo
      [[ $REPLY =~ ^[yY]$ ]] || {
        echo "Aborting." >&2
        exit 1
      }
    }

    echo "+++ Pulling docker image: golang:${KUBE_BUILD_GOLANG_VERSION}"
    docker pull "golang:${KUBE_BUILD_GOLANG_VERSION}"
  }
}

# Set up the context directory for the kube-build image and build it.
function kube::build::build_image() {
  local -r build_context_dir="${LOCAL_OUTPUT_IMAGE_STAGING}/${KUBE_BUILD_IMAGE}"
  local -r source=(
    api
    build
    cmd
    examples
    Godeps/Godeps.json
    Godeps/_workspace/src
    hack
    LICENSE
    pkg
    plugin
    README.md
    third_party
  )

  kube::build::build_image_cross

  mkdir -p "${build_context_dir}"
  tar czf "${build_context_dir}/kube-source.tar.gz" "${source[@]}"
  cat >"${build_context_dir}/kube-version-defs" <<EOF
KUBE_LD_FLAGS="$(kube::version_ldflags)"
EOF
  cp build/build-image/Dockerfile "${build_context_dir}/Dockerfile"
  kube::build::docker_build "${KUBE_BUILD_IMAGE}" "${build_context_dir}"
}

# Build the kubernetes golang cross base image.
function kube::build::build_image_cross() {
  kube::build::ensure_golang

  local -r build_context_dir="${LOCAL_OUTPUT_ROOT}/images/${KUBE_BUILD_IMAGE}/cross"
  mkdir -p "${build_context_dir}"
  cp build/build-image/cross/Dockerfile "${build_context_dir}/Dockerfile"
  kube::build::docker_build "${KUBE_BUILD_IMAGE_CROSS}" "${build_context_dir}"
}

# Builds the runtime image. Assumes that the appropriate binaries are already
# built and in _output/build/.
function kube::build::run_image() {
  [[ ${KUBE_BUILD_RUN_IMAGES} =~ ^[yY]$ ]] || return 0

  local -r build_context_base="${LOCAL_OUTPUT_IMAGE_STAGING}/${KUBE_RUN_IMAGE_BASE}"

  # First build the base image. This one brings in all of the binaries.
  mkdir -p "${build_context_base}"
  tar czf "${build_context_base}/kube-bins.tar.gz" \
    -C "${LOCAL_OUTPUT_ROOT}/build/linux/amd64" \
    "${KUBE_RUN_IMAGES[@]}"
  cp -R build/run-images/base/* "${build_context_base}/"
  kube::build::docker_build "${KUBE_RUN_IMAGE_BASE}" "${build_context_base}"

  local b
  for b in "${KUBE_RUN_IMAGES[@]}" ; do
    local sub_context_dir="${build_context_base}-$b"
    mkdir -p "${sub_context_dir}"
    cp -R build/run-images/$b/* "${sub_context_dir}/"
    kube::build::docker_build "${KUBE_RUN_IMAGE_BASE}-$b" "${sub_context_dir}"
  done
}

# Build a docker image from a Dockerfile.
# $1 is the name of the image to build
# $2 is the location of the "context" directory, with the Dockerfile at the root.
function kube::build::docker_build() {
  local -r image=$1
  local -r context_dir=$2
  local -ra build_cmd=(docker build -t "${image}" "${context_dir}")

  echo "+++ Building Docker image ${image}."
  local docker_output
  docker_output=$("${build_cmd[@]}" 2>&1) || {
    cat <<EOF >&2
+++ Docker build command failed for ${image}

${docker_output}

To retry manually, run:

${build_cmd[*]}

EOF
    return 1
  }
}

function kube::build::clean_image() {
  local -r image=$1

  echo "+++ Deleting docker image ${image}"
  docker rmi "${image}" 2> /dev/null || true
}

function kube::build::clean_images() {
  kube::build::clean_image "${KUBE_BUILD_IMAGE}"

  kube::build::clean_image "${KUBE_RUN_IMAGE_BASE}"

  local b
  for b in "${KUBE_RUN_IMAGES[@]}" ; do
    kube::build::clean_image "${KUBE_RUN_IMAGE_BASE}-${b}"
  done

  echo "+++ Cleaning all other untagged docker images"
  docker rmi $(docker images -q --filter 'dangling=true') 2> /dev/null || true
}

# Run a command in the kube-build image. This assumes that the image has
# already been built. This will sync out all output data from the build.
function kube::build::run_build_command() {
  [[ $# != 0 ]] || { echo "Invalid input." >&2; return 4; }

  kube::build::prepare_output

  local -a docker_run_opts=(
    "--name=${KUBE_BUILD_CONTAINER_NAME}"
    "${DOCKER_MOUNT_ARGS[@]}"
  )

  # If we have stdin we can run interactive. This allows things like 'shell.sh'
  # to work. However, if we run this way and don't have stdin, then it ends up
  # running in a daemon-ish mode. So if we don't have a stdin, we explicitly
  # attach stderr/stdout but don't bother asking for a tty.
  if [[ -t 0 ]]; then
    docker_run_opts+=(--interactive --tty)
  else
    docker_run_opts+=(--attach=stdout --attach=stderr)
  fi

  local -ra docker_cmd=(
    docker run "${docker_run_opts[@]}" "${KUBE_BUILD_IMAGE}")

  # Remove the container if it is left over from some previous aborted run
  docker rm "${KUBE_BUILD_CONTAINER_NAME}" >/dev/null 2>&1 || true
  "${docker_cmd[@]}" "$@"

  # Remove the container after we run. '--rm' might be appropriate but it
  # appears that sometimes it fails. See
  # https://github.com/docker/docker/issues/3968
  docker rm "${KUBE_BUILD_CONTAINER_NAME}" >/dev/null 2>&1 || true
}
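
# Example usage (illustrative; the command shown is hypothetical, callers pass
# whatever should run inside the build container):
#   kube::build::run_build_command hack/build-go.sh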

# Test if the output directory is remote (and can only be accessed through
# docker) or if it is "local" and we can access the output without going through
# docker.
function kube::build::is_output_remote() {
  rm -f "${LOCAL_OUTPUT_BUILD}/test_for_remote"
  kube::build::run_build_command touch "${REMOTE_OUTPUT_DIR}/test_for_remote"

  [[ ! -e "${LOCAL_OUTPUT_BUILD}/test_for_remote" ]]
}

# If the Docker server is remote, copy the results back out.
function kube::build::copy_output() {
  if kube::build::is_output_remote; then
    # When we are on the Mac with boot2docker (or to a remote Docker in any
    # other situation) we need to copy the results back out. Ideally we would
    # leave the container around and use 'docker cp' to copy the results out.
    # However, that doesn't work for mounted volumes currently
    # (https://github.com/dotcloud/docker/issues/1992). And it is just plain
    # broken (https://github.com/dotcloud/docker/issues/6483).
    #
    # The easiest thing I (jbeda) could figure out was to launch another
    # container pointed at the same volume, tar the output directory and ship
    # that tar over stdout.

    echo "+++ Syncing back _output directory from remote Docker host"
    rm -rf "${LOCAL_OUTPUT_BUILD}"
    mkdir -p "${LOCAL_OUTPUT_BUILD}"

    # The '</dev/null' here makes us run docker in a "non-interactive" mode. Not
    # doing this corrupts the output stream.
    kube::build::run_build_command sh -c "tar c -C ${REMOTE_OUTPUT_DIR} . ; sleep 1" </dev/null \
      | tar xv -C "${LOCAL_OUTPUT_BUILD}"

    # I (jbeda) also tried getting rsync working using 'docker run' as the
    # 'remote shell'. This mostly worked but there was a hang when
    # closing/finishing things off. Ug.
    #
    # local DOCKER="docker run -i --rm --name=${KUBE_BUILD_CONTAINER_NAME} ${DOCKER_MOUNT} ${KUBE_BUILD_IMAGE}"
    # DOCKER+=" bash -c 'shift ; exec \"\$@\"' --"
    # rsync --blocking-io -av -e "${DOCKER}" foo:${REMOTE_OUTPUT_DIR}/ ${LOCAL_OUTPUT_BUILD}
  else
    echo "+++ Output directory is local. No need to copy results out."
  fi
}

# ---------------------------------------------------------------------------
# Build final release artifacts

function kube::release::package_tarballs() {
  # Clean out any old releases
  rm -rf "${RELEASE_DIR}"
  mkdir -p "${RELEASE_DIR}"

  kube::release::package_client_tarballs
  kube::release::package_server_tarballs
  kube::release::package_salt_tarball
  kube::release::package_full_tarball
}

# Package up all of the cross compiled clients. Over time this should grow into
# a full SDK
function kube::release::package_client_tarballs() {
  # Find all of the built kubecfg binaries
  local platform platforms
  platforms=($(cd "${LOCAL_OUTPUT_ROOT}/build" ; echo */*))
  for platform in "${platforms[@]}" ; do
    local platform_tag=${platform/\//-} # Replace a "/" with a "-"
    echo "+++ Building tarball: client $platform_tag"

    local release_stage="${LOCAL_OUTPUT_ROOT}/release-stage/client/${platform_tag}/kubernetes"
    rm -rf "${release_stage}"
    mkdir -p "${release_stage}/client/bin"

    # This fancy expression will expand to prepend a path
    # (${LOCAL_OUTPUT_ROOT}/build/${platform}/) to every item in the
    # KUBE_CLIENT_BINARIES array.
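    # For example (hypothetical values): with platform="darwin/amd64", the cp
    # below expands to something like
    #   cp ${LOCAL_OUTPUT_ROOT}/build/darwin/amd64/kubecfg \
    #      ${LOCAL_OUTPUT_ROOT}/build/darwin/amd64/kubectl "${release_stage}/client/bin/"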
    cp "${KUBE_CLIENT_BINARIES[@]/#/${LOCAL_OUTPUT_ROOT}/build/${platform}/}" \
      "${release_stage}/client/bin/"

    local package_name="${RELEASE_DIR}/kubernetes-client-${platform_tag}.tar.gz"
    kube::release::create_tarball "${package_name}" "${release_stage}/.."
  done
}

# Package up all of the server binaries
function kube::release::package_server_tarballs() {
  local platform
  for platform in "${KUBE_SERVER_PLATFORMS[@]}" ; do
    local platform_tag=${platform/\//-} # Replace a "/" with a "-"
    echo "+++ Building tarball: server $platform_tag"

    local release_stage="${LOCAL_OUTPUT_ROOT}/release-stage/server/${platform_tag}/kubernetes"
    rm -rf "${release_stage}"
    mkdir -p "${release_stage}/server/bin"

    # This fancy expression will expand to prepend a path
    # (${LOCAL_OUTPUT_ROOT}/build/${platform}/) to every item in the
    # KUBE_SERVER_BINARIES array.
    cp "${KUBE_SERVER_BINARIES[@]/#/${LOCAL_OUTPUT_ROOT}/build/${platform}/}" \
      "${release_stage}/server/bin/"

    local package_name="${RELEASE_DIR}/kubernetes-server-${platform_tag}.tar.gz"
    kube::release::create_tarball "${package_name}" "${release_stage}/.."
  done
}

# Package up the salt configuration tree. This is an optional helper to getting
# a cluster up and running.
function kube::release::package_salt_tarball() {
  echo "+++ Building tarball: salt"

  local release_stage="${LOCAL_OUTPUT_ROOT}/release-stage/salt/kubernetes"
  rm -rf "${release_stage}"
  mkdir -p "${release_stage}"

  cp -R "${KUBE_ROOT}/cluster/saltbase" "${release_stage}/"

  local package_name="${RELEASE_DIR}/kubernetes-salt.tar.gz"
  kube::release::create_tarball "${package_name}" "${release_stage}/.."
}

# This is all the stuff you need to run/install kubernetes. This includes:
#   - precompiled binaries for client
#   - Cluster spin up/down scripts and configs for various cloud providers
#   - tarballs for server binary and salt configs that are ready to be uploaded
#     to master by whatever means appropriate.
function kube::release::package_full_tarball() {
  echo "+++ Building tarball: full"

  local release_stage="${LOCAL_OUTPUT_ROOT}/release-stage/full/kubernetes"
  rm -rf "${release_stage}"
  mkdir -p "${release_stage}"

  cp -R "${LOCAL_OUTPUT_ROOT}/build" "${release_stage}/platforms"

  # We want everything in /cluster except saltbase. That is only needed on the
  # server.
  cp -R "${KUBE_ROOT}/cluster" "${release_stage}/"
  rm -rf "${release_stage}/cluster/saltbase"

  mkdir -p "${release_stage}/server"
  cp "${RELEASE_DIR}/kubernetes-salt.tar.gz" "${release_stage}/server/"
  cp "${RELEASE_DIR}"/kubernetes-server-*.tar.gz "${release_stage}/server/"

  mkdir -p "${release_stage}/third_party"
  cp -R "${KUBE_ROOT}/third_party/htpasswd" "${release_stage}/third_party/htpasswd"

  cp -R "${KUBE_ROOT}/examples" "${release_stage}/"
  cp "${KUBE_ROOT}/README.md" "${release_stage}/"
  cp "${KUBE_ROOT}/LICENSE" "${release_stage}/"
  cp "${KUBE_ROOT}/Vagrantfile" "${release_stage}/"

  local package_name="${RELEASE_DIR}/kubernetes.tar.gz"
  kube::release::create_tarball "${package_name}" "${release_stage}/.."
}

# Build a release tarball. $1 is the output tar name. $2 is the base directory
# of the files to be packaged. This assumes that ${2}/kubernetes is what is
# being packaged.
function kube::release::create_tarball() {
  local tarfile=$1
  local stagingdir=$2

  # Find gnu tar if it is available
  local tar=tar
  if which gtar > /dev/null; then
    tar=gtar
  fi

  local tar_cmd=("$tar" "czf" "${tarfile}" "-C" "${stagingdir}" "kubernetes")
  if "$tar" --version | grep -q GNU; then
    tar_cmd=("${tar_cmd[@]}" "--owner=0" "--group=0")
  else
    echo "  !!! GNU tar not available. User names will be embedded in the output and"
    echo "      release tars will not be official. Build on Linux or install GNU tar"
    echo "      on Mac OS X (brew install gnu-tar)"
  fi

  "${tar_cmd[@]}"
}
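
# Example usage (as called by the packaging functions above):
#   kube::release::create_tarball "${package_name}" "${release_stage}/.."
# The --owner=0/--group=0 flags keep local user names out of official release
# tars so the same sources produce the same archive on any build machine.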

# ---------------------------------------------------------------------------
# GCS Release

function kube::release::gcs::release() {
  [[ ${KUBE_GCS_UPLOAD_RELEASE} =~ ^[yY]$ ]] || return 0

  kube::release::gcs::verify_prereqs
  kube::release::gcs::ensure_release_bucket
  kube::release::gcs::push_images
  kube::release::gcs::copy_release_tarballs
}

# Verify things are set up for uploading to GCS
function kube::release::gcs::verify_prereqs() {
  if [[ -z "$(which gsutil)" || -z "$(which gcloud)" ]]; then
    echo "Releasing Kubernetes requires gsutil and gcloud. Please download,"
    echo "install and authorize through the Google Cloud SDK:"
    echo
    echo "  https://developers.google.com/cloud/sdk/"
    return 1
  fi

  if [[ -z "${GCLOUD_ACCOUNT-}" ]]; then
    GCLOUD_ACCOUNT=$(gcloud auth list 2>/dev/null | awk '/(active)/ { print $2 }')
  fi
  if [[ -z "${GCLOUD_ACCOUNT-}" ]]; then
    echo "No account authorized through gcloud. Please fix with:"
    echo
    echo "  gcloud auth login"
    return 1
  fi

  if [[ -z "${GCLOUD_PROJECT-}" ]]; then
    GCLOUD_PROJECT=$(gcloud config list project | awk '{project = $3} END {print project}')
  fi
  if [[ -z "${GCLOUD_PROJECT-}" ]]; then
    echo "No project set in gcloud. Please fix with:"
    echo
    echo "  gcloud config set project <project id>"
    return 1
  fi
}

# Create a unique bucket name for releasing Kube and make sure it exists.
function kube::release::gcs::ensure_release_bucket() {
  local project_hash
  project_hash=$(kube::build::short_hash "$GCLOUD_PROJECT")
  KUBE_GCS_RELEASE_BUCKET=${KUBE_GCS_RELEASE_BUCKET-kubernetes-releases-${project_hash}}
  KUBE_GCS_RELEASE_PREFIX=${KUBE_GCS_RELEASE_PREFIX-devel/}
  KUBE_GCS_DOCKER_REG_PREFIX=${KUBE_GCS_DOCKER_REG_PREFIX-docker-reg/}

  if ! gsutil ls "gs://${KUBE_GCS_RELEASE_BUCKET}" >/dev/null 2>&1 ; then
    echo "Creating Google Cloud Storage bucket: $KUBE_GCS_RELEASE_BUCKET"
    gsutil mb -p "${GCLOUD_PROJECT}" "gs://${KUBE_GCS_RELEASE_BUCKET}"
  fi
}
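
# Example (hypothetical): if the project's short hash is "1b2f0", the default
# destination is gs://kubernetes-releases-1b2f0, with release tarballs uploaded
# under the devel/ prefix.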

function kube::release::gcs::ensure_docker_registry() {
  local -r reg_container_name="gcs-registry"

  local -r running=$(docker inspect ${reg_container_name} 2>/dev/null \
    | build/json-extractor.py 0.State.Running 2>/dev/null)

  [[ "$running" != "true" ]] || return 0

  # Grovel around and find the OAuth token in the gcloud config
  local -r boto=~/.config/gcloud/legacy_credentials/${GCLOUD_ACCOUNT}/.boto
  local refresh_token
  refresh_token=$(grep 'gs_oauth2_refresh_token =' "$boto" | awk '{ print $3 }')

  if [[ -z "$refresh_token" ]]; then
    echo "Couldn't find OAuth 2 refresh token in ${boto}" >&2
    return 1
  fi

  # If we have an old one sitting around, remove it
  docker rm ${reg_container_name} >/dev/null 2>&1 || true

  echo "+++ Starting GCS backed Docker registry"
  local -ra docker_cmd=(
    docker run -d "--name=${reg_container_name}"
    -e "GCS_BUCKET=${KUBE_GCS_RELEASE_BUCKET}"
    -e "STORAGE_PATH=${KUBE_GCS_DOCKER_REG_PREFIX}"
    -e "GCP_OAUTH2_REFRESH_TOKEN=${refresh_token}"
    -p 127.0.0.1:5000:5000
    google/docker-registry
  )

  "${docker_cmd[@]}"

  # Give it time to spin up before we start throwing stuff at it
  sleep 5
}

function kube::release::gcs::push_images() {
  [[ ${KUBE_BUILD_RUN_IMAGES} =~ ^[yY]$ ]] || return 0

  kube::release::gcs::ensure_docker_registry

  # Tag each of our run binaries with the right registry and push
  local b image_name
  for b in "${KUBE_RUN_IMAGES[@]}" ; do
    image_name="${KUBE_RUN_IMAGE_BASE}-${b}"
    echo "+++ Tagging and pushing ${image_name} to GCS bucket ${KUBE_GCS_RELEASE_BUCKET}"
    docker tag "${KUBE_RUN_IMAGE_BASE}-$b" "localhost:5000/${image_name}"
    docker push "localhost:5000/${image_name}"
    docker rmi "localhost:5000/${image_name}"
  done
}

function kube::release::gcs::copy_release_tarballs() {
  # TODO: This isn't atomic. There will be points in time where there will be
  # no active release. Also, if something fails, the release could be half-
  # copied. The real way to do this would perhaps be to have some sort of
  # release version so that we are never overwriting a destination.
  local -r gcs_destination="gs://${KUBE_GCS_RELEASE_BUCKET}/${KUBE_GCS_RELEASE_PREFIX}"
  local gcs_options=()

  if [[ ${KUBE_GCS_NO_CACHING} =~ ^[yY]$ ]]; then
    gcs_options=("-h" "Cache-Control:private, max-age=0")
  fi

  echo "+++ Copying release tarballs to ${gcs_destination}"

  # First delete all objects at the destination
  gsutil -q rm -f -R "${gcs_destination}" >/dev/null 2>&1 || true

  # Now upload everything in release directory
  gsutil -m "${gcs_options[@]+${gcs_options[@]}}" cp -r "${RELEASE_DIR}"/* "${gcs_destination}"

  # TODO(jbeda): Generate an HTML page with links for this release so it is easy
  # to see it. For extra credit, generate a dynamic page that builds up the
  # release list using the GCS JSON API. Use Angular and Bootstrap for extra
  # extra credit.

  if [[ ${KUBE_GCS_MAKE_PUBLIC} =~ ^[yY]$ ]]; then
    gsutil acl ch -R -g all:R "${gcs_destination}" >/dev/null 2>&1
  fi

  gsutil ls -lh "${gcs_destination}"
}