Merge branch 'master' of github.com:GoogleCloudPlatform/kubernetes

pull/6/head
Max Forbes 2014-09-10 13:36:55 -07:00
commit 6a583ebcf5
36 changed files with 467 additions and 251 deletions

View File

@ -7,6 +7,7 @@
# clean: Clean up.
OUT_DIR = _output
GODEPS_PKG_DIR = Godeps/_workspace/pkg
export GOFLAGS
@ -48,4 +49,5 @@ check test:
# make clean
clean:
rm -rf $(OUT_DIR)
rm -rf $(GODEPS_PKG_DIR)
.PHONY: clean

View File

@ -24,6 +24,7 @@ While the concepts and architecture in Kubernetes represent years of experience
* [Microsoft Azure](docs/getting-started-guides/azure.md)
* [Rackspace](docs/getting-started-guides/rackspace.md)
* [Circle CI](https://circleci.com/docs/docker#google-compute-engine-and-kubernetes)
* [Digital Ocean](https://github.com/bketelsen/coreos-kubernetes-digitalocean)
* [kubecfg command line tool](https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/cli.md)
* [Kubernetes API Documentation](http://cdn.rawgit.com/GoogleCloudPlatform/kubernetes/31a0daae3627c91bc96e1f02a6344cd76e294791/api/kubernetes.html)
* [Kubernetes Client Libraries](https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/client-libraries.md)

View File

@ -49,13 +49,21 @@ ENV GOPATH /go
ENV GOOS linux
ENV GOARCH amd64
# Get the code coverage tool and etcd for integration tests
RUN go get code.google.com/p/go.tools/cmd/cover github.com/coreos/etcd github.com/tools/godep
# Get the code coverage tool and godep
RUN go get code.google.com/p/go.tools/cmd/cover github.com/tools/godep
RUN mkdir -p /go/src/github.com/coreos/etcd && \
cd /go/src/github.com/coreos/etcd && \
git clone https://github.com/coreos/etcd.git . -b v0.4.6 --depth=1 && \
go install github.com/coreos/etcd
# Mark this as a kube-build container
RUN touch /kube-build-image
WORKDIR /go/src/github.com/GoogleCloudPlatform/kubernetes
# Propagate the git tree version into the build image
ADD kube-version-defs /kube-version-defs
# Upload Kubernetes source
ADD kube-source.tar.gz /go/src/github.com/GoogleCloudPlatform/kubernetes

View File

@ -25,34 +25,53 @@ readonly KUBE_GO_PACKAGE=github.com/GoogleCloudPlatform/kubernetes
mkdir -p "${KUBE_TARGET}"
if [[ ! -f "/kube-build-image" ]]; then
echo "WARNING: This script should be run in the kube-build conrtainer image!" >&2
echo "WARNING: This script should be run in the kube-build container image!" >&2
fi
function make-binaries() {
readonly BINARIES="
proxy
integration
apiserver
controller-manager
kubelet
kubecfg"
if [[ -f "/kube-version-defs" ]]; then
source "/kube-version-defs"
else
echo "WARNING: No version information provided in build image"
fi
# Build a single go binary identified by its full go package path.
# $1 is the go package to build (e.g. .../cmd/kubelet); the output binary
# name is the last path element of the package.
#
# Relies on environment:
#   KUBE_REPO_ROOT - root of the tree; the build runs from here, presumably
#                    so godep resolves the Godeps workspace — TODO confirm
#   ARCH_TARGET    - output directory for the current GOOS/GOARCH pair
#   KUBE_LD_FLAGS  - optional ldflags (version stamping); empty if unset
function kube::build::make_binary() {
local -r gopkg=$1
# Binary name is the last element of the package path.
local -r bin=${gopkg##*/}
echo "+++ Building ${bin} for ${GOOS}/${GOARCH}"
pushd "${KUBE_REPO_ROOT}" >/dev/null
godep go build -ldflags "${KUBE_LD_FLAGS-}" -o "${ARCH_TARGET}/${bin}" "${gopkg}"
popd >/dev/null
}
function kube::build::make_binaries() {
if [[ ${#targets[@]} -eq 0 ]]; then
targets=(
cmd/proxy
cmd/apiserver
cmd/controller-manager
cmd/kubelet
cmd/kubecfg
plugin/cmd/scheduler
)
fi
binaries=()
local target
for target in "${targets[@]}"; do
binaries+=("${KUBE_GO_PACKAGE}/${target}")
done
ARCH_TARGET="${KUBE_TARGET}/${GOOS}/${GOARCH}"
mkdir -p "${ARCH_TARGET}"
function make-binary() {
echo "+++ Building $1 for ${GOOS}/${GOARCH}"
godep go build \
-o "${ARCH_TARGET}/$1" \
github.com/GoogleCloudPlatform/kubernetes/cmd/$1
}
if [[ -n $1 ]]; then
make-binary $1
if [[ -n "$1" ]]; then
kube::build::make_binary "$1"
exit 0
fi
for b in ${BINARIES}; do
make-binary $b
local b
for b in "${binaries[@]}"; do
kube::build::make_binary "$b"
done
}

View File

@ -14,10 +14,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# This script builds all go components.
set -e
source $(dirname $0)/common.sh
make-binaries "$@"
kube::build::make_binaries "$@"

View File

@ -14,22 +14,20 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# This script builds all go components.
set -e
source $(dirname $0)/common.sh
readonly CROSS_BINARIES="
kubecfg
"
readonly CROSS_BINARIES=(
./cmd/kubecfg
)
for platform in ${KUBE_CROSSPLATFORMS}; do
(
export GOOS=${platform%/*}
export GOARCH=${platform##*/}
for binary in ${CROSS_BINARIES}; do
make-binaries "${binary}"
for binary in "${CROSS_BINARIES[@]}"; do
kube::build::make_binaries "${binary}"
done
)
done

View File

@ -18,13 +18,15 @@ set -e
source $(dirname $0)/common.sh
ETCD_DIR="${KUBE_REPO_ROOT}/_output/etcd"
kube::build::make_binaries "./cmd/integration"
readonly ETCD_DIR="${KUBE_REPO_ROOT}/_output/etcd"
mkdir -p "${ETCD_DIR}"
echo "+++ Running integration test"
etcd -name test -data-dir ${ETCD_DIR} > "${KUBE_REPO_ROOT}/_output/etcd.log" &
ETCD_PID=$!
readonly ETCD_PID=$!
sleep 5

View File

@ -14,7 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
source $(dirname $0)/common.sh

View File

@ -17,6 +17,9 @@
# Common utilities, variables and checks for all build scripts.
cd $(dirname "${BASH_SOURCE}")/..
source hack/config-go.sh
readonly KUBE_REPO_ROOT="${PWD}"
readonly KUBE_GIT_BRANCH=$(git rev-parse --abbrev-ref HEAD 2>/dev/null)
@ -38,11 +41,13 @@ readonly DOCKER_CONTAINER_NAME=kube-build
readonly DOCKER_MOUNT="-v ${LOCAL_OUTPUT_DIR}:${REMOTE_OUTPUT_DIR}"
readonly KUBE_RUN_IMAGE_BASE="kubernetes"
readonly KUBE_RUN_BINARIES="
readonly KUBE_RUN_BINARIES=(
apiserver
controller-manager
proxy
"
scheduler
)
# This is where the final release artifacts are created locally
readonly RELEASE_DIR="${KUBE_REPO_ROOT}/_output/release"
@ -51,7 +56,7 @@ readonly RELEASE_DIR="${KUBE_REPO_ROOT}/_output/release"
# Basic setup functions
# Verify that the right utilities and such are installed for building Kube.
function verify-prereqs() {
function kube::build::verify_prereqs() {
if [[ -z "$(which docker)" ]]; then
echo "Can't find 'docker' in PATH, please fix and retry." >&2
echo "See https://docs.docker.com/installation/#installation for installation instructions." >&2
@ -82,42 +87,13 @@ function verify-prereqs() {
fi
}
# Verify things are set up for uploading to GCS
function verify-gcs-prereqs() {
if [[ -z "$(which gsutil)" || -z "$(which gcloud)" ]]; then
echo "Releasing Kubernetes requires gsutil and gcloud. Please download,"
echo "install and authorize through the Google Cloud SDK: "
echo
echo " https://developers.google.com/cloud/sdk/"
return 1
fi
FIND_ACCOUNT="gcloud auth list 2>/dev/null | grep '(active)' | awk '{ print \$2 }'"
GCLOUD_ACCOUNT=${GCLOUD_ACCOUNT-$(eval ${FIND_ACCOUNT})}
if [[ -z "${GCLOUD_ACCOUNT}" ]]; then
echo "No account authorized through gcloud. Please fix with:"
echo
echo " gcloud auth login"
return 1
fi
FIND_PROJECT="gcloud config list project | tail -n 1 | awk '{ print \$3 }'"
GCLOUD_PROJECT=${GCLOUD_PROJECT-$(eval ${FIND_PROJECT})}
if [[ -z "${GCLOUD_PROJECT}" ]]; then
echo "No account authorized through gcloud. Please fix with:"
echo
echo " gcloud config set project <project id>"
return 1
fi
}
# ---------------------------------------------------------------------------
# Building
# Set up the context directory for the kube-build image and build it.
function build-image() {
local -r BUILD_CONTEXT_DIR="${KUBE_REPO_ROOT}/_output/images/${KUBE_BUILD_IMAGE}"
local -r SOURCE="
function kube::build::build_image() {
local -r build_context_dir="${KUBE_REPO_ROOT}/_output/images/${KUBE_BUILD_IMAGE}"
local -r source=(
api
build
cmd
@ -125,78 +101,103 @@ function build-image() {
Godeps
hack
LICENSE
README.md
pkg
plugin
README.md
third_party
"
mkdir -p ${BUILD_CONTEXT_DIR}
tar czf ${BUILD_CONTEXT_DIR}/kube-source.tar.gz ${SOURCE}
cp build/build-image/Dockerfile ${BUILD_CONTEXT_DIR}/Dockerfile
docker-build "${KUBE_BUILD_IMAGE}" "${BUILD_CONTEXT_DIR}"
)
mkdir -p "${build_context_dir}"
tar czf "${build_context_dir}/kube-source.tar.gz" "${source[@]}"
cat >"${build_context_dir}/kube-version-defs" <<EOF
KUBE_LD_FLAGS="$(kube::version_ldflags)"
EOF
cp build/build-image/Dockerfile ${build_context_dir}/Dockerfile
kube::build::docker_build "${KUBE_BUILD_IMAGE}" "${build_context_dir}"
}
# Builds the runtime image. Assumes that the appropriate binaries are already
# built and in _output/build/.
function run-image() {
local -r BUILD_CONTEXT_BASE="${KUBE_REPO_ROOT}/_output/images/${KUBE_RUN_IMAGE_BASE}"
function kube::build::run_image() {
local -r build_context_base="${KUBE_REPO_ROOT}/_output/images/${KUBE_RUN_IMAGE_BASE}"
# First build the base image. This one brings in all of the binaries.
mkdir -p "${BUILD_CONTEXT_BASE}"
tar czf ${BUILD_CONTEXT_BASE}/kube-bins.tar.gz \
mkdir -p "${build_context_base}"
tar czf "${build_context_base}/kube-bins.tar.gz" \
-C "_output/build/linux/amd64" \
${KUBE_RUN_BINARIES}
cp -R build/run-images/base/* "${BUILD_CONTEXT_BASE}/"
docker-build "${KUBE_RUN_IMAGE_BASE}" "${BUILD_CONTEXT_BASE}"
"${KUBE_RUN_BINARIES[@]}"
cp -R build/run-images/base/* "${build_context_base}/"
kube::build::docker_build "${KUBE_RUN_IMAGE_BASE}" "${build_context_base}"
for b in $KUBE_RUN_BINARIES ; do
local SUB_CONTEXT_DIR="${BUILD_CONTEXT_BASE}-$b"
mkdir -p "${SUB_CONTEXT_DIR}"
cp -R build/run-images/$b/* "${SUB_CONTEXT_DIR}/"
docker-build "${KUBE_RUN_IMAGE_BASE}-$b" "${SUB_CONTEXT_DIR}"
local b
for b in "${KUBE_RUN_BINARIES[@]}" ; do
local sub_context_dir="${build_context_base}-$b"
mkdir -p "${sub_context_dir}"
cp -R build/run-images/$b/* "${sub_context_dir}/"
kube::build::docker_build "${KUBE_RUN_IMAGE_BASE}-$b" "${sub_context_dir}"
done
}
# Remove every docker image produced by the build: the build image, the base
# run image, one run image per server binary, then any untagged (<none>)
# images left behind by intermediate layers.
function kube::build::clean_images() {
  local binary
  kube::build::clean_image "${KUBE_BUILD_IMAGE}"
  kube::build::clean_image "${KUBE_RUN_IMAGE_BASE}"
  for binary in "${KUBE_RUN_BINARIES[@]}"; do
    kube::build::clean_image "${KUBE_RUN_IMAGE_BASE}-${binary}"
  done
  echo "+++ Cleaning all other untagged docker images"
  docker rmi $(docker images | awk '/^<none>/ {print $3}') 2> /dev/null || true
}
# Build a docker image from a Dockerfile.
# $1 is the name of the image to build
# $2 is the location of the "context" directory, with the Dockerfile at the root.
function docker-build() {
local -r IMAGE=$1
local -r CONTEXT_DIR=$2
local -r BUILD_CMD="docker build -t ${IMAGE} ${CONTEXT_DIR}"
function kube::build::docker_build() {
local -r image=$1
local -r context_dir=$2
local -r build_cmd="docker build -t ${image} ${context_dir}"
echo "+++ Building Docker image ${IMAGE}. This can take a while."
echo "+++ Building Docker image ${image}. This can take a while."
set +e # We are handling the error here manually
local -r DOCKER_OUTPUT="$(${BUILD_CMD} 2>&1)"
local -r docker_output="$(${build_cmd} 2>&1)"
if [ $? -ne 0 ]; then
set -e
echo "+++ Docker build command failed for ${IMAGE}" >&2
echo "+++ Docker build command failed for ${image}" >&2
echo >&2
echo "${DOCKER_OUTPUT}" >&2
echo "${docker_output}" >&2
echo >&2
echo "To retry manually, run:" >&2
echo >&2
echo " ${DOCKER_BUILD_CMD}" >&2
echo " ${build_cmd}" >&2
echo >&2
return 1
fi
set -e
}
# Delete one docker image, ignoring failures (e.g. the image was never built).
# $1 is the name of the image to delete.
function kube::build::clean_image() {
  local -r img=$1
  echo "+++ Deleting docker image ${img}"
  docker rmi ${img} 2> /dev/null || true
}
# Run a command in the kube-build image. This assumes that the image has
# already been built. This will sync out all output data from the build.
function run-build-command() {
function kube::build::run_build_command() {
[[ -n "$@" ]] || { echo "Invalid input." >&2; return 4; }
local -r DOCKER="docker run --rm --name=${DOCKER_CONTAINER_NAME} -it ${DOCKER_MOUNT} ${KUBE_BUILD_IMAGE}"
local -r docker="docker run --rm --name=${DOCKER_CONTAINER_NAME} -it ${DOCKER_MOUNT} ${KUBE_BUILD_IMAGE}"
docker rm ${DOCKER_CONTAINER_NAME} >/dev/null 2>&1 || true
${DOCKER} "$@"
${docker} "$@"
}
# If the Docker server is remote, copy the results back out.
function copy-output() {
function kube::build::copy_output() {
if [[ "$OSTYPE" == "darwin"* ]]; then
# When we are on the Mac with boot2docker we need to copy the results back
# out. Ideally we would leave the container around and use 'docker cp' to
@ -207,7 +208,7 @@ function copy-output() {
# The easiest thing I (jbeda) could figure out was to launch another
# container pointed at the same volume, tar the output directory and ship
that tar over stdout.
local DOCKER="docker run -a stdout --rm --name=${DOCKER_CONTAINER_NAME} ${DOCKER_MOUNT} ${KUBE_BUILD_IMAGE}"
local -r docker="docker run -a stdout --rm --name=${DOCKER_CONTAINER_NAME} ${DOCKER_MOUNT} ${KUBE_BUILD_IMAGE}"
# Kill any leftover container
docker rm ${DOCKER_CONTAINER_NAME} >/dev/null 2>&1 || true
@ -215,7 +216,7 @@ function copy-output() {
echo "+++ Syncing back _output directory from boot2docker VM"
mkdir -p "${LOCAL_OUTPUT_DIR}"
rm -rf "${LOCAL_OUTPUT_DIR}/*"
${DOCKER} sh -c "tar c -C ${REMOTE_OUTPUT_DIR} ." \
${docker} sh -c "tar c -C ${REMOTE_OUTPUT_DIR} ." \
| tar xv -C "${LOCAL_OUTPUT_DIR}"
# I (jbeda) also tried getting rsync working using 'docker run' as the
@ -229,17 +230,82 @@ function copy-output() {
}
# ---------------------------------------------------------------------------
# Release
# Build final release artifacts
# Package up all of the cross compiled clients
# Creates one client tarball per built platform under ${RELEASE_DIR}, named
# kubernetes-<os>-<arch>.tar.gz; each unpacks to kubernetes/bin/<binaries>.
#
# Relies on environment:
#   RELEASE_DIR    - destination directory for the tarballs
#   KUBE_REPO_ROOT - root of the tree; staging happens in _output/release-stage
function kube::build::package_tarballs() {
mkdir -p "${RELEASE_DIR}"
# Find all of the built kubecfg binaries
local platform
for platform in _output/build/*/* ; do
local platform_tag=${platform}
# Turn "_output/build/<os>/<arch>" into "<os>-<arch>".
platform_tag=${platform_tag#*/*/} # remove the first two path components
platform_tag=${platform_tag/\//-} # Replace a "/" for a "-"
echo "+++ Building client package for $platform_tag"
local client_release_stage="${KUBE_REPO_ROOT}/_output/release-stage/${platform_tag}/kubernetes"
mkdir -p "${client_release_stage}"
mkdir -p "${client_release_stage}/bin"
cp "${platform}"/* "${client_release_stage}/bin"
local client_package_name="${RELEASE_DIR}/kubernetes-${platform_tag}.tar.gz"
# Tar from one directory up so the archive root is "kubernetes/".
tar czf "${client_package_name}" -C "${client_release_stage}/.." .
done
}
# ---------------------------------------------------------------------------
# GCS Release
# Perform the full GCS release: verify gsutil/gcloud prerequisites, make sure
# the release bucket exists, then push the docker images and copy the client
# tarballs up to the bucket.
function kube::release::gcs::release() {
kube::release::gcs::verify_prereqs
kube::release::gcs::ensure_release_bucket
kube::release::gcs::push_images
kube::release::gcs::copy_release_tarballs
}
# Verify things are set up for uploading to GCS
# Checks that gsutil and gcloud are installed, and resolves GCLOUD_ACCOUNT and
# GCLOUD_PROJECT (honoring caller-provided values) from the gcloud config.
# Returns non-zero with an actionable message when anything is missing.
function kube::release::gcs::verify_prereqs() {
  # Both gsutil (uploads) and gcloud (auth/config) must be on the PATH.
  if [[ -z "$(which gsutil)" || -z "$(which gcloud)" ]]; then
    echo "Releasing Kubernetes requires gsutil and gcloud. Please download,"
    echo "install and authorize through the Google Cloud SDK: "
    echo
    echo " https://developers.google.com/cloud/sdk/"
    return 1
  fi
  # Honor a pre-set GCLOUD_ACCOUNT; otherwise read the active gcloud account.
  if [[ -z "${GCLOUD_ACCOUNT-}" ]]; then
    GCLOUD_ACCOUNT=$(gcloud auth list 2>/dev/null | awk '/(active)/ { print $2 }')
  fi
  if [[ -z "${GCLOUD_ACCOUNT}" ]]; then
    echo "No account authorized through gcloud. Please fix with:"
    echo
    echo " gcloud auth login"
    return 1
  fi
  # Honor a pre-set GCLOUD_PROJECT; otherwise read the configured default.
  if [[ -z "${GCLOUD_PROJECT-}" ]]; then
    GCLOUD_PROJECT=$(gcloud config list project | awk '{project = $3} END {print project}')
  fi
  if [[ -z "${GCLOUD_PROJECT}" ]]; then
    # Bug fix: this branch previously reprinted the "No account authorized"
    # message; the actual problem here is that no default project is set.
    echo "No project set in gcloud. Please fix with:"
    echo
    echo " gcloud config set project <project id>"
    return 1
  fi
}
# Create a unique bucket name for releasing Kube and make sure it exists.
function ensure-gcs-release-bucket() {
function kube::release::gcs::ensure_release_bucket() {
local project_hash
if which md5 > /dev/null 2>&1; then
HASH=$(md5 -q -s "$GCLOUD_PROJECT")
project_hash=$(md5 -q -s "$GCLOUD_PROJECT")
else
HASH=$(echo -n "$GCLOUD_PROJECT" | md5sum)
project_hash=$(echo -n "$GCLOUD_PROJECT" | md5sum)
fi
HASH=${HASH:0:5}
KUBE_RELEASE_BUCKET=${KUBE_RELEASE_BUCKET-kubernetes-releases-$HASH}
project_hash=${project_hash:0:5}
KUBE_RELEASE_BUCKET=${KUBE_RELEASE_BUCKET-kubernetes-releases-${project_hash}}
KUBE_RELEASE_PREFIX=${KUBE_RELEASE_PREFIX-devel/}
KUBE_DOCKER_REG_PREFIX=${KUBE_DOCKER_REG_PREFIX-docker-reg/}
@ -249,87 +315,66 @@ function ensure-gcs-release-bucket() {
fi
}
function ensure-gcs-docker-registry() {
local -r REG_CONTAINER_NAME="gcs-registry"
function kube::release::gcs::ensure_docker_registry() {
local -r reg_container_name="gcs-registry"
local -r RUNNING=$(docker inspect ${REG_CONTAINER_NAME} 2>/dev/null \
local -r running=$(docker inspect ${reg_container_name} 2>/dev/null \
| build/json-extractor.py 0.State.Running 2>/dev/null)
[[ "$RUNNING" != "true" ]] || return 0
[[ "$running" != "true" ]] || return 0
# Grovel around and find the OAuth token in the gcloud config
local -r BOTO=~/.config/gcloud/legacy_credentials/${GCLOUD_ACCOUNT}/.boto
local -r REFRESH_TOKEN=$(grep 'gs_oauth2_refresh_token =' $BOTO | awk '{ print $3 }')
local -r boto=~/.config/gcloud/legacy_credentials/${GCLOUD_ACCOUNT}/.boto
local -r refresh_token=$(grep 'gs_oauth2_refresh_token =' $boto | awk '{ print $3 }')
if [[ -z $REFRESH_TOKEN ]]; then
echo "Couldn't find OAuth 2 refresh token in ${BOTO}" >&2
if [[ -z "$refresh_token" ]]; then
echo "Couldn't find OAuth 2 refresh token in ${boto}" >&2
return 1
fi
# If we have an old one sitting around, remove it
docker rm ${REG_CONTAINER_NAME} >/dev/null 2>&1 || true
docker rm ${reg_container_name} >/dev/null 2>&1 || true
echo "+++ Starting GCS backed Docker registry"
local DOCKER="docker run -d --name=${REG_CONTAINER_NAME} "
DOCKER+="-e GCS_BUCKET=${KUBE_RELEASE_BUCKET} "
DOCKER+="-e STORAGE_PATH=${KUBE_DOCKER_REG_PREFIX} "
DOCKER+="-e GCP_OAUTH2_REFRESH_TOKEN=${REFRESH_TOKEN} "
DOCKER+="-p 127.0.0.1:5000:5000 "
DOCKER+="google/docker-registry"
local docker="docker run -d --name=${reg_container_name} "
docker+="-e GCS_BUCKET=${KUBE_RELEASE_BUCKET} "
docker+="-e STORAGE_PATH=${KUBE_DOCKER_REG_PREFIX} "
docker+="-e GCP_OAUTH2_REFRESH_TOKEN=${refresh_token} "
docker+="-p 127.0.0.1:5000:5000 "
docker+="google/docker-registry"
${DOCKER}
${docker}
# Give it time to spin up before we start throwing stuff at it
sleep 5
}
function push-images-to-gcs() {
ensure-gcs-docker-registry
function kube::release::gcs::push_images() {
kube::release::gcs::ensure_docker_registry
# Tag each of our run binaries with the right registry and push
for b in ${KUBE_RUN_BINARIES} ; do
echo "+++ Tagging and pushing ${KUBE_RUN_IMAGE_BASE}-$b to GCS bucket ${KUBE_RELEASE_BUCKET}"
docker tag "${KUBE_RUN_IMAGE_BASE}-$b" "localhost:5000/${KUBE_RUN_IMAGE_BASE}-$b"
docker push "localhost:5000/${KUBE_RUN_IMAGE_BASE}-$b"
docker rmi "localhost:5000/${KUBE_RUN_IMAGE_BASE}-$b"
local b image_name
for b in "${KUBE_RUN_BINARIES[@]}" ; do
image_name="${KUBE_RUN_IMAGE_BASE}-${b}"
echo "+++ Tagging and pushing ${image_name} to GCS bucket ${KUBE_RELEASE_BUCKET}"
docker tag "${KUBE_RUN_IMAGE_BASE}-$b" "localhost:5000/${image_name}"
docker push "localhost:5000/${image_name}"
docker rmi "localhost:5000/${image_name}"
done
}
# Package up all of the cross compiled clients
function package-tarballs() {
mkdir -p "${RELEASE_DIR}"
# Find all of the built kubecfg binaries
for platform in _output/build/*/* ; do
echo $platform
local PLATFORM_TAG=$(echo $platform | awk -F / '{ printf "%s-%s", $3, $4 }')
echo "+++ Building client package for $PLATFORM_TAG"
local CLIENT_RELEASE_STAGE="${KUBE_REPO_ROOT}/_output/release-stage/${PLATFORM_TAG}/kubernetes"
mkdir -p "${CLIENT_RELEASE_STAGE}"
mkdir -p "${CLIENT_RELEASE_STAGE}/bin"
cp $platform/* "${CLIENT_RELEASE_STAGE}/bin"
local CLIENT_PACKAGE_NAME="${RELEASE_DIR}/kubernetes-${PLATFORM_TAG}.tar.gz"
tar czf ${CLIENT_PACKAGE_NAME} \
-C "${CLIENT_RELEASE_STAGE}/.." \
.
done
}
function copy-release-to-gcs() {
function kube::release::gcs::copy_release_tarballs() {
# TODO: This isn't atomic. There will be points in time where there will be
# no active release. Also, if something fails, the release could be half-
# copied. The real way to do this would perhaps to have some sort of release
# version so that we are never overwriting a destination.
local -r GCS_DESTINATION="gs://${KUBE_RELEASE_BUCKET}/${KUBE_RELEASE_PREFIX}"
local -r gcs_destination="gs://${KUBE_RELEASE_BUCKET}/${KUBE_RELEASE_PREFIX}"
echo "+++ Copying client tarballs to ${GCS_DESTINATION}"
echo "+++ Copying client tarballs to ${gcs_destination}"
# First delete all objects at the destination
gsutil -q rm -f -R "${GCS_DESTINATION}" >/dev/null 2>&1 || true
gsutil -q rm -f -R "${gcs_destination}" >/dev/null 2>&1 || true
# Now upload everything in release directory
gsutil -m cp -r "${RELEASE_DIR}" "${GCS_DESTINATION}" >/dev/null 2>&1
gsutil -m cp -r "${RELEASE_DIR}" "${gcs_destination}" >/dev/null 2>&1
}

View File

@ -23,5 +23,5 @@ set -e
source $(dirname $0)/common.sh
verify-prereqs
copy-output
kube::build::verify_prereqs
kube::build::copy_output

View File

@ -23,6 +23,6 @@ set -e
source $(dirname $0)/common.sh
verify-prereqs
build-image
run-build-command build/build-image/make-binaries.sh "$@"
kube::build::verify_prereqs
kube::build::build_image
kube::build::run_build_command build/build-image/make-binaries.sh "$@"

View File

@ -25,5 +25,5 @@ set -e
source $(dirname $0)/common.sh
verify-prereqs
build-image
kube::build::verify_prereqs
kube::build::build_image

View File

@ -20,6 +20,10 @@ set -e
source $(dirname $0)/common.sh
verify-prereqs
build-image
run-build-command rm -rf _output/build/*
kube::build::verify_prereqs
kube::build::build_image
echo "+++ Cleaning out _output/build/*"
kube::build::run_build_command rm -rf _output/build/*
kube::build::clean_images

View File

@ -23,6 +23,6 @@ set -e
source $(dirname $0)/common.sh
verify-prereqs
build-image
run-build-command build/build-image/make-cross.sh
kube::build::verify_prereqs
kube::build::build_image
kube::build::run_build_command build/build-image/make-cross.sh

View File

@ -23,8 +23,8 @@ set -e
source $(dirname $0)/common.sh
verify-prereqs
build-image
run-build-command build/build-image/make-binaries.sh "$@"
copy-output
run-image
kube::build::verify_prereqs
kube::build::build_image
kube::build::run_build_command build/build-image/make-binaries.sh "$@"
kube::build::copy_output
kube::build::run_image

View File

@ -22,16 +22,13 @@ set -e
source $(dirname $0)/common.sh
verify-prereqs
verify-gcs-prereqs
ensure-gcs-release-bucket
build-image
run-build-command build/build-image/make-binaries.sh
run-build-command build/build-image/make-cross.sh
run-build-command build/build-image/run-tests.sh
run-build-command build/build-image/run-integration.sh
copy-output
run-image
package-tarballs
push-images-to-gcs
copy-release-to-gcs
kube::build::verify_prereqs
kube::build::build_image
kube::build::run_build_command build/build-image/make-binaries.sh
kube::build::run_build_command build/build-image/make-cross.sh
kube::build::run_build_command build/build-image/run-tests.sh
kube::build::run_build_command build/build-image/run-integration.sh
kube::build::copy_output
kube::build::run_image
kube::build::package_tarballs
kube::release::gcs::release

View File

@ -19,5 +19,5 @@ MAINTAINER Joe Beda <jbeda@google.com>
WORKDIR /kubernetes
# Upload Kubernetes
# Upload Kubernetes server binaries
ADD kube-bins.tar.gz /kubernetes

View File

@ -0,0 +1,24 @@
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file creates a minimal container for running Kubernetes binaries
FROM kubernetes
MAINTAINER Joe Beda <jbeda@google.com>
ENV API_SERVER 127.0.0.1:8080
ADD . /kubernetes
CMD ["/kubernetes/run.sh"]

View File

@ -0,0 +1,17 @@
#! /bin/bash
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
./scheduler -master="${API_SERVER}"

View File

@ -20,7 +20,7 @@ set -e
source $(dirname $0)/common.sh
verify-prereqs
build-image
run-build-command build/build-image/make-binaries.sh "integration"
run-build-command build/build-image/run-integration.sh
kube::build::verify_prereqs
kube::build::build_image
kube::build::run_build_command build/build-image/make-binaries.sh "./cmd/integration"
kube::build::run_build_command build/build-image/run-integration.sh

View File

@ -20,6 +20,6 @@ set -e
source $(dirname $0)/common.sh
verify-prereqs
build-image
run-build-command build/build-image/run-tests.sh "$@"
kube::build::verify_prereqs
kube::build::build_image
kube::build::run_build_command build/build-image/run-tests.sh "$@"

View File

@ -22,6 +22,6 @@ set -e
source $(dirname $0)/common.sh
verify-prereqs
build-image
run-build-command bash
kube::build::verify_prereqs
kube::build::build_image
kube::build::run_build_command bash

View File

@ -24,11 +24,11 @@ function public-key {
fi
done
echo "Can't find public key file..."
echo "Can't find public key file..." 1>&2
exit 1
}
DISK=kube.vmdk
DISK=./kube/kube.vmdk
GUEST_ID=debian7_64Guest
PUBLIC_KEY_FILE=${PUBLIC_KEY_FILE-$(public-key)}
SSH_OPTS="-oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null"
@ -40,3 +40,7 @@ SSH_OPTS="-oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null"
#export GOVC_RESOURCE_POOL=
#export GOVC_NETWORK=
#export GOVC_GUEST_LOGIN='kube:kube'
# Set GOVC_INSECURE if the host in GOVC_URL is using a certificate that cannot
# be verified (i.e. a self-signed certificate), but IS trusted.
#export GOVC_INSECURE=1

View File

@ -20,6 +20,7 @@ import (
"flag"
"fmt"
"io/ioutil"
"net/http"
"os"
"reflect"
"sort"
@ -39,10 +40,10 @@ import (
)
var (
serverVersion = flag.Bool("server_version", false, "Print the server's version number.")
serverVersion = verflag.Version("server_version", verflag.VersionFalse, "Print the server's version information and quit")
preventSkew = flag.Bool("expect_version_match", false, "Fail if server's version doesn't match own version.")
httpServer = flag.String("h", "", "The host to connect to.")
config = flag.String("c", "", "Path to the config file.")
config = flag.String("c", "", "Path or URL to the config file.")
selector = flag.String("l", "", "Selector (label query) to use for listing")
updatePeriod = flag.Duration("u", 60*time.Second, "Update interval period")
portSpec = flag.String("p", "", "The port spec, comma-separated list of <external>:<internal>,...")
@ -95,17 +96,41 @@ func prettyWireStorage() string {
return strings.Join(types, "|")
}
// readConfigData reads the bytes from the specified filesystem or network
// location associated with the *config flag. A value beginning with http://
// or https:// is fetched over HTTP; anything else is read as a local file.
// Any failure is fatal (the process exits via glog.Fatalf).
func readConfigData() []byte {
	// Use HasPrefix rather than Index()==0 to detect a URL source.
	if strings.HasPrefix(*config, "http://") || strings.HasPrefix(*config, "https://") {
		resp, err := http.Get(*config)
		if err != nil {
			glog.Fatalf("Unable to access URL %v: %v\n", *config, err)
		}
		defer resp.Body.Close()
		if resp.StatusCode != http.StatusOK {
			glog.Fatalf("Unable to read URL, server reported %d %s", resp.StatusCode, resp.Status)
		}
		data, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			glog.Fatalf("Unable to read URL %v: %v\n", *config, err)
		}
		return data
	}
	// Early return above lets the file path sit at the top level (no else).
	data, err := ioutil.ReadFile(*config)
	if err != nil {
		glog.Fatalf("Unable to read %v: %v\n", *config, err)
	}
	return data
}
// readConfig reads and parses pod, replicationController, and service
// configuration files. If any errors log and exit non-zero.
func readConfig(storage string) []byte {
if len(*config) == 0 {
glog.Fatal("Need config file (-c)")
}
data, err := ioutil.ReadFile(*config)
if err != nil {
glog.Fatalf("Unable to read %v: %v\n", *config, err)
}
data, err = parser.ToWireFormat(data, storage, runtime.DefaultCodec)
data, err := parser.ToWireFormat(readConfigData(), storage, runtime.DefaultCodec)
if err != nil {
glog.Fatalf("Error parsing %v as an object for %v: %v\n", *config, storage, err)
}
@ -152,14 +177,19 @@ func main() {
}
}
if *serverVersion {
if *serverVersion != verflag.VersionFalse {
got, err := kubeClient.ServerVersion()
if err != nil {
fmt.Printf("Couldn't read version from server: %v\n", err)
os.Exit(1)
}
fmt.Printf("Server Version: %#v\n", got)
if *serverVersion == verflag.VersionRaw {
fmt.Printf("%#v\n", *got)
os.Exit(0)
} else {
fmt.Printf("Server: Kubernetes %s\n", got)
os.Exit(0)
}
}
if *preventSkew {

View File

@ -41,16 +41,17 @@ Upload this VMDK to your vSphere instance:
```sh
export GOVC_URL='https://user:pass@hostname/sdk'
export GOVC_INSECURE=1 # If the host above uses a self-signed cert
export GOVC_DATASTORE='target datastore'
export GOVC_RESOURCE_POOL='resource pool with access to datastore'
export GOVC_RESOURCE_POOL='resource pool or cluster with access to datastore'
govc datastore.import kube.vmdk
govc import.vmdk kube.vmdk ./kube/
```
Verify that the VMDK was correctly uploaded and expanded to 10GiB:
```sh
govc datastore.ls
govc datastore.ls ./kube/
```
Take a look at the file `cluster/vsphere/config-common.sh` and fill in the required

View File

@ -48,20 +48,10 @@ for arg; do
done
if [[ ${#targets[@]} -eq 0 ]]; then
targets=(
cmd/proxy
cmd/apiserver
cmd/controller-manager
cmd/kubelet
cmd/kubecfg
plugin/cmd/scheduler
)
targets=($(kube::default_build_targets))
fi
binaries=()
for target in ${targets[@]}; do
binaries+=("${KUBE_GO_PACKAGE}/${target}")
done
binaries=($(kube::binaries_from_targets "${targets[@]}"))
echo "Building local go components"
# Note that the flags to 'go build' are duplicated in the salt build setup

View File

@ -114,6 +114,24 @@ kube::setup_go_environment() {
}
# kube::default_build_targets prints the default set of build targets, one per
# line, for callers that did not name any targets explicitly.
kube::default_build_targets() {
  local target
  for target in \
    cmd/proxy \
    cmd/apiserver \
    cmd/controller-manager \
    cmd/kubelet \
    cmd/kubecfg \
    plugin/cmd/scheduler; do
    echo "${target}"
  done
}
# kube::binaries_from_targets maps each build target given as an argument to
# its fully-qualified go package (prefixed with KUBE_GO_PACKAGE), one per line.
kube::binaries_from_targets() {
  local tgt
  for tgt in "$@"; do
    echo "${KUBE_GO_PACKAGE}/${tgt}"
  done
}
# --- Environment Variables ---
# KUBE_REPO_ROOT - Path to the top of the build tree.

View File

@ -326,6 +326,7 @@ func (*ReplicationControllerList) IsAnAPIObject() {}
// ReplicationController represents the configuration of a replication
// controller.
type ReplicationController struct {
JSONBase `json:",inline" yaml:",inline"`
// DesiredState is the state this controller should drive towards.
DesiredState ReplicationControllerState `json:"desiredState,omitempty" yaml:"desiredState,omitempty"`
// CurrentState is the observed state; omitted from serialization when
// empty. NOTE(review): appears to be filled in on read by the registry's
// fillCurrentState rather than stored — confirm against pkg/registry.
CurrentState ReplicationControllerState `json:"currentState,omitempty" yaml:"currentState,omitempty"`
Labels map[string]string `json:"labels,omitempty" yaml:"labels,omitempty"`
}

View File

@ -339,6 +339,7 @@ func (*ReplicationControllerList) IsAnAPIObject() {}
// ReplicationController represents the configuration of a replication
// controller.
type ReplicationController struct {
JSONBase `json:",inline" yaml:",inline"`
// DesiredState is the state this controller should drive towards.
DesiredState ReplicationControllerState `json:"desiredState,omitempty" yaml:"desiredState,omitempty"`
// CurrentState is the observed state; omitted from serialization when
// empty. NOTE(review): appears to be filled in on read by the registry's
// fillCurrentState rather than stored — confirm against pkg/registry.
CurrentState ReplicationControllerState `json:"currentState,omitempty" yaml:"currentState,omitempty"`
Labels map[string]string `json:"labels,omitempty" yaml:"labels,omitempty"`
}

View File

@ -228,8 +228,8 @@ func ValidateManifest(manifest *api.ContainerManifest) errs.ErrorList {
} else if !supportedManifestVersions.Has(strings.ToLower(manifest.Version)) {
allErrs = append(allErrs, errs.NewFieldNotSupported("version", manifest.Version))
}
allVolumes, errs := validateVolumes(manifest.Volumes)
allErrs = append(allErrs, errs.Prefix("volumes")...)
allVolumes, vErrs := validateVolumes(manifest.Volumes)
allErrs = append(allErrs, vErrs.Prefix("volumes")...)
allErrs = append(allErrs, validateContainers(manifest.Containers, allVolumes).Prefix("containers")...)
allErrs = append(allErrs, validateRestartPolicy(&manifest.RestartPolicy).Prefix("restartPolicy")...)
return allErrs

View File

@ -29,6 +29,7 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util/wait"
"github.com/GoogleCloudPlatform/kubernetes/pkg/version"
"github.com/golang/glog"
@ -174,6 +175,9 @@ func portsFromString(spec string) []api.Port {
// RunController creates a new replication controller named 'name' which creates 'replicas' pods running 'image'.
func RunController(image, name string, replicas int, client client.Interface, portSpec string, servicePort int) error {
if servicePort > 0 && !util.IsDNSLabel(name) {
return fmt.Errorf("Service creation requested, but an invalid name for a service was provided (%s). Service names must be valid DNS labels.", name)
}
controller := &api.ReplicationController{
JSONBase: api.JSONBase{
ID: name,

View File

@ -92,6 +92,7 @@ func (rs *REST) Get(id string) (runtime.Object, error) {
if err != nil {
return nil, err
}
rs.fillCurrentState(controller)
return controller, err
}
@ -104,6 +105,7 @@ func (rs *REST) List(selector labels.Selector) (runtime.Object, error) {
filtered := []api.ReplicationController{}
for _, controller := range controllers.Items {
if selector.Matches(labels.Set(controller.Labels)) {
rs.fillCurrentState(&controller)
filtered = append(filtered, controller)
}
}
@ -147,7 +149,11 @@ func (rs *REST) Watch(label, field labels.Selector, resourceVersion uint64) (wat
}
return watch.Filter(incoming, func(e watch.Event) (watch.Event, bool) {
repController := e.Object.(*api.ReplicationController)
return e, label.Matches(labels.Set(repController.Labels))
match := label.Matches(labels.Set(repController.Labels))
if match {
rs.fillCurrentState(repController)
}
return e, match
}), nil
}
@ -164,3 +170,15 @@ func (rs *REST) waitForController(ctrl *api.ReplicationController) (runtime.Obje
}
return ctrl, nil
}
// fillCurrentState populates ctrl.CurrentState.Replicas with the number of
// pods currently matching the controller's replica selector. It is a no-op
// when no pod lister is configured on the REST storage.
func (rs *REST) fillCurrentState(ctrl *api.ReplicationController) error {
	if rs.podLister == nil {
		// Nothing to count against; leave CurrentState untouched.
		return nil
	}
	pods, err := rs.podLister.ListPods(labels.Set(ctrl.DesiredState.ReplicaSelector).AsSelector())
	if err != nil {
		return err
	}
	ctrl.CurrentState.Replicas = len(pods.Items)
	return nil
}

View File

@ -316,3 +316,44 @@ func TestControllerStorageValidatesUpdate(t *testing.T) {
}
}
}
// fakePodLister is a canned pod-lister test double: it serves a fixed pod
// list and error, and records the selector it was last queried with so
// tests can assert on it.
type fakePodLister struct {
	e error           // error returned by ListPods
	l api.PodList     // pod list returned by ListPods
	s labels.Selector // selector passed to the most recent ListPods call
}
// ListPods records the selector it was invoked with and returns the
// fake's canned pod list and error.
func (f *fakePodLister) ListPods(s labels.Selector) (*api.PodList, error) {
	f.s = s
	return &f.l, f.e
}
// TestFillCurrentState verifies that fillCurrentState counts the pods
// returned by the lister and queries it with the controller's replica
// selector.
func TestFillCurrentState(t *testing.T) {
	lister := fakePodLister{
		l: api.PodList{
			Items: []api.Pod{
				{JSONBase: api.JSONBase{ID: "foo"}},
				{JSONBase: api.JSONBase{ID: "bar"}},
			},
		},
	}
	mockReg := registrytest.ControllerRegistry{}
	storage := REST{
		registry:  &mockReg,
		podLister: &lister,
	}
	controller := api.ReplicationController{
		DesiredState: api.ReplicationControllerState{
			ReplicaSelector: map[string]string{"foo": "bar"},
		},
	}
	storage.fillCurrentState(&controller)
	// Both canned pods should have been counted.
	if controller.CurrentState.Replicas != 2 {
		t.Errorf("expected 2, got: %d", controller.CurrentState.Replicas)
	}
	// The lister must have been queried with the replica selector.
	wantSelector := labels.Set(controller.DesiredState.ReplicaSelector).AsSelector()
	if !reflect.DeepEqual(lister.s, wantSelector) {
		t.Errorf("unexpected output: %#v %#v", wantSelector, lister.s)
	}
}

View File

@ -23,9 +23,10 @@ package version
// version for ad-hoc builds (e.g. `go build`) that cannot get the version
// information from git.
//
// The "+" in the version info indicates that fact, and it means the current
// build is from a version greater or equal to that version.
// (e.g. v0.7+ means version >= 0.7 and < 0.8)
// The "-dev" suffix in the version info indicates that fact, and it means the
// current build is from a version greater that version. For example, v0.7-dev
// means version > 0.7 and < 0.8. (There's exceptions to this rule, see
// docs/releasing.md for more details.)
//
// When releasing a new Kubernetes version, this file should be updated to
// reflect the new version, and then a git annotated tag (using format vX.Y
@ -33,9 +34,10 @@ package version
// to the commit that updates pkg/version/base.go
var (
	// TODO: Deprecate gitMajor and gitMinor, use only gitVersion instead.
	gitMajor     string = "0"              // major version, always numeric
	gitMinor     string = "2+"             // minor version, numeric possibly followed by "+"
	gitVersion   string = "v0.2-dev"       // version from git, output of $(git describe)
	gitCommit    string = ""               // sha1 from git, output of $(git rev-parse HEAD)
	gitTreeState string = "not a git tree" // state of git tree, either "clean" or "dirty"
)

View File

@ -16,10 +16,6 @@ limitations under the License.
package version
import (
"fmt"
)
// Info contains versioning information.
// TODO: Add []string of api versions supported? It's still unclear
// how we'll want to distribute that information.
@ -47,9 +43,5 @@ func Get() Info {
// String returns info as a human-friendly version string, which is simply
// the git-derived version (e.g. "v0.2-dev").
func (info Info) String() string {
	return info.GitVersion
}