Build platform-specific tarballs and upload them to the release directory.

pull/6/head
Joe Beda 2014-06-20 15:17:14 -07:00
parent 96f8cd3761
commit 866cad9bd9
3 changed files with 56 additions and 1 deletion

@@ -49,3 +49,16 @@ The `kube-build` container image is built by first creating a "context" directory
Everything in `build/build-image/` is meant to be run inside of the container. If it doesn't think it is running in the container it'll throw a warning. While you can run some of that stuff outside of the container, it wasn't built to do so.
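A hypothetical version of that guard, as a minimal sketch (the marker file path `/.kube-build-image` is an assumption, not the actual mechanism used):

```sh
# Hypothetical guard: warn when a build-image script runs outside the
# kube-build container. The marker file path is an assumed convention.
if [[ ! -f /.kube-build-image ]]; then
  echo "WARNING: this script is meant to run inside the kube-build container" >&2
fi
```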
The files necessary for the release Docker images are in `build/run-images/*`. All of this is staged into `output/images` similarly to the build image. The `base` image is used as a base for each of the specialized containers and is generally never pushed to a shared repository.
## TODOs
@jbeda is going on vacation and can't complete this work for a while. Here are the logical next steps:
* [ ] Get a cluster up and running with the Docker images. Perhaps start with a local cluster and move up to a GCE cluster.
* [ ] Implement #186 and #187. This will make it easier to develop Kubernetes.
* [ ] Deprecate/replace most of the stuff in the `hack/` directory.
* [ ] Put together a true client distribution. You should be able to download the tarball for your platform and get a Kube cluster running in <5 minutes. Not `git clone`.
* [ ] Plumb in a version so we can do proper versioned releases. Probably create a `./VERSION` and populate it with the git hash or something? (A sketch of one approach follows this list.)
* [ ] Create an install script that'll let us do a `curl https://[URL] | bash` to get that tarball down and ensure that other dependencies (cloud SDK?) are installed and configured correctly. (A sketch follows this list.)
* [ ] Support Windows as a client.
* [ ] Support uploading to the Docker index instead of the GCS bucket. This'll allow easier installs for those not running on GCE.
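For the version-plumbing item above, a minimal sketch; the `./VERSION` file and the version string format are assumptions, not a settled design:

```sh
# Hypothetical: stamp ./VERSION at build time so release artifacts carry a
# traceable version. Falls back to "unknown" outside a git checkout.
version="$(git rev-parse --short HEAD 2>/dev/null || echo unknown)"
echo "v0.1-${version}" > "${KUBE_REPO_ROOT}/VERSION"
```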
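For the `curl | bash` install item, a hedged sketch of what the downloaded script might do; the URL stays unspecified as in the TODO, and the tarball names mirror what `package-tarballs` below produces:

```sh
# Hypothetical install script body. Assumes the bucket layout created by
# package-tarballs/copy-release-to-gcs: kubernetes-<os>-<arch>.tar.gz.
os="$(uname -s | tr '[:upper:]' '[:lower:]')"  # e.g. linux, darwin
case "$(uname -m)" in
  x86_64) arch=amd64 ;;
  *) arch="$(uname -m)" ;;
esac
curl -fsSL "https://[URL]/kubernetes-${os}-${arch}.tar.gz" | tar xz
# Binaries land in ./kubernetes/bin/
```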

@@ -44,6 +44,9 @@ readonly KUBE_RUN_BINARIES="
proxy
"
# This is where the final release artifacts are created locally
readonly RELEASE_DIR="${KUBE_REPO_ROOT}/output/release"
# ---------------------------------------------------------------------------
# Basic setup functions
@@ -176,7 +179,6 @@ function docker-build() {
set -e
}
# Run a command in the kube-build image. This assumes that the image has
# already been built. This will sync out all output data from the build.
function run-build-command() {
@@ -289,3 +291,41 @@ function push-images-to-gcs() {
done
}
# Package up all of the cross compiled clients
function package-tarballs() {
  mkdir -p "${RELEASE_DIR}"

  # Find all of the built cloudcfg binaries
  for platform in output/build/*/* ; do
    # e.g. output/build/linux/amd64 -> linux-amd64
    local PLATFORM_TAG=$(echo "${platform}" | awk -F / '{ printf "%s-%s", $3, $4 }')
    echo "+++ Building client package for ${PLATFORM_TAG}"

    local CLIENT_RELEASE_STAGE="${KUBE_REPO_ROOT}/output/release-stage/${PLATFORM_TAG}/kubernetes"
    mkdir -p "${CLIENT_RELEASE_STAGE}/bin"
    cp "${platform}"/* "${CLIENT_RELEASE_STAGE}/bin"

    local CLIENT_PACKAGE_NAME="${RELEASE_DIR}/kubernetes-${PLATFORM_TAG}.tar.gz"
    tar czf "${CLIENT_PACKAGE_NAME}" \
      -C "${CLIENT_RELEASE_STAGE}/.." \
      .
  done
}
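# A hypothetical helper, not part of this commit: quick sanity check that each
# client tarball produced above unpacks into a top-level kubernetes/ directory.
function list-release-tarballs() {
  for tarball in "${RELEASE_DIR}"/kubernetes-*.tar.gz ; do
    echo "+++ Contents of ${tarball}"
    tar tzf "${tarball}"
  done
}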
function copy-release-to-gcs() {
  # TODO: This isn't atomic. There will be points in time where there will be
  # no active release. Also, if something fails, the release could be half-
  # copied. The real way to do this would perhaps be to have some sort of
  # release version so that we are never overwriting a destination.
  local -r GCS_DESTINATION="gs://${KUBE_RELEASE_BUCKET}/${KUBE_RELEASE_PREFIX}"
  echo "+++ Copying client tarballs to ${GCS_DESTINATION}"

  # First delete all objects at the destination
  gsutil -q rm -f -R "${GCS_DESTINATION}" >/dev/null 2>&1 || true

  # Now upload everything in the release directory
  gsutil -m cp -r "${RELEASE_DIR}" "${GCS_DESTINATION}" >/dev/null 2>&1
}
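# A hedged sketch of the versioned-destination idea from the TODO above: upload
# to a unique per-release path so nothing is overwritten, then update a small
# "latest" pointer. The path layout and pointer file are assumptions.
function copy-release-to-gcs-versioned() {
  local -r version="$(git rev-parse --short HEAD)"
  local -r destination="gs://${KUBE_RELEASE_BUCKET}/${KUBE_RELEASE_PREFIX}/${version}"
  echo "+++ Copying client tarballs to ${destination}"
  gsutil -m cp -r "${RELEASE_DIR}" "${destination}"
  # Readers resolve this pointer before downloading a tarball.
  echo "${version}" | gsutil cp - "gs://${KUBE_RELEASE_BUCKET}/${KUBE_RELEASE_PREFIX}/latest.txt"
}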

@@ -32,4 +32,6 @@ run-build-command build/build-image/run-tests.sh
run-build-command build/build-image/run-integration.sh
copy-output
run-image
package-tarballs
push-images-to-gcs
copy-release-to-gcs