mirror of https://github.com/k3s-io/k3s
Rename `output/` directory to `_output/`
go build ./... will ignore any directory starting with an underscore.

parent c5520dd39d
commit 843ae1fbe2
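For context on the commit message: the `./...` package pattern used by `go build` (and `go test`, `go vet`) never descends into directories whose names start with `_` or `.`, so moving build artifacts to `_output/` keeps them out of every package walk. A quick illustration (not part of the diff):

    # The "./..." pattern skips directories starting with "_" or ".", so
    # artifacts under _output/ are no longer picked up by accident.
    go build ./...   # walks pkg/, cmd/, ... but never _output/ or .git/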
@@ -9,6 +9,8 @@
 # This is where the result of the go build goes
 /output/**
 /output
+/_output/**
+/_output
 
 # Emacs save files
 *~
@@ -16,8 +16,8 @@ To build Kubernetes you need to have access to a Docker installation through eit
 * `run-tests.sh`: This will run the Kubernetes unit tests in a Docker container
 * `run-integration.sh`: This will build and run the integration test in a Docker container
 * `make-cross.sh`: This will make all cross-compiled binaries (currently just kubecfg).
-* `copy-output.sh`: This will copy the contents of `output/build` from any remote Docker container to the local `output/build`. Right now this is only necessary on Mac OS X with `boot2docker`.
-* `make-clean.sh`: Clean out the contents of `output/build`.
+* `copy-output.sh`: This will copy the contents of `_output/build` from any remote Docker container to the local `_output/build`. Right now this is only necessary on Mac OS X with `boot2docker`.
+* `make-clean.sh`: Clean out the contents of `_output/build`.
 * `shell.sh`: Drop into a `bash` shell in a build container with a snapshot of the current repo code.
 * `release.sh`: Build everything, test it, upload the results to a GCS bucket. Docker images are also sent to the same bucket using the [`google/docker-registry`](https://registry.hub.docker.com/u/google/docker-registry/) Docker image.
 
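The scripts listed above are all run from the repository root; a typical local sequence, inferred from the descriptions rather than prescribed by this commit, might be:

    build/make-clean.sh        # empty out _output/build
    build/run-tests.sh         # unit tests inside the kube-build container
    build/run-integration.sh   # build and run the integration test in a container
    build/copy-output.sh       # OS X + boot2docker only: sync _output/build back locally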
@@ -44,11 +44,11 @@ Other build artifacts:
 
 The scripts directly under `build/` are used to build and test. They will ensure that the `kube-build` Docker image is built (based on `build/build-image/Dockerfile`) and then execute the appropriate command in that container. If necessary (for Mac OS X), the scripts will also copy results out.
 
-The `kube-build` container image is built by first creating a "context" directory in `output/images/build-image`. It is done there instead of at the root of the Kubernetes repo to minimize the amount of data we need to package up when building the image.
+The `kube-build` container image is built by first creating a "context" directory in `_output/images/build-image`. It is done there instead of at the root of the Kubernetes repo to minimize the amount of data we need to package up when building the image.
 
 Everything in `build/build-image/` is meant to be run inside of the container. If it doesn't think it is running in the container it'll throw a warning. While you can run some of that stuff outside of the container, it wasn't built to do so.
 
-The files necessarily for the release Docker images are in `build/run-images/*`. All of this is staged into `output/images` similar to build-image. The `base` image is used as a base for each of the specialized containers and is generally never pushed to a shared repository.
+The files necessarily for the release Docker images are in `build/run-images/*`. All of this is staged into `_output/images` similar to build-image. The `base` image is used as a base for each of the specialized containers and is generally never pushed to a shared repository.
 
 ## TODOs
 
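The context-directory staging described above is the usual way to keep a Docker build context small; as a rough sketch (assumed commands only; the actual logic is the `build-image` function shown later in this diff):

    # Sketch: stage only what the image needs, then build from that directory so
    # docker does not ship the entire repo as build context.
    CONTEXT_DIR=_output/images/build-image
    mkdir -p "${CONTEXT_DIR}"
    cp build/build-image/Dockerfile "${CONTEXT_DIR}/"   # Dockerfile location per the text above
    cp -R api build "${CONTEXT_DIR}/"                    # assumed subset of sources
    docker build -t kube-build "${CONTEXT_DIR}"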
@@ -19,7 +19,7 @@
 cd $(dirname "${BASH_SOURCE}")/../.. >/dev/null
 readonly KUBE_REPO_ROOT="${PWD}"
-readonly KUBE_TARGET="${KUBE_REPO_ROOT}/output/build"
+readonly KUBE_TARGET="${KUBE_REPO_ROOT}/_output/build"
 readonly KUBE_GO_PACKAGE=github.com/GoogleCloudPlatform/kubernetes
 
 mkdir -p "${KUBE_TARGET}"

@@ -18,12 +18,12 @@ set -e
 source $(dirname $0)/common.sh
 
-ETCD_DIR="${KUBE_REPO_ROOT}/output/etcd"
+ETCD_DIR="${KUBE_REPO_ROOT}/_output/etcd"
 mkdir -p "${ETCD_DIR}"
 
 echo "+++ Running integration test"
 
-etcd -name test -data-dir ${ETCD_DIR} > "${KUBE_REPO_ROOT}/output/etcd.log" &
+etcd -name test -data-dir ${ETCD_DIR} > "${KUBE_REPO_ROOT}/_output/etcd.log" &
 ETCD_PID=$!
 
 sleep 5
@@ -27,13 +27,13 @@ fi
 readonly KUBE_BUILD_IMAGE
 readonly KUBE_GO_PACKAGE="github.com/GoogleCloudPlatform/kubernetes"
 
-# We set up a volume so that we have the same output directory from one run of
+# We set up a volume so that we have the same _output directory from one run of
 # the container to the next.
 #
 # Note that here "LOCAL" is local to the docker daemon. In the boot2docker case
 # this is still inside the VM. We use the same directory in both cases though.
-readonly LOCAL_OUTPUT_DIR="${KUBE_REPO_ROOT}/output/build"
-readonly REMOTE_OUTPUT_DIR="/go/src/${KUBE_GO_PACKAGE}/output/build"
+readonly LOCAL_OUTPUT_DIR="${KUBE_REPO_ROOT}/_output/build"
+readonly REMOTE_OUTPUT_DIR="/go/src/${KUBE_GO_PACKAGE}/_output/build"
 readonly DOCKER_CONTAINER_NAME=kube-build
 readonly DOCKER_MOUNT="-v ${LOCAL_OUTPUT_DIR}:${REMOTE_OUTPUT_DIR}"
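A note on the variables above: `DOCKER_MOUNT` maps `LOCAL_OUTPUT_DIR` (local to the Docker daemon) onto the same `_output/build` path inside the build container, which is what lets results persist from one run of the container to the next. A hypothetical consumer (the real `docker run` lives elsewhere in this script):

    # Hypothetical invocation; DOCKER_MOUNT expands to "-v <local>:<remote>" and is
    # intentionally left unquoted so it splits into two arguments.
    docker run ${DOCKER_MOUNT} --name "${DOCKER_CONTAINER_NAME}" "${KUBE_BUILD_IMAGE}" "$@"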
@@ -45,7 +45,7 @@ readonly KUBE_RUN_BINARIES="
 "
 
 # This is where the final release artifacts are created locally
-readonly RELEASE_DIR="${KUBE_REPO_ROOT}/output/release"
+readonly RELEASE_DIR="${KUBE_REPO_ROOT}/_output/release"
 
 # ---------------------------------------------------------------------------
 # Basic setup functions

@@ -116,7 +116,7 @@ function verify-gcs-prereqs() {
 
 # Set up the context directory for the kube-build image and build it.
 function build-image() {
-local -r BUILD_CONTEXT_DIR="${KUBE_REPO_ROOT}/output/images/${KUBE_BUILD_IMAGE}"
+local -r BUILD_CONTEXT_DIR="${KUBE_REPO_ROOT}/_output/images/${KUBE_BUILD_IMAGE}"
 local -r SOURCE="
 api
 build

@@ -137,14 +137,14 @@ function build-image() {
 }
 
 # Builds the runtime image. Assumes that the appropriate binaries are already
-# built and in output/build/.
+# built and in _output/build/.
 function run-image() {
-local -r BUILD_CONTEXT_BASE="${KUBE_REPO_ROOT}/output/images/${KUBE_RUN_IMAGE_BASE}"
+local -r BUILD_CONTEXT_BASE="${KUBE_REPO_ROOT}/_output/images/${KUBE_RUN_IMAGE_BASE}"
 
 # First build the base image. This one brings in all of the binaries.
 mkdir -p "${BUILD_CONTEXT_BASE}"
 tar czf ${BUILD_CONTEXT_BASE}/kube-bins.tar.gz \
--C "output/build/linux/amd64" \
+-C "_output/build/linux/amd64" \
 ${KUBE_RUN_BINARIES}
 cp -R build/run-images/base/* "${BUILD_CONTEXT_BASE}/"
 docker-build "${KUBE_RUN_IMAGE_BASE}" "${BUILD_CONTEXT_BASE}"
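One detail in the `run-image` hunk above: each `-C` makes `tar` change directory before archiving the names that follow, so the binaries are stored at the top of `kube-bins.tar.gz` rather than under `_output/build/linux/amd64/`. For example (hypothetical binary names):

    tar czf kube-bins.tar.gz -C _output/build/linux/amd64 apiserver controller-manager
    tar tzf kube-bins.tar.gz   # lists "apiserver" and "controller-manager" with no leading path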
@@ -212,7 +212,7 @@ function copy-output() {
 # Kill any leftover container
 docker rm ${DOCKER_CONTAINER_NAME} >/dev/null 2>&1 || true
 
-echo "+++ Syncing back output directory from boot2docker VM"
+echo "+++ Syncing back _output directory from boot2docker VM"
 mkdir -p "${LOCAL_OUTPUT_DIR}"
 rm -rf "${LOCAL_OUTPUT_DIR}/*"
 ${DOCKER} sh -c "tar c -C ${REMOTE_OUTPUT_DIR} ." \
@@ -300,12 +300,12 @@ function package-tarballs() {
 mkdir -p "${RELEASE_DIR}"
 
 # Find all of the built kubecfg binaries
-for platform in output/build/*/* ; do
+for platform in _output/build/*/* ; do
 echo $platform
 local PLATFORM_TAG=$(echo $platform | awk -F / '{ printf "%s-%s", $3, $4 }')
 echo "+++ Building client package for $PLATFORM_TAG"
 
-local CLIENT_RELEASE_STAGE="${KUBE_REPO_ROOT}/output/release-stage/${PLATFORM_TAG}/kubernetes"
+local CLIENT_RELEASE_STAGE="${KUBE_REPO_ROOT}/_output/release-stage/${PLATFORM_TAG}/kubernetes"
 mkdir -p "${CLIENT_RELEASE_STAGE}"
 mkdir -p "${CLIENT_RELEASE_STAGE}/bin"
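To make the `PLATFORM_TAG` derivation above concrete: `awk -F /` splits the glob match on `/`, and with the renamed layout fields 3 and 4 are the OS and architecture:

    echo "_output/build/linux/amd64" | awk -F / '{ printf "%s-%s", $3, $4 }'
    # -> linux-amd64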
@@ -22,4 +22,4 @@ source $(dirname $0)/common.sh
 
 verify-prereqs
 build-image
-run-build-command rm -rf output/build/*
+run-build-command rm -rf _output/build/*

@@ -17,7 +17,7 @@
 source $(dirname $0)/kube-env.sh
 source $(dirname $0)/$KUBERNETES_PROVIDER/util.sh
 
-CLOUDCFG=$(dirname $0)/../output/go/bin/kubecfg
+CLOUDCFG=$(dirname $0)/../_output/go/bin/kubecfg
 if [ ! -x $CLOUDCFG ]; then
 echo "Could not find kubecfg binary. Run hack/build-go.sh to build it."
 exit 1
@@ -49,7 +49,7 @@ EOF
 cat <<EOF >/etc/salt/master.d/salt-output.conf
 # Minimize the amount of output to terminal
 state_verbose: False
-state_output: mixed
+state_output: mixed
 EOF
 
 # Configure nginx authorization

@@ -83,8 +83,8 @@ EOF
 curl -sS -L --connect-timeout 20 --retry 6 --retry-delay 10 https://bootstrap.saltstack.com | sh -s -- -M
 
 # Install salt-api
-#
-# This is used to inform the cloud provider used in the vagrant cluster
+#
+# This is used to inform the cloud provider used in the vagrant cluster
 yum install -y salt-api
 systemctl enable salt-api
 systemctl start salt-api

@@ -98,7 +98,7 @@ pushd /vagrant
 popd
 
 echo "Running release install script"
-pushd /vagrant/output/release/master-release/src/scripts
+pushd /vagrant/_output/release/master-release/src/scripts
 ./master-release-install.sh
 popd
@@ -180,7 +180,7 @@ function kube-up {
 govc guest.upload \
 -vm ${MASTER_NAME} \
 -f \
-./output/release/master-release.tgz \
+./_output/release/master-release.tgz \
 /home/kube/master-release.tgz
 
 # Kickstart start script

@@ -248,7 +248,7 @@ function kube-push {
 govc guest.upload \
 -vm ${MASTER_NAME} \
 -f \
-./output/release/master-release.tgz \
+./_output/release/master-release.tgz \
 /home/kube/master-release.tgz
 
 (
@@ -108,7 +108,7 @@ KUBE_REPO_ROOT=$(
 )
 export KUBE_REPO_ROOT
 
-KUBE_TARGET="${KUBE_REPO_ROOT}/output/go"
+KUBE_TARGET="${KUBE_REPO_ROOT}/_output/go"
 mkdir -p "${KUBE_TARGET}"
 export KUBE_TARGET
 
@@ -116,7 +116,7 @@ KUBE_GO_PACKAGE=github.com/GoogleCloudPlatform/kubernetes
 export KUBE_GO_PACKAGE
 
 (
-# Create symlink named ${KUBE_GO_PACKAGE} under output/go/src.
+# Create symlink named ${KUBE_GO_PACKAGE} under _output/go/src.
 # So that Go knows how to import Kubernetes sources by full path.
 # Use a subshell to avoid leaking these variables.

@@ -41,7 +41,7 @@ API_PORT=${API_PORT:-8080}
 API_HOST=${API_HOST:-127.0.0.1}
 KUBELET_PORT=${KUBELET_PORT:-10250}
 
-GO_OUT=$(dirname $0)/../output/go/bin
+GO_OUT=$(dirname $0)/../_output/go/bin
 
 APISERVER_LOG=/tmp/apiserver.log
 "${GO_OUT}/apiserver" \
@@ -44,7 +44,7 @@ ETCD_PORT=${ETCD_PORT:-4001}
 API_PORT=${API_PORT:-8080}
 API_HOST=${API_HOST:-127.0.0.1}
 KUBELET_PORT=${KUBELET_PORT:-10250}
-GO_OUT=$(dirname $0)/../output/go/bin
+GO_OUT=$(dirname $0)/../_output/go/bin
 
 # Check kubecfg
 out=$(${GO_OUT}/kubecfg -version)

@@ -31,6 +31,7 @@ find_test_dirs() {
 find . -not \( \
 \( \
 -wholename './output' \
+-o -wholename './_output' \
 -o -wholename './release' \
 -o -wholename './target' \
 -o -wholename '*/third_party/*' \

@@ -43,7 +43,7 @@ trap "echo etcd still running" EXIT
 echo
 echo Integration scenario ...
 echo
-$(dirname $0)/../output/go/bin/integration
+$(dirname $0)/../_output/go/bin/integration
 
 # nuke etcd
 trap cleanup EXIT SIGINT
@@ -18,7 +18,7 @@ REPO_ROOT="$(cd "$(dirname "$0")/../" && pwd -P)"
 
 result=0
 
-gofiles="$(find ${REPO_ROOT} -type f | grep "[.]go$" | grep -v "Godeps/\|third_party/\|release/\|output/\|target/")"
+gofiles="$(find ${REPO_ROOT} -type f | grep "[.]go$" | grep -v "Godeps/\|third_party/\|release/\|_?output/|target/")"
 for file in ${gofiles}; do
 if [[ "$(${REPO_ROOT}/hooks/boilerplate.sh "${file}")" -eq "0" ]]; then
 echo "Boilerplate header is wrong for: ${file}"

@@ -26,7 +26,7 @@ fi
 
 REPO_ROOT="$(cd "$(dirname "$0")/../" && pwd -P)"
 
-files="$(find ${REPO_ROOT} -type f | grep "[.]go$" | grep -v "third_party/\|release/\|output/\|target/\|Godeps/")"
+files="$(find ${REPO_ROOT} -type f | grep "[.]go$" | grep -v "third_party/\|release/\|_?output/\|target/\|Godeps/")"
 bad=$(gofmt -s -l ${files})
 if [[ -n "${bad}" ]]; then
 echo "$bad"

@@ -58,6 +58,6 @@ fi
 azure storage blob upload \
 -a $AZ_STG \
 -k "$stg_key" \
-$SCRIPT_DIR/../../output/release/master-release.tgz \
+$SCRIPT_DIR/../../_output/release/master-release.tgz \
 $CONTAINER \
 master-release.tgz
@@ -31,9 +31,9 @@ KUBE_DIR=$SCRIPT_DIR/..
 # Next build the release tar. This gets copied on to the master and installed
 # from there. It includes the go source for the necessary servers along with
 # the salt configs.
-rm -rf $KUBE_DIR/output/release/*
+rm -rf $KUBE_DIR/_output/release/*
 
-MASTER_RELEASE_DIR=$KUBE_DIR/output/release/master-release
+MASTER_RELEASE_DIR=$KUBE_DIR/_output/release/master-release
 mkdir -p $MASTER_RELEASE_DIR/bin
 mkdir -p $MASTER_RELEASE_DIR/src/scripts

@@ -63,12 +63,12 @@ function find_go_files() {
 \) -name '*.go'
 }
 # find_go_files is directory dependent
-pushd $KUBE_DIR
+pushd $KUBE_DIR >/dev/null
 for f in $(find_go_files); do
 mkdir -p $MASTER_RELEASE_DIR/src/go/$(dirname ${f})
 cp ${f} ${MASTER_RELEASE_DIR}/src/go/${f}
 done
-popd
+popd >/dev/null
 
 echo "Packaging release"
-tar cz -C $KUBE_DIR/output/release -f $KUBE_DIR/output/release/master-release.tgz master-release
+tar cz -C $KUBE_DIR/_output/release -f $KUBE_DIR/_output/release/master-release.tgz master-release
@@ -43,7 +43,7 @@ if ! swiftly -A $OS_AUTH_URL -U $OS_USERNAME -K $OS_PASSWORD get $CONTAINER > /d
 fi
 
 for x in master-release.tgz; do
-swiftly -A $OS_AUTH_URL -U $OS_USERNAME -K $OS_PASSWORD put -i output/release/$x $CONTAINER/output/release/$x > /dev/null 2>&1
+swiftly -A $OS_AUTH_URL -U $OS_USERNAME -K $OS_PASSWORD put -i _output/release/$x $CONTAINER/output/release/$x > /dev/null 2>&1
 done
 
 echo "Release pushed."

@@ -38,21 +38,21 @@ $SCRIPT_DIR/build-release.sh $INSTANCE_PREFIX
 echo "Building launch script"
 # Create the local install script. These are the tools to install the local
 # tools and launch a new cluster.
-LOCAL_RELEASE_DIR=output/release/local-release
+LOCAL_RELEASE_DIR=_output/release/local-release
 mkdir -p $LOCAL_RELEASE_DIR/src/scripts
 
 cp -r cluster/templates $LOCAL_RELEASE_DIR/src/templates
 cp -r cluster/*.sh $LOCAL_RELEASE_DIR/src/scripts
 
-tar cz -C $LOCAL_RELEASE_DIR -f output/release/launch-kubernetes.tgz .
+tar cz -C $LOCAL_RELEASE_DIR -f _output/release/launch-kubernetes.tgz .
 
-echo "#!/bin/bash" >> output/release/launch-kubernetes.sh
-echo "RELEASE_TAG=$RELEASE_TAG" >> output/release/launch-kubernetes.sh
-echo "RELEASE_PREFIX=$RELEASE_PREFIX" >> output/release/launch-kubernetes.sh
-echo "RELEASE_NAME=$RELEASE_NAME" >> output/release/launch-kubernetes.sh
-echo "RELEASE_FULL_PATH=$RELEASE_FULL_PATH" >> output/release/launch-kubernetes.sh
-cat release/launch-kubernetes-base.sh >> output/release/launch-kubernetes.sh
-chmod a+x output/release/launch-kubernetes.sh
+echo "#!/bin/bash" >> _output/release/launch-kubernetes.sh
+echo "RELEASE_TAG=$RELEASE_TAG" >> _output/release/launch-kubernetes.sh
+echo "RELEASE_PREFIX=$RELEASE_PREFIX" >> _output/release/launch-kubernetes.sh
+echo "RELEASE_NAME=$RELEASE_NAME" >> _output/release/launch-kubernetes.sh
+echo "RELEASE_FULL_PATH=$RELEASE_FULL_PATH" >> _output/release/launch-kubernetes.sh
+cat release/launch-kubernetes-base.sh >> _output/release/launch-kubernetes.sh
+chmod a+x _output/release/launch-kubernetes.sh
 
 # Now copy everything up to the release structure on GS
 echo "Uploading to Google Storage"
@@ -61,7 +61,7 @@ if ! gsutil ls $RELEASE_BUCKET > /dev/null 2>&1 ; then
 gsutil mb $RELEASE_BUCKET
 fi
 for x in master-release.tgz launch-kubernetes.tgz launch-kubernetes.sh; do
-gsutil -q cp output/release/$x $RELEASE_FULL_PATH/$x
+gsutil -q cp _output/release/$x $RELEASE_FULL_PATH/$x
 
 make_public_readable $RELEASE_FULL_PATH/$x
 done