Remove unused bash variables

pull/6/head
Joe Beda 2016-10-03 19:41:49 -07:00
parent 034fc8829f
commit d955f54918
5 changed files with 14 additions and 37 deletions

@@ -92,7 +92,7 @@ Look at `docker-machine stop`, `docker-machine start` and `docker-machine rm` to
## Releasing
The `build/release.sh` script will build a release. It will build binaries, run tests, (optionally) build runtime Docker images and then (optionally) upload all build artifacts to a GCS bucket.
The `build/release.sh` script will build a release. It will build binaries, run tests, and (optionally) build runtime Docker images.
The main output is a tar file: `kubernetes.tar.gz`. This includes:
* Cross-compiled client utilities.
@@ -107,18 +107,6 @@ In addition, there are some other tar files that are created:
* `kubernetes-server-*.tar.gz` Server binaries for a specific platform.
* `kubernetes-salt.tar.gz` The salt script/tree shared across multiple deployment scripts.
The release utilities grab a set of environment variables to modify behavior. Arguably, these should be command line flags:
Env Variable | Default | Description
-------------|---------|------------
`KUBE_SKIP_CONFIRMATIONS` | `n` | If `y` then no questions are asked and the scripts just continue.
`KUBE_GCS_UPLOAD_RELEASE` | `n` | Upload release artifacts to GCS
`KUBE_GCS_RELEASE_BUCKET` | `kubernetes-releases-${project_hash}` | The bucket to upload releases to
`KUBE_GCS_RELEASE_PREFIX` | `devel` | The path under the release bucket to put releases
`KUBE_GCS_MAKE_PUBLIC` | `y` | Make GCS links readable from anywhere
`KUBE_GCS_NO_CACHING` | `y` | Disable HTTP caching of GCS release artifacts. By default GCS will cache public objects for up to an hour. When doing "devel" releases this can cause problems.
`KUBE_GCS_DOCKER_REG_PREFIX` | `docker-reg` | *Experimental* When uploading docker images, the bucket that backs the registry.
When building final release tars, they are first staged into `_output/release-stage` before being tar'd up and put into `_output/release-tars`.
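For example, a non-interactive release build that also uploads its artifacts to GCS could be started like this (a sketch that only combines variables from the table above; everything else keeps its default):

```sh
# Skip prompts and push the finished artifacts to the default GCS bucket
# under the "devel" prefix (all values come from the table above).
KUBE_SKIP_CONFIRMATIONS=y \
KUBE_GCS_UPLOAD_RELEASE=y \
KUBE_GCS_RELEASE_PREFIX=devel \
  build/release.sh
```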

@@ -20,7 +20,6 @@ set -o nounset
set -o pipefail
DOCKER_OPTS=${DOCKER_OPTS:-""}
DOCKER_NATIVE=${DOCKER_NATIVE:-""}
DOCKER=(docker ${DOCKER_OPTS})
DOCKER_HOST=${DOCKER_HOST:-""}
DOCKER_MACHINE_NAME=${DOCKER_MACHINE_NAME:-"kube-dev"}
@@ -173,7 +172,7 @@ function kube::build::verify_prereqs() {
LOCAL_OUTPUT_BUILD_CONTEXT="${LOCAL_OUTPUT_IMAGE_STAGING}/${KUBE_BUILD_IMAGE}"
kube::version::get_version_vars
kube::version::save_version_vars "${KUBE_ROOT}/.dockerized-kube-version-defs"
}
# ---------------------------------------------------------------------------
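The `DOCKER=(docker ${DOCKER_OPTS})` line near the top of this file collects the docker binary and any user-supplied options into a bash array, so each option stays a separate argument when the command is expanded later. A minimal sketch of the pattern (the `version` call and the sample options are only illustrations):

```sh
DOCKER_OPTS=${DOCKER_OPTS:-""}
# Word splitting on the unquoted expansion turns e.g.
# DOCKER_OPTS="--tlsverify --host=tcp://127.0.0.1:2376" into two array elements.
DOCKER=(docker ${DOCKER_OPTS})

# "${DOCKER[@]}" expands to: docker --tlsverify --host=tcp://127.0.0.1:2376 version
"${DOCKER[@]}" version
```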

@@ -14,16 +14,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# Incoming options
readonly KUBE_SKIP_CONFIRMATIONS="${KUBE_SKIP_CONFIRMATIONS:-n}"
readonly KUBE_GCS_UPLOAD_RELEASE="${KUBE_GCS_UPLOAD_RELEASE:-n}"
readonly KUBE_GCS_NO_CACHING="${KUBE_GCS_NO_CACHING:-y}"
readonly KUBE_GCS_MAKE_PUBLIC="${KUBE_GCS_MAKE_PUBLIC:-y}"
# KUBE_GCS_RELEASE_BUCKET default: kubernetes-releases-${project_hash}
readonly KUBE_GCS_RELEASE_PREFIX=${KUBE_GCS_RELEASE_PREFIX-devel}/
readonly KUBE_GCS_DOCKER_REG_PREFIX=${KUBE_GCS_DOCKER_REG_PREFIX-docker-reg}/
readonly KUBE_GCS_PUBLISH_VERSION=${KUBE_GCS_PUBLISH_VERSION:-}
readonly KUBE_GCS_DELETE_EXISTING="${KUBE_GCS_DELETE_EXISTING:-n}"
# This file creates release artifacts (tar files, container images) that are
# ready to use for installation or to distribute to end users.
###############################################################################
# Most of the ::release:: namespace functions have been moved to
# github.com/kubernetes/release. Have a look in that repo and specifically in
# lib/releaselib.sh for ::release::-related functionality.
###############################################################################
# This is where the final release artifacts are created locally
readonly RELEASE_STAGE="${LOCAL_OUTPUT_ROOT}/release-stage"
@@ -442,9 +440,3 @@ function kube::release::create_tarball() {
"${TAR}" czf "${tarfile}" -C "${stagingdir}" kubernetes --owner=0 --group=0
}
###############################################################################
# Most of the ::release:: namespace functions have been moved to
# github.com/kubernetes/release. Have a look in that repo and specifically in
# lib/releaselib.sh for ::release::-related functionality.
###############################################################################
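The block of `readonly` assignments removed above is the usual bash "incoming options" idiom: take the value from the caller's environment if present, otherwise fall back to a default, then freeze it. A minimal sketch of the idiom with a made-up variable name:

```sh
# ${VAR:-default} substitutes the default when VAR is unset *or* empty;
# the colon-less form ${VAR-default} (used for KUBE_GCS_RELEASE_PREFIX above)
# substitutes only when VAR is unset, so an explicit empty value is preserved.
readonly MY_UPLOAD_RELEASE="${MY_UPLOAD_RELEASE:-n}"

if [[ "${MY_UPLOAD_RELEASE}" == "y" ]]; then
  echo "would upload release artifacts"
fi
```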

@@ -31,7 +31,6 @@ set -o xtrace
# space.
export HOME=${WORKSPACE} # Nothing should want Jenkins $HOME
export PATH=$PATH:/usr/local/go/bin
export KUBE_SKIP_CONFIRMATIONS=y
# Skip gcloud update checking
export CLOUDSDK_COMPONENT_MANAGER_DISABLE_UPDATE_CHECK=true

@@ -18,7 +18,6 @@
# local-up.sh, but this one launches the three separate binaries.
# You may need to run this as root to allow kubelet to open docker's socket.
DOCKER_OPTS=${DOCKER_OPTS:-""}
DOCKER_NATIVE=${DOCKER_NATIVE:-""}
DOCKER=(docker ${DOCKER_OPTS})
DOCKERIZE_KUBELET=${DOCKERIZE_KUBELET:-""}
ALLOW_PRIVILEGED=${ALLOW_PRIVILEGED:-""}
@@ -379,7 +378,7 @@ function start_kubelet {
if [[ -n "${NET_PLUGIN}" ]]; then
net_plugin_args="--network-plugin=${NET_PLUGIN}"
fi
net_plugin_dir_args=""
if [[ -n "${NET_PLUGIN_DIR}" ]]; then
net_plugin_dir_args="--network-plugin-dir=${NET_PLUGIN_DIR}"
@@ -427,7 +426,7 @@ function start_kubelet {
# dockerized kubelet that might be running.
cleanup_dockerized_kubelet
cred_bind=""
# path to cloud credentials.
cloud_cred=""
if [ "${CLOUD_PROVIDER}" == "aws" ]; then
cloud_cred="${HOME}/.aws/credentials"
@@ -545,7 +544,7 @@ To start using your cluster, open up another terminal/tab and run:
cluster/kubectl.sh config use-context local
cluster/kubectl.sh
EOF
else
cat <<EOF
The kubelet was started.
@@ -586,7 +585,7 @@ fi
if [[ "${START_MODE}" != "nokubelet" ]]; then
start_kubelet
fi
fi
print_success
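The `net_plugin_args` handling in `start_kubelet` above shows the common pattern of building optional flags in variables that stay empty when a setting is not supplied. A rough, self-contained sketch of that pattern (the binary name in the final line is only a placeholder):

```sh
NET_PLUGIN=${NET_PLUGIN:-""}

net_plugin_args=""
if [[ -n "${NET_PLUGIN}" ]]; then
  net_plugin_args="--network-plugin=${NET_PLUGIN}"
fi

# The unquoted expansion disappears entirely when empty, so the flag only
# shows up on the command line when a plugin was actually requested.
echo kubelet ${net_plugin_args}
```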