Merge pull request #5960 from ixdy/kube-log-timestamps

Update kube::log::{error,status} to include timestamp.
pull/6/head
Quinton Hoole 2015-03-25 16:08:52 -07:00
commit 3bfca87492
2 changed files with 37 additions and 35 deletions
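In short, the build and release scripts stop calling bare echo for progress and error lines and go through the shared kube::log helpers instead, and those helpers now prefix every line with a timestamp. A rough before/after sketch of the output, using calls taken from this diff (the timestamp value shown is illustrative):

# before this change
echo "+++ Verifying Prerequisites...."
# prints: +++ Verifying Prerequisites....

# after this change
kube::log::status "Verifying Prerequisites...."
# prints: +++ [0325 16:08:52] Verifying Prerequisites....

kube::log::error "Timed out waiting for binaries..."
# prints to stderr: !!! [0325 16:08:52] Timed out waiting for binaries...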

View File

@@ -109,7 +109,7 @@ readonly RELEASE_DIR="${LOCAL_OUTPUT_ROOT}/release-tars"
 # KUBE_BUILD_DATA_CONTAINER_NAME
 # DOCKER_MOUNT_ARGS
 function kube::build::verify_prereqs() {
-  echo "+++ Verifying Prerequisites...."
+  kube::log::status "Verifying Prerequisites...."
   if [[ "${1-}" != "clean" ]]; then
     if [[ -z "$(which docker)" ]]; then
@@ -130,10 +130,10 @@ function kube::build::verify_prereqs() {
       exit 1
     else
       # Reach over and set the clock. After sleep/resume the clock will skew.
-      echo "+++ Setting boot2docker clock"
+      kube::log::status "Setting boot2docker clock"
       boot2docker ssh sudo date -u -D "%Y%m%d%H%M.%S" --set "$(date -u +%Y%m%d%H%M.%S)" >/dev/null
       if [[ -z "$DOCKER_HOST" ]]; then
-        echo "+++ Setting boot2docker env variables"
+        kube::log::status "Setting boot2docker env variables"
         $(boot2docker shellinit)
       fi
     fi
@@ -162,7 +162,7 @@ function kube::build::verify_prereqs() {
   if [[ ! -z "$(which boot2docker)" ]]; then
     if [[ $(boot2docker status) == "running" ]]; then
       if [[ -z "$DOCKER_HOST" ]]; then
-        echo "+++ Setting boot2docker env variables"
+        kube::log::status "Setting boot2docker env variables"
         $(boot2docker shellinit)
       fi
     fi
@@ -190,17 +190,17 @@ function kube::build::clean_output() {
   # Clean out the output directory if it exists.
   if kube::build::has_docker ; then
     if kube::build::build_image_built ; then
-      echo "+++ Cleaning out _output/dockerized/bin/ via docker build image"
+      kube::log::status "Cleaning out _output/dockerized/bin/ via docker build image"
      kube::build::run_build_command bash -c "rm -rf '${REMOTE_OUTPUT_BINPATH}'/*"
     else
-      echo "!!! Build image not built. Cannot clean via docker build image."
+      kube::log::error "Build image not built. Cannot clean via docker build image."
     fi
-    echo "+++ Removing data container"
+    kube::log::status "Removing data container"
     "${DOCKER[@]}" rm -v "${KUBE_BUILD_DATA_CONTAINER_NAME}" >/dev/null 2>&1 || true
   fi
-  echo "+++ Cleaning out local _output directory"
+  kube::log::status "Cleaning out local _output directory"
   rm -rf "${LOCAL_OUTPUT_ROOT}"
 }
@@ -216,7 +216,7 @@ function kube::build::prepare_output() {
       selinuxenabled && \
       which chcon >/dev/null ; then
     if [[ ! $(ls -Zd "${LOCAL_OUTPUT_ROOT}") =~ svirt_sandbox_file_t ]] ; then
-      echo "+++ Applying SELinux policy to '_output' directory."
+      kube::log::status "Applying SELinux policy to '_output' directory."
       if ! chcon -Rt svirt_sandbox_file_t "${LOCAL_OUTPUT_ROOT}"; then
         echo " ***Failed***. This may be because you have root owned files under _output."
         echo " Continuing, but this build may fail later if SELinux prevents access."
@@ -236,7 +236,7 @@ function kube::build::has_docker() {
 # $2 - image tag
 function kube::build::docker_image_exists() {
   [[ -n $1 && -n $2 ]] || {
-    echo "!!! Internal error. Image not specified in docker_image_exists." >&2
+    kube::log::error "Internal error. Image not specified in docker_image_exists."
     exit 2
   }
@@ -248,7 +248,7 @@ function kube::build::docker_image_exists() {
 # Takes $1 and computes a short has for it. Useful for unique tag generation
 function kube::build::short_hash() {
   [[ $# -eq 1 ]] || {
-    echo "!!! Internal error. No data based to short_hash." >&2
+    kube::log::error "Internal error. No data based to short_hash."
     exit 2
   }
@@ -291,7 +291,7 @@ function kube::build::ensure_golang() {
       }
     }
-    echo "+++ Pulling docker image: golang:${KUBE_BUILD_GOLANG_VERSION}"
+    kube::log::status "Pulling docker image: golang:${KUBE_BUILD_GOLANG_VERSION}"
     "${DOCKER[@]}" pull golang:${KUBE_BUILD_GOLANG_VERSION}
   }
 }
@@ -346,7 +346,7 @@ function kube::build::docker_build() {
   local -r context_dir=$2
   local -ra build_cmd=("${DOCKER[@]}" build -t "${image}" "${context_dir}")
-  echo "+++ Building Docker image ${image}."
+  kube::log::status "Building Docker image ${image}."
   local docker_output
   docker_output=$("${build_cmd[@]}" 2>&1) || {
     cat <<EOF >&2
@@ -366,7 +366,7 @@ EOF
 function kube::build::clean_image() {
   local -r image=$1
-  echo "+++ Deleting docker image ${image}"
+  kube::log::status "Deleting docker image ${image}"
   "${DOCKER[@]}" rmi ${image} 2> /dev/null || true
 }
@@ -375,13 +375,13 @@ function kube::build::clean_images() {
   kube::build::clean_image "${KUBE_BUILD_IMAGE}"
-  echo "+++ Cleaning all other untagged docker images"
+  kube::log::status "Cleaning all other untagged docker images"
   "${DOCKER[@]}" rmi $("${DOCKER[@]}" images -q --filter 'dangling=true') 2> /dev/null || true
 }
 function kube::build::ensure_data_container() {
   if ! "${DOCKER[@]}" inspect "${KUBE_BUILD_DATA_CONTAINER_NAME}" >/dev/null 2>&1; then
-    echo "+++ Creating data container"
+    kube::log::status "Creating data container"
     local -ra docker_cmd=(
       "${DOCKER[@]}" run
       "${DOCKER_DATA_MOUNT_ARGS[@]}"
@@ -396,7 +396,7 @@ function kube::build::ensure_data_container() {
 # Run a command in the kube-build image. This assumes that the image has
 # already been built. This will sync out all output data from the build.
 function kube::build::run_build_command() {
-  echo "+++ Running build command...."
+  kube::log::status "Running build command...."
   [[ $# != 0 ]] || { echo "Invalid input." >&2; return 4; }
   kube::build::ensure_data_container
@@ -455,7 +455,7 @@ function kube::build::copy_output() {
       "${DOCKER[@]}" run "${docker_run_opts[@]}" "${KUBE_BUILD_IMAGE}"
     )
-    echo "+++ Syncing back _output/dockerized/bin directory from remote Docker"
+    kube::log::status "Syncing back _output/dockerized/bin directory from remote Docker"
     rm -rf "${LOCAL_OUTPUT_BINPATH}"
     mkdir -p "${LOCAL_OUTPUT_BINPATH}"
@@ -473,7 +473,7 @@ function kube::build::copy_output() {
       let count=count+1
       if [[ $count -eq 60 ]]; then
         # break after 5m
-        echo "!!! Timed out waiting for binaries..."
+        kube::log::error "Timed out waiting for binaries..."
         break
       fi
       sleep 5
@@ -481,7 +481,7 @@ function kube::build::copy_output() {
     "${DOCKER[@]}" rm -f -v "${KUBE_BUILD_CONTAINER_NAME}" >/dev/null 2>&1 || true
   else
-    echo "+++ Output directory is local. No need to copy results out."
+    kube::log::status "Output directory is local. No need to copy results out."
   fi
 }
@@ -513,7 +513,7 @@ function kube::release::package_client_tarballs() {
   platforms=($(cd "${LOCAL_OUTPUT_BINPATH}" ; echo */*))
   for platform in "${platforms[@]}" ; do
     local platform_tag=${platform/\//-} # Replace a "/" for a "-"
-    echo "+++ Building tarball: client $platform_tag"
+    kube::log::status "Building tarball: client $platform_tag"
     local release_stage="${RELEASE_STAGE}/client/${platform_tag}/kubernetes"
     rm -rf "${release_stage}"
@@ -542,7 +542,7 @@ function kube::release::package_server_tarballs() {
   local platform
   for platform in "${KUBE_SERVER_PLATFORMS[@]}" ; do
     local platform_tag=${platform/\//-} # Replace a "/" for a "-"
-    echo "+++ Building tarball: server $platform_tag"
+    kube::log::status "Building tarball: server $platform_tag"
     local release_stage="${RELEASE_STAGE}/server/${platform_tag}/kubernetes"
     rm -rf "${release_stage}"
@@ -572,7 +572,7 @@ function kube::release::package_server_tarballs() {
 # Package up the salt configuration tree. This is an optional helper to getting
 # a cluster up and running.
 function kube::release::package_salt_tarball() {
-  echo "+++ Building tarball: salt"
+  kube::log::status "Building tarball: salt"
   local release_stage="${RELEASE_STAGE}/salt/kubernetes"
   rm -rf "${release_stage}"
@@ -596,7 +596,7 @@ function kube::release::package_salt_tarball() {
 # This is the stuff you need to run tests from the binary distribution.
 function kube::release::package_test_tarball() {
-  echo "+++ Building tarball: test"
+  kube::log::status "Building tarball: test"
   local release_stage="${RELEASE_STAGE}/test/kubernetes"
   rm -rf "${release_stage}"
@@ -627,7 +627,7 @@ function kube::release::package_test_tarball() {
 #  - tarballs for server binary and salt configs that are ready to be uploaded
 #    to master by whatever means appropriate.
 function kube::release::package_full_tarball() {
-  echo "+++ Building tarball: full"
+  kube::log::status "Building tarball: full"
   local release_stage="${RELEASE_STAGE}/full/kubernetes"
   rm -rf "${release_stage}"
@@ -764,11 +764,11 @@ function kube::release::gcs::copy_release_artifacts() {
     gcs_options=("-h" "Cache-Control:private, max-age=0")
   fi
-  echo "+++ Copying release artifacts to ${gcs_destination}"
+  kube::log::status "Copying release artifacts to ${gcs_destination}"
   # First delete all objects at the destination
   if gsutil ls "${gcs_destination}" >/dev/null 2>&1; then
-    echo "!!! ${gcs_destination} not empty."
+    kube::log::error "${gcs_destination} not empty."
     read -p "Delete everything under ${gcs_destination}? [y/n] " -r || {
       echo "EOF on prompt. Skipping upload"
       return
@@ -810,7 +810,7 @@ function kube::release::gcs::copy_release_artifacts() {
   # extra credit.
   if [[ ${KUBE_GCS_MAKE_PUBLIC} =~ ^[yY]$ ]]; then
-    echo "+++ Marking all uploaded objects public"
+    kube::log::status "Marking all uploaded objects public"
     gsutil acl ch -R -g all:R "${gcs_destination}" >/dev/null 2>&1
   fi
@@ -830,7 +830,7 @@ function kube::release::gcs::publish_latest() {
     gsutil acl ch -R -g all:R "${latest_file_dst}" >/dev/null 2>&1
   fi
-  echo "+++ gsutil cat ${latest_file_dst}:"
+  kube::log::status "gsutil cat ${latest_file_dst}:"
   gsutil cat ${latest_file_dst}
 }
@@ -842,7 +842,7 @@ function kube::release::gcs::publish_latest_official() {
   local -r version_regex="^v(0|[1-9][0-9]*)\\.(0|[1-9][0-9]*)\\.(0|[1-9][0-9]*)$"
   [[ ${new_version} =~ ${version_regex} ]] || {
-    echo "!!! publish_latest_official passed bogus value: '${new_version}'" >&2
+    kube::log::error "publish_latest_official passed bogus value: '${new_version}'"
     return 1
   }
@@ -854,7 +854,7 @@ function kube::release::gcs::publish_latest_official() {
   gcs_version=$(gsutil cat "${latest_file_dst}")
   [[ ${gcs_version} =~ ${version_regex} ]] || {
-    echo "!!! ${latest_file_dst} contains invalid release version, can't compare: '${gcs_version}'" >&2
+    kube::log::error "${latest_file_dst} contains invalid release version, can't compare: '${gcs_version}'"
     return 1
   }
@@ -876,10 +876,10 @@ function kube::release::gcs::publish_latest_official() {
   fi
   if [[ "${greater}" != "true" ]]; then
-    echo "+++ ${gcs_version} (latest on GCS) >= ${new_version} (just uploaded), not updating ${latest_file_dst}"
+    kube::log::status "${gcs_version} (latest on GCS) >= ${new_version} (just uploaded), not updating ${latest_file_dst}"
     return 0
   fi
-  echo "+++ ${new_version} (just uploaded) > ${gcs_version} (latest on GCS), updating ${latest_file_dst}"
+  kube::log::status "${new_version} (just uploaded) > ${gcs_version} (latest on GCS), updating ${latest_file_dst}"
   kube::release::gcs::publish_latest
 }

View File

@@ -85,7 +85,8 @@ kube::log::error_exit() {
 # Log an error but keep going. Don't dump the stack or exit.
 kube::log::error() {
-  echo "!!! ${1-}" >&2
+  timestamp=$(date +"[%m%d %H:%M:%S]")
+  echo "!!! $timestamp ${1-}" >&2
   shift
   for message; do
     echo " $message" >&2
@@ -129,7 +130,8 @@ kube::log::info_from_stdin() {
 # Print a status line. Formatted to show up in a stream of output.
 kube::log::status() {
-  echo "+++ $1"
+  timestamp=$(date +"[%m%d %H:%M:%S]")
+  echo "+++ $timestamp $1"
  shift
   for message; do
     echo " $message"
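For reference, a self-contained sketch of the two helpers as they read after this change, which can be sourced in a shell to try the new output. The continuation-line indentation and the sample call below are illustrative, not taken verbatim from the repo:

#!/usr/bin/env bash
# Sketch of the updated logging helpers from this commit.

# Log an error but keep going. Don't dump the stack or exit.
kube::log::error() {
  timestamp=$(date +"[%m%d %H:%M:%S]")
  echo "!!! $timestamp ${1-}" >&2
  shift
  # Any extra arguments become indented continuation lines.
  for message; do
    echo "    $message" >&2
  done
}

# Print a status line. Formatted to show up in a stream of output.
kube::log::status() {
  timestamp=$(date +"[%m%d %H:%M:%S]")
  echo "+++ $timestamp $1"
  shift
  for message; do
    echo "    $message"
  done
}

# Example (hypothetical call):
kube::log::status "Building tarball: client linux-amd64" "output goes to _output/release-tars"
# prints something like:
#   +++ [0325 16:08:52] Building tarball: client linux-amd64
#       output goes to _output/release-tars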