Enable logexporter mechanism to dump logs from k8s nodes to GCS directly

pull/6/head
Shyam Jeedigunta 2017-07-08 02:18:10 +02:00
parent a9bf44101b
commit 5f8cb3d9ff
5 changed files with 442 additions and 4 deletions
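This change adds an optional logexporter path for node logs: instead of SSHing into every node and scp-ing files back, a short-lived daemonset running on the cluster uploads each node's logs to GCS in parallel, with SSH-based dumping kept as a fallback for any nodes the exporter misses. A minimal sketch of exercising the new path (the bucket path is hypothetical; it assumes a running test cluster and GOOGLE_APPLICATION_CREDENTIALS pointing at a service account that can write to the bucket):

    ENABLE_LOGEXPORTER=true \
    GCS_ARTIFACTS_DIR="gs://my-test-bucket/logs/run-1" \
    cluster/log-dump/log-dump.sh _artifacts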

cluster/log-dump/log-dump.sh (new executable file, 363 lines)

@@ -0,0 +1,363 @@
#!/bin/bash
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Call this to dump all master and node logs into the folder specified in $1
# (defaults to _artifacts). Only works if the provider supports SSH.
set -o errexit
set -o nounset
set -o pipefail
readonly report_dir="${1:-_artifacts}"
# In order to more trivially extend log-dump for custom deployments,
# check for a function named log_dump_custom_get_instances. If it's
# defined, we assume the function can be called with one argument, the
# role, which is either "master" or "node".
if [[ $(type -t log_dump_custom_get_instances) == "function" ]]; then
readonly use_custom_instance_list=yes
else
readonly use_custom_instance_list=
fi
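# For illustration, a custom deployment could hook in like this (hypothetical
# helper; any function printing one SSH-reachable host per line works):
#   function log_dump_custom_get_instances() {
#     local -r role="$1"  # either "master" or "node"
#     my-cloud-cli list-instances --role="${role}"
#   }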
readonly master_ssh_supported_providers="gce aws kubemark"
readonly node_ssh_supported_providers="gce gke aws kubemark"
readonly master_logfiles="kube-apiserver kube-scheduler rescheduler kube-controller-manager etcd etcd-events glbc cluster-autoscaler kube-addon-manager fluentd"
readonly node_logfiles="kube-proxy fluentd node-problem-detector"
readonly node_systemd_services="node-problem-detector"
readonly hollow_node_logfiles="kubelet-hollow-node-* kubeproxy-hollow-node-* npd-*"
readonly aws_logfiles="cloud-init-output"
readonly gce_logfiles="startupscript"
readonly kern_logfile="kern"
readonly initd_logfiles="docker"
readonly supervisord_logfiles="kubelet supervisor/supervisord supervisor/kubelet-stdout supervisor/kubelet-stderr supervisor/docker-stdout supervisor/docker-stderr"
readonly systemd_services="kubelet docker"
# Limit the number of concurrent node connections so that we don't run out of
# file descriptors for large clusters.
readonly max_scp_processes=25
# This template spits out the external IPs and images for each node in the cluster in a format like so:
# 52.32.7.85 gcr.io/google_containers/kube-apiserver:1355c18c32d7bef16125120bce194fad gcr.io/google_containers/kube-controller-manager:46365cdd8d28b8207950c3c21d1f3900 [...]
readonly ips_and_images='{range .items[*]}{@.status.addresses[?(@.type == "ExternalIP")].address} {@.status.images[*].names[*]}{"\n"}{end}'
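# Illustrative use of the template above (assuming a kubectl wrapper such as
# the KUBECTL variable defined further below):
#   "${KUBECTL}" get nodes -o=jsonpath="${ips_and_images}"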
function setup() {
if [[ -z "${use_custom_instance_list}" ]]; then
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
: ${KUBE_CONFIG_FILE:="config-test.sh"}
source "${KUBE_ROOT}/cluster/kube-util.sh"
detect-project &> /dev/null
elif [[ -z "${LOG_DUMP_SSH_KEY:-}" ]]; then
echo "LOG_DUMP_SSH_KEY not set, but required when using log_dump_custom_get_instances"
exit 1
elif [[ -z "${LOG_DUMP_SSH_USER:-}" ]]; then
echo "LOG_DUMP_SSH_USER not set, but required when using log_dump_custom_get_instances"
exit 1
fi
}
function log-dump-ssh() {
if [[ -z "${use_custom_instance_list}" ]]; then
ssh-to-node "$@"
return
fi
local host="$1"
local cmd="$2"
ssh -oLogLevel=quiet -oConnectTimeout=30 -oStrictHostKeyChecking=no -i "${LOG_DUMP_SSH_KEY}" "${LOG_DUMP_SSH_USER}@${host}" "${cmd}"
}
# Copy all files /var/log/{$3}.log on node $1 into local dir $2.
# $3 should be a space-separated string of files.
# This function shouldn't ever trigger errexit, but doesn't block stderr.
function copy-logs-from-node() {
local -r node="${1}"
local -r dir="${2}"
local files=( ${3} )
# Append ".log*"
# The * at the end is needed to also copy rotated logs (which happens
# in large clusters and long runs).
files=( "${files[@]/%/.log*}" )
# Prepend "/var/log/"
files=( "${files[@]/#/\/var\/log\/}" )
# Comma delimit (even the singleton, or scp does the wrong thing), surround by braces.
local -r scp_files="{$(printf "%s," "${files[@]}")}"
if [[ -n "${use_custom_instance_list}" ]]; then
scp -oLogLevel=quiet -oConnectTimeout=30 -oStrictHostKeyChecking=no -i "${LOG_DUMP_SSH_KEY}" "${LOG_DUMP_SSH_USER}@${node}:${scp_files}" "${dir}" > /dev/null || true
else
case "${KUBERNETES_PROVIDER}" in
gce|gke|kubemark)
# get-serial-port-output lets you ask for ports 1-4, but currently (11/21/2016) only port 1 contains useful information
gcloud compute instances get-serial-port-output --project "${PROJECT}" --zone "${ZONE}" --port 1 "${node}" > "${dir}/serial-1.log" || true
gcloud compute scp --recurse --project "${PROJECT}" --zone "${ZONE}" "${node}:${scp_files}" "${dir}" > /dev/null || true
;;
aws)
local ip=$(get_ssh_hostname "${node}")
scp -oLogLevel=quiet -oConnectTimeout=30 -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" "${SSH_USER}@${ip}:${scp_files}" "${dir}" > /dev/null || true
;;
esac
fi
}
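# For example, with files="kube-proxy fluentd", scp_files above expands to
#   {/var/log/kube-proxy.log*,/var/log/fluentd.log*,}
# (note the harmless trailing comma), which the remote shell brace-expands so
# all matching logs come over in a single scp invocation.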
# Save logs for node $1 into directory $2. Pass in any non-common files in $3.
# Pass in any non-common systemd services in $4.
# $3 and $4 should be a space-separated list of files.
# This function shouldn't ever trigger errexit
function save-logs() {
local -r node_name="${1}"
local -r dir="${2}"
local files="${3}"
local opt_systemd_services="${4:-""}"
if [[ -n "${use_custom_instance_list}" ]]; then
if [[ -n "${LOG_DUMP_SAVE_LOGS:-}" ]]; then
files="${files} ${LOG_DUMP_SAVE_LOGS:-}"
fi
else
case "${KUBERNETES_PROVIDER}" in
gce|gke|kubemark)
files="${files} ${gce_logfiles}"
if [[ "${KUBERNETES_PROVIDER}" == "kubemark" && "${ENABLE_HOLLOW_NODE_LOGS:-}" == "true" ]]; then
files="${files} ${hollow_node_logfiles}"
fi
;;
aws)
files="${files} ${aws_logfiles}"
;;
esac
fi
local -r services=( ${systemd_services} ${opt_systemd_services} ${LOG_DUMP_SAVE_SERVICES:-} )
if log-dump-ssh "${node_name}" "command -v journalctl" &> /dev/null; then
log-dump-ssh "${node_name}" "sudo journalctl --output=short-precise -u kube-node-installation.service" > "${dir}/kube-node-installation.log" || true
log-dump-ssh "${node_name}" "sudo journalctl --output=short-precise -u kube-node-configuration.service" > "${dir}/kube-node-configuration.log" || true
log-dump-ssh "${node_name}" "sudo journalctl --output=short-precise -k" > "${dir}/kern.log" || true
for svc in "${services[@]}"; do
log-dump-ssh "${node_name}" "sudo journalctl --output=cat -u ${svc}.service" > "${dir}/${svc}.log" || true
done
else
files="${kern_logfile} ${files} ${initd_logfiles} ${supervisord_logfiles}"
fi
echo "Changing logfiles to be world-readable for download"
log-dump-ssh "${node_name}" "sudo chmod -R a+r /var/log" || true
echo "Copying '${files}' from ${node_name}"
copy-logs-from-node "${node_name}" "${dir}" "${files}"
}
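# Example invocation, mirroring what dump_nodes below does for each node
# ("node-1" is a hypothetical node name):
#   save-logs "node-1" "${report_dir}/node-1" "${node_logfiles}" "${node_systemd_services}"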
function dump_masters() {
local master_names
if [[ -n "${use_custom_instance_list}" ]]; then
master_names=( $(log_dump_custom_get_instances master) )
elif [[ ! "${master_ssh_supported_providers}" =~ "${KUBERNETES_PROVIDER}" ]]; then
echo "Master SSH not supported for ${KUBERNETES_PROVIDER}"
return
else
if ! (detect-master &> /dev/null); then
echo "Master not detected. Is the cluster up?"
return
fi
master_names=( "${MASTER_NAME}" )
fi
if [[ "${#master_names[@]}" == 0 ]]; then
echo "No masters found?"
return
fi
proc=${max_scp_processes}
for master_name in "${master_names[@]}"; do
master_dir="${report_dir}/${master_name}"
mkdir -p "${master_dir}"
save-logs "${master_name}" "${master_dir}" "${master_logfiles}" &
# We don't want to run more than ${max_scp_processes} at a time, so
# wait once we hit that many nodes. This isn't ideal, since one might
# take much longer than the others, but it should help.
proc=$((proc - 1))
if [[ proc -eq 0 ]]; then
proc=${max_scp_processes}
wait
fi
done
# Wait for any remaining processes.
if [[ proc -gt 0 && proc -lt ${max_scp_processes} ]]; then
wait
fi
}
function dump_nodes() {
local node_names
if [[ -n "$1" ]]; then
echo "Dumping logs for nodes provided as args to dump_nodes() function"
node_names=( "$@" )
elif [[ -n "${use_custom_instance_list}" ]]; then
echo "Dumping logs for nodes provided by log_dump_custom_get_instances() function"
node_names=( $(log_dump_custom_get_instances node) )
elif [[ ! "${node_ssh_supported_providers}" =~ "${KUBERNETES_PROVIDER}" ]]; then
echo "Node SSH not supported for ${KUBERNETES_PROVIDER}"
return
else
echo "Detecting nodes in the cluster"
detect-node-names &> /dev/null
node_names=( "${NODE_NAMES[@]}" )
fi
if [[ "${#node_names[@]}" == 0 ]]; then
echo "No nodes found!"
return
fi
nodes_selected_for_logs=()
if [[ -n "${LOGDUMP_ONLY_N_RANDOM_NODES:-}" ]]; then
# We randomly choose 'LOGDUMP_ONLY_N_RANDOM_NODES' many nodes for fetching logs.
for index in `shuf -i 0-$(( ${#node_names[*]} - 1 )) -n ${LOGDUMP_ONLY_N_RANDOM_NODES}`
do
nodes_selected_for_logs+=("${node_names[$index]}")
done
else
nodes_selected_for_logs=( "${node_names[@]}" )
fi
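# e.g. with 4 nodes and LOGDUMP_ONLY_N_RANDOM_NODES=2, `shuf -i 0-3 -n 2`
# might print "3" and "1", selecting node_names[3] and node_names[1].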
proc=${max_scp_processes}
for node_name in "${nodes_selected_for_logs[@]}"; do
node_dir="${report_dir}/${node_name}"
mkdir -p "${node_dir}"
# Save logs in the background. This speeds up things when there are
# many nodes.
save-logs "${node_name}" "${node_dir}" "${node_logfiles}" "${node_systemd_services}" &
# We don't want to run more than ${max_scp_processes} at a time, so
# wait once we hit that many nodes. This isn't ideal, since one might
# take much longer than the others, but it should help.
proc=$((proc - 1))
if [[ proc -eq 0 ]]; then
proc=${max_scp_processes}
wait
fi
done
# Wait for any remaining processes.
if [[ proc -gt 0 && proc -lt ${max_scp_processes} ]]; then
wait
fi
}
function dump_nodes_with_logexporter() {
echo "Detecting nodes in the cluster"
detect-node-names &> /dev/null
if [[ "${#NODE_NAMES[@]}" == 0 ]]; then
echo "No nodes found!"
return
fi
# Obtain parameters required by logexporter.
local -r service_account_credentials="$(base64 < "${GOOGLE_APPLICATION_CREDENTIALS}" | tr -d '\n')"
local -r cloud_provider="${KUBERNETES_PROVIDER}"
local -r gcs_artifacts_dir="${GCS_ARTIFACTS_DIR}"
local -r enable_hollow_node_logs="${ENABLE_HOLLOW_NODE_LOGS:-false}"
local -r logexport_sleep_seconds="$(( 30 + NUM_NODES / 10 ))"
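# e.g. NUM_NODES=100 gives a 40s sleep; NUM_NODES=5000 gives 530s.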
# Fill in the parameters in the logexporter daemonset template.
sed -i'' -e "s/{{.ServiceAccountCredentials}}/${service_account_credentials}/g" "${KUBE_ROOT}/cluster/log-dump/logexporter-daemonset.yaml"
sed -i'' -e "s/{{.CloudProvider}}/${cloud_provider}/g" "${KUBE_ROOT}/cluster/log-dump/logexporter-daemonset.yaml"
sed -i'' -e "s/{{.GCSPath}}/${gcs_artifacts_dir}/g" "${KUBE_ROOT}/cluster/log-dump/logexporter-daemonset.yaml"
sed -i'' -e "s/{{.EnableHollowNodeLogs}}/${enable_hollow_node_logs}/g" "${KUBE_ROOT}/cluster/log-dump/logexporter-daemonset.yaml"
# Create the logexporter namespace, service-account secret and the logexporter daemonset within that namespace.
KUBECTL="${KUBECTL:-${KUBE_ROOT}/cluster/kubectl.sh}"
"${KUBECTL}" create -f "${KUBE_ROOT}/cluster/log-dump/logexporter-daemonset.yaml"
# Give some time for the pods to finish uploading logs.
sleep "${logexport_sleep_seconds}"
# List the logexporter pods created and their corresponding nodes.
pods_and_nodes=()
for retry in {1..5}; do
pods_and_nodes=$(${KUBECTL} get pods -n logexporter -o=custom-columns=NAME:.metadata.name,NODE:.spec.nodeName | tail -n +2)
if [[ -n "${pods_and_nodes}" ]]; then
echo -e "List of logexporter pods found:\n${pods_and_nodes}"
break
fi
if [[ "${retry}" == 5 ]]; then
echo "Failed to list any logexporter pods after multiple retries.. falling back to logdump for nodes through SSH"
"${KUBECTL}" delete namespace logexporter
dump_nodes "${NODE_NAMES[@]}"
return
fi
done
# Collect names of nodes we didn't find a logexporter pod on.
# Note: This step is O(#nodes^2) as we check if each node is present in the list of nodes running logexporter.
# Making it linear would add code complexity without much benefit (as it just takes < 1s for 5k nodes anyway).
failed_nodes=()
for node in "${NODE_NAMES[@]}"; do
if [[ ! "${pods_and_nodes}" =~ "${node}" ]]; then
failed_nodes+=("${node}")
fi
done
# Collect names of nodes whose logexporter pod didn't succeed.
# TODO(shyamjvs): Parallelize the for loop below to make it faster (if needed).
logexporter_pods=( $(echo "${pods_and_nodes}" | awk '{print $1}') )
logexporter_nodes=( $(echo "${pods_and_nodes}" | awk '{print $2}') )
for index in "${!logexporter_pods[@]}"; do
pod="${logexporter_pods[$index]}"
node="${logexporter_nodes[$index]}"
# TODO(shyamjvs): Use a /status endpoint on the pod instead of checking its logs if that's faster.
pod_success_log=$("${KUBECTL}" logs "${pod}" -n logexporter 2>&1 | grep "Logs successfully uploaded") || true
if [[ -z "${pod_success_log}" ]]; then
failed_nodes+=("${node}")
fi
done
# Delete the logexporter resources and dump logs for the failed nodes (if any) through SSH.
"${KUBECTL}" delete namespace logexporter
if [[ "${#failed_nodes[@]}" != 0 ]]; then
echo -e "Dumping logs through SSH for nodes logexporter failed to succeed on:\n${failed_nodes[@]}"
dump_nodes "${failed_nodes[@]}"
fi
}
function main() {
setup
# Copy master logs to artifacts dir locally (through SSH).
echo "Dumping logs from master locally to '${report_dir}'"
dump_masters
if [[ "${DUMP_ONLY_MASTER_LOGS:-}" == "true" ]]; then
echo "Skipping dumping of node logs"
return
fi
# Copy logs from nodes to GCS directly or to artifacts dir locally (through SSH).
if [[ "${ENABLE_LOGEXPORTER:-}" == "true" ]]; then
if [[ -z "${GCS_ARTIFACTS_DIR:-}" ]]; then
echo "Env var GCS_ARTIFACTS_DIR is empty. Failed to dump node logs to GCS."
exit 1
fi
echo "Dumping logs from nodes to GCS directly at '${GCS_ARTIFACTS_DIR}'"
dump_nodes_with_logexporter
else
echo "Dumping logs from nodes locally to '${report_dir}'"
dump_nodes
fi
}
main

cluster/log-dump/logexporter-daemonset.yaml (new file, 74 lines)

@@ -0,0 +1,74 @@
# Template config for running the logexporter on the cluster as a daemonset.
# Creates everything within 'logexporter' namespace.
#
# Note: Since daemonset pods have restart policy "Always", we give the
# logexporter pods a long sleep-duration (24 hr) so they don't exit (and get
# restarted) while other pods are still uploading. It is the caller's
# responsibility to detect when the work is done (or apply a timeout) and
# delete the daemonset.
apiVersion: v1
kind: Namespace
metadata:
name: logexporter
---
apiVersion: v1
kind: Secret
metadata:
name: google-service-account
namespace: logexporter
type: Opaque
data:
service-account.json: {{.ServiceAccountCredentials}}
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: logexporter
namespace: logexporter
spec:
template:
metadata:
labels:
app: logexporter
spec:
containers:
- name: logexporter-test
image: gcr.io/google-containers/logexporter:v0.1.0
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
command:
- logexporter
- --node-name=$(NODE_NAME)
- --cloud-provider={{.CloudProvider}}
- --gcs-path={{.GCSPath}}
- --gcloud-auth-file-path=/etc/service-account/service-account.json
- --enable-hollow-node-logs={{.EnableHollowNodeLogs}}
- --sleep-duration=24h
- --alsologtostderr
volumeMounts:
- mountPath: /etc/service-account
name: service
readOnly: true
- mountPath: /var/log
name: varlog
readOnly: true
- mountPath: /workspace/etc
name: hostetc
readOnly: true
resources:
requests:
cpu: 10m
memory: 10Mi
volumes:
- name: service
secret:
secretName: google-service-account
- name: varlog
hostPath:
path: /var/log
- name: hostetc
hostPath:
path: /etc
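
As a rough way to spot-check the result (assuming gsutil access and the hypothetical bucket from the example above), the uploaded node logs should appear under the GCS path the script templated in:

    gsutil ls gs://my-test-bucket/logs/run-1/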

@@ -53,6 +53,7 @@ cluster/lib/logging.sh: local source_file=${BASH_SOURCE[$stack_skip]}
cluster/log-dump.sh: local -r node_name="${1}"
cluster/log-dump.sh: for node_name in "${node_names[@]}"; do
cluster/log-dump.sh:readonly report_dir="${1:-_artifacts}"
+cluster/log-dump/log-dump.sh:readonly report_dir="${1:-_artifacts}"
cluster/photon-controller/templates/salt-master.sh: api_servers: $MASTER_NAME
cluster/photon-controller/templates/salt-minion.sh: hostname_override: $(ip route get 1.1.1.1 | awk '{print $7}')
cluster/photon-controller/util.sh: node_ip=$(${PHOTON} vm networks "${node_id}" | grep -i $'\t'"00:0C:29" | grep -E '[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+' | head -1 | awk -F'\t' '{print $3}')

@@ -4287,17 +4287,17 @@ func CheckConnectivityToHost(f *Framework, nodeName, podName, host string, timeo
}
// CoreDump SSHs to the master and all nodes and dumps their logs into dir.
-// It shells out to cluster/log-dump.sh to accomplish this.
+// It shells out to cluster/log-dump/log-dump.sh to accomplish this.
func CoreDump(dir string) {
if TestContext.DisableLogDump {
Logf("Skipping dumping logs from cluster")
return
}
-cmd := exec.Command(path.Join(TestContext.RepoRoot, "cluster", "log-dump.sh"), dir)
+cmd := exec.Command(path.Join(TestContext.RepoRoot, "cluster", "log-dump", "log-dump.sh"), dir)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
Logf("Error running cluster/log-dump.sh: %v", err)
Logf("Error running cluster/log-dump/log-dump.sh: %v", err)
}
}

@@ -25,4 +25,4 @@ source "${KUBE_ROOT}/cluster/kubemark/util.sh"
detect-master
echo "Dumping logs for kubemark master: ${MASTER_NAME}"
-DUMP_ONLY_MASTER_LOGS=true ${KUBE_ROOT}/cluster/log-dump.sh "${REPORT_DIR}"
+DUMP_ONLY_MASTER_LOGS=true ${KUBE_ROOT}/cluster/log-dump/log-dump.sh "${REPORT_DIR}"