Both @satnam6502 and the e2e tests confirm: this code is no longer needed

Deletion is wonderful. The only weird thing was where to put the
message about the proxy URLs. Satnam suggested kubectl clusterinfo,
which seemed like a good option to put at the end of cluster turn-up.
pull/6/head
Zach Loafman 2015-03-19 16:09:32 -07:00
parent 1dd4600eb6
commit 407d1fec45
10 changed files with 3 additions and 159 deletions
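As the commit message notes, the proxy URLs now come from kubectl clusterinfo rather than from the per-provider logging functions. For anyone who still wants to spot-check the logging endpoints by hand after turn-up, here is a minimal sketch (not part of this commit; it assumes a GCE cluster with KUBE_MASTER_IP set and basic-auth credentials exported as KUBE_USER/KUBE_PASSWORD, which are hypothetical names here):

  # List the master and cluster-service URLs, as kube-up.sh now does.
  "${KUBE_ROOT}/cluster/kubectl.sh" clusterinfo

  # Hit the same apiserver proxy URLs the removed GCE code used to print
  # (note the trailing slash, as in the old message).
  curl -k -u "${KUBE_USER}:${KUBE_PASSWORD}" \
    "https://${KUBE_MASTER_IP}/api/v1beta1/proxy/services/elasticsearch-logging/"
  curl -k -u "${KUBE_USER}:${KUBE_PASSWORD}" \
    "https://${KUBE_MASTER_IP}/api/v1beta1/proxy/services/kibana-logging/"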


@@ -739,14 +739,6 @@ function kube-push {
}
function setup-logging-firewall {
echo "TODO: setup logging"
}
function teardown-logging-firewall {
echo "TODO: teardown logging"
}
# -----------------------------------------------------------------------------
# Cluster specific test helpers used from hack/e2e-test.sh
@@ -804,55 +796,6 @@ function restart-apiserver {
ssh-to-node "$1" "sudo /etc/init.d/kube-apiserver restart"
}
function setup-logging-firewall {
# If logging with Fluentd to Elasticsearch is enabled then create pods
# and services for Elasticsearch (for ingesting logs) and Kibana (for
# viewing logs).
if [[ "${ENABLE_NODE_LOGGING-}" != "true" ]] || \
[[ "${LOGGING_DESTINATION-}" != "elasticsearch" ]] || \
[[ "${ENABLE_CLUSTER_LOGGING-}" != "true" ]]; then
return
fi
# TODO: Support logging
echo "Logging setup is not (yet) supported on AWS"
# detect-project
# gcloud compute firewall-rules create "${INSTANCE_PREFIX}-fluentd-elasticsearch-logging" --project "${PROJECT}" \
# --allow tcp:5601 tcp:9200 tcp:9300 --target-tags "${MINION_TAG}" --network="${NETWORK}"
#
# # This should be nearly instant once kube-addons gets a chance to
# # run, and we already know we can hit the apiserver, but it's still
# # worth checking.
# echo "waiting for logging services to be created by the master."
# local kubectl="${KUBE_ROOT}/cluster/kubectl.sh"
# for i in `seq 1 10`; do
# if "${kubectl}" get services -l name=kibana-logging -o template -t {{range.items}}{{.id}}{{end}} | grep -q kibana-logging &&
# "${kubectl}" get services -l name=elasticsearch-logging -o template -t {{range.items}}{{.id}}{{end}} | grep -q elasticsearch-logging; then
# break
# fi
# sleep 10
# done
#
# local -r region="${ZONE::-2}"
# local -r es_ip=$(gcloud compute forwarding-rules --project "${PROJECT}" describe --region "${region}" elasticsearch-logging | grep IPAddress | awk '{print $2}')
# local -r kibana_ip=$(gcloud compute forwarding-rules --project "${PROJECT}" describe --region "${region}" kibana-logging | grep IPAddress | awk '{print $2}')
# echo
# echo -e "${color_green}Cluster logs are ingested into Elasticsearch running at ${color_yellow}http://${es_ip}:9200"
# echo -e "${color_green}Kibana logging dashboard will be available at ${color_yellow}http://${kibana_ip}:5601${color_norm}"
# echo
}
function teardown-logging-firewall {
if [[ "${ENABLE_NODE_LOGGING-}" != "true" ]] || \
[[ "${LOGGING_DESTINATION-}" != "elasticsearch" ]] || \
[[ "${ENABLE_CLUSTER_LOGGING-}" != "true" ]]; then
return
fi
# TODO: Support logging
}
# Perform preparations required to run e2e tests
function prepare-e2e() {
# (AWS runs detect-project, I don't think we need to do anything)


@@ -561,11 +561,3 @@ function restart-kube-proxy {
function restart-apiserver {
ssh-to-node "$1" "sudo /etc/init.d/kube-apiserver restart"
}
function setup-logging-firewall {
echo "TODO: setup logging"
}
function teardown-logging-firewall {
echo "TODO: teardown logging"
}


@@ -923,54 +923,6 @@ function restart-apiserver {
ssh-to-node "$1" "sudo /etc/init.d/kube-apiserver restart"
}
function setup-logging-firewall {
# If logging with Fluentd to Elasticsearch is enabled then create pods
# and services for Elasticsearch (for ingesting logs) and Kibana (for
# viewing logs).
if [[ "${ENABLE_NODE_LOGGING-}" != "true" ]] || \
[[ "${LOGGING_DESTINATION-}" != "elasticsearch" ]] || \
[[ "${ENABLE_CLUSTER_LOGGING-}" != "true" ]]; then
return
fi
detect-project
gcloud compute firewall-rules create "${INSTANCE_PREFIX}-fluentd-elasticsearch-logging" --project "${PROJECT}" \
--allow tcp:5601 tcp:9200 tcp:9300 --target-tags "${MINION_TAG}" --network="${NETWORK}"
# This should be nearly instant once kube-addons gets a chance to
# run, and we already know we can hit the apiserver, but it's still
# worth checking.
echo "waiting for logging services to be created by the master."
local kubectl="${KUBE_ROOT}/cluster/kubectl.sh"
for i in `seq 1 10`; do
if "${kubectl}" get services -l name=kibana-logging -o template -t {{range.items}}{{.id}}{{end}} | grep -q kibana-logging &&
"${kubectl}" get services -l name=elasticsearch-logging -o template -t {{range.items}}{{.id}}{{end}} | grep -q elasticsearch-logging; then
break
fi
sleep 10
done
echo
echo -e "${color_green}Cluster logs are ingested into Elasticsearch running at ${color_yellow}https://${KUBE_MASTER_IP}/api/v1beta1/proxy/services/elasticsearch-logging/"
echo -e "${color_green}Kibana logging dashboard will be available at ${color_yellow}https://${KUBE_MASTER_IP}/api/v1beta1/proxy/services/kibana-logging/${color_norm} (note the trailing slash)"
echo
}
function teardown-logging-firewall {
if [[ "${ENABLE_NODE_LOGGING-}" != "true" ]] || \
[[ "${LOGGING_DESTINATION-}" != "elasticsearch" ]] || \
[[ "${ENABLE_CLUSTER_LOGGING-}" != "true" ]]; then
return
fi
detect-project
gcloud compute firewall-rules delete -q "${INSTANCE_PREFIX}-fluentd-elasticsearch-logging" --project "${PROJECT}" || true
# Also delete the logging services which will remove the associated forwarding rules (TCP load balancers).
local kubectl="${KUBE_ROOT}/cluster/kubectl.sh"
"${kubectl}" delete services elasticsearch-logging || true
"${kubectl}" delete services kibana-logging || true
}
# Perform preparations required to run e2e tests
function prepare-e2e() {
detect-project


@@ -267,11 +267,3 @@ function kube-down() {
"${GCLOUD}" preview container clusters delete --project="${PROJECT}" \
--zone="${ZONE}" "${CLUSTER_NAME}"
}
function setup-logging-firewall {
echo "TODO: setup logging"
}
function teardown-logging-firewall {
echo "TODO: teardown logging"
}


@@ -27,8 +27,6 @@ source "${KUBE_ROOT}/cluster/${KUBERNETES_PROVIDER}/util.sh"
echo "Bringing down cluster using provider: $KUBERNETES_PROVIDER"
verify-prereqs
teardown-logging-firewall
kube-down
echo "Done"


@@ -39,9 +39,8 @@ kube-up
echo "... calling validate-cluster" >&2
"${KUBE_ROOT}/cluster/validate-cluster.sh"
echo "... calling setup-logging-firewall" >&2
setup-logging-firewall
echo "Done" >&2
echo -e "Done, listing cluster services:\n" >&2
"${KUBE_ROOT}/cluster/kubectl.sh" clusterinfo
echo
exit 0


@@ -334,11 +334,3 @@ function restart-apiserver {
function prepare-e2e() {
echo "libvirt-coreos doesn't need special preparations for e2e tests" 1>&2
}
function setup-logging-firewall {
echo "TODO: setup logging"
}
function teardown-logging-firewall {
echo "TODO: teardown logging"
}


@@ -347,14 +347,6 @@ kube-up() {
echo
}
function setup-logging-firewall {
echo "TODO: setup logging"
}
function teardown-logging-firewall {
echo "TODO: teardown logging"
}
# Perform preparations required to run e2e tests
function prepare-e2e() {
echo "Rackspace doesn't need special preparations for e2e tests"


@@ -343,11 +343,3 @@ function restart-apiserver {
function prepare-e2e() {
echo "Vagrant doesn't need special preparations for e2e tests" 1>&2
}
function setup-logging-firewall {
echo "TODO: setup logging"
}
function teardown-logging-firewall {
echo "TODO: teardown logging"
}


@@ -478,11 +478,3 @@ function test-setup {
function test-teardown {
echo "TODO"
}
function setup-logging-firewall {
echo "TODO: setup logging"
}
function teardown-logging-firewall {
echo "TODO: teardown logging"
}