Merge pull request #3046 from mbforbes/gkeTestsSecure

Add GKE as a provider.
roberthbailey 2014-12-19 12:00:13 -08:00
commit ddbc506820
11 changed files with 418 additions and 46 deletions

View File

@ -84,7 +84,7 @@ function detect-project () {
echo "'gcloud config set project <PROJECT>'" >&2
exit 1
fi
echo "Project: $PROJECT (autodetected from gcloud config)"
echo "Project: $PROJECT" >&2
}
@ -139,11 +139,9 @@ function upload-server-tars() {
# MINION_NAMES
# ZONE
# Vars set:
# KUBE_MINION_IP_ADDRESS (array)
# KUBE_MINION_IP_ADDRESSES (array)
function detect-minions () {
if [[ -z "${PROJECT-}" ]]; then
detect-project
fi
detect-project
KUBE_MINION_IP_ADDRESSES=()
for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
local minion_ip=$(gcloud compute instances describe --project "${PROJECT}" --zone "${ZONE}" \
@ -166,15 +164,12 @@ function detect-minions () {
#
# Assumed vars:
# MASTER_NAME
# PROJECT (if unset, will detect-project)
# ZONE
# Vars set:
# KUBE_MASTER
# KUBE_MASTER_IP
function detect-master () {
if [[ -z "${PROJECT-}" ]]; then
detect-project
fi
detect-project
KUBE_MASTER=${MASTER_NAME}
if [[ -z "${KUBE_MASTER_IP-}" ]]; then
KUBE_MASTER_IP=$(gcloud compute instances describe --project "${PROJECT}" --zone "${ZONE}" \
@ -251,6 +246,7 @@ function wait-for-jobs {
# $2: IP ranges.
# $3: Target tags for this firewall rule.
function create-firewall-rule {
detect-project
local attempt=0
while true; do
if ! gcloud compute firewall-rules create "$1" \
@ -258,7 +254,7 @@ function create-firewall-rule {
--network "${NETWORK}" \
--source-ranges "$2" \
--target-tags "$3" \
--allow tcp udp icmp esp ah sctp; then
if (( attempt > 5 )); then
echo -e "${color_red}Failed to create firewall rule $1 ${color_norm}"
exit 2
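The create-* helpers here (create-firewall-rule above, create-route and create-instance below) share the same retry idiom, which boils down to the sketch below. The attempt cap of 5 comes straight from the diff; the hunks cut off before the rest of the loop, so treat the remainder as illustrative.

# Sketch of the shared retry idiom; "$@" stands in for the real
# gcloud command being retried.
function retry-gcloud-call {
  local attempt=0
  while true; do
    if ! "$@"; then
      if (( attempt > 5 )); then
        echo "Giving up after ${attempt} attempts: $*" >&2
        exit 2
      fi
      attempt=$((attempt + 1))   # the real helpers may also log here
    else
      break
    fi
  done
}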
@ -275,6 +271,7 @@ function create-firewall-rule {
# $1: The name of the route.
# $2: IP range.
function create-route {
detect-project
local attempt=0
while true; do
if ! gcloud compute routes create "$1" \
@ -300,6 +297,7 @@ function create-route {
# $2: The scopes flag.
# $3: The minion start script.
function create-instance {
detect-project
local attempt=0
while true; do
if ! gcloud compute instances create "$1" \
@ -334,7 +332,6 @@ function create-instance {
# KUBE_ROOT
# <Various vars set in config file>
function kube-up {
# Detect the project into $PROJECT if it isn't set
detect-project
# Make sure we have the tar files staged on Google Storage
@ -570,18 +567,16 @@ EOF
}
# Delete a kubernetes cluster.
# Delete a kubernetes cluster. This is called from test-teardown.
#
# Assumed vars:
# MASTER_NAME
# INSTANCE_PREFIX
# ZONE
# PROJECT
# This function tears down cluster resources 10 at a time to avoid issuing too many
# API calls and exceeding API quota. It is important to bring down the instances before bringing
# down the firewall rules and routes.
function kube-down {
# Detect the project into $PROJECT
detect-project
echo "Bringing down cluster"
@ -685,15 +680,12 @@ function test-build-release {
}
# Execute prior to running tests to initialize required structure. This is
# called from hack/e2e-test.sh.
# called from hack/e2e.go only when running -up (it is run after kube-up).
#
# Assumed vars:
# PROJECT
# Variables from config.sh
function test-setup {
# Detect the project into $PROJECT if it isn't set
# gce specific
detect-project
# Open up port 80 & 8080 so common containers on minions can be reached
@ -705,12 +697,10 @@ function test-setup {
"${MINION_TAG}-${INSTANCE_PREFIX}-http-alt"
}
# Execute after running tests to perform any required clean-up. This is called
# from hack/e2e-test.sh
#
# Assumed Vars:
# PROJECT
# Execute after running tests to perform any required clean-up. This is called
# from hack/e2e.go
function test-teardown {
detect-project
echo "Shutting down test cluster in background."
gcloud compute firewall-rules delete \
--project "${PROJECT}" \
@ -735,6 +725,7 @@ function restart-kube-proxy {
function setup-monitoring {
if [[ "${ENABLE_CLUSTER_MONITORING}" == "true" ]]; then
echo "Setting up cluster monitoring using Heapster."
detect-project
if ! gcloud compute firewall-rules describe monitoring-heapster &>/dev/null; then
if ! gcloud compute firewall-rules create monitoring-heapster \

View File

@ -0,0 +1,34 @@
#!/bin/bash
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script should be sourced as a part of config-test or config-default.
# Specifically, the following environment variables are assumed:
# - CLUSTER_NAME (the name of the cluster)
MASTER_NAME="k8s-${CLUSTER_NAME}-master"
ZONE="${ZONE:-us-central1-f}"
NUM_MINIONS="${NUM_MINIONS:-2}"
CLUSTER_API_VERSION="${CLUSTER_API_VERSION:-}"
# TODO(mbforbes): Actually plumb this through; this currently only works
# because we use the 'default' network by default.
NETWORK="${NETWORK:-default}"
GCLOUD="${GCLOUD:-gcloud}"
GCLOUD_CONFIG_DIR="${GCLOUD_CONFIG_DIR:-${HOME}/.config/gcloud/kubernetes}"
# This is a hack, but I keep setting this when I run commands manually, and
# then things grossly fail during normal runs because cluster/kubecfg.sh and
# cluster/kubectl.sh both use this if it's set.
unset KUBERNETES_MASTER
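Since every value above uses the ${VAR:-default} idiom, any of them can be overridden from the environment before the config is sourced. A hypothetical override session (all values illustrative):

# Bring up a 4-node GKE test cluster in a different zone; anything
# not exported here falls back to the defaults above.
export CLUSTER_NAME="alice-gke-dev"
export ZONE="us-central1-b"
export NUM_MINIONS=4
KUBERNETES_PROVIDER=gke cluster/kube-up.sh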

View File

@ -0,0 +1,22 @@
#!/bin/bash
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The following are default-specific settings.
CLUSTER_NAME="${CLUSTER_NAME:-${USER}-gke}"
# For ease of maintenance, pieces that do not vary between the default and
# test configs are extracted into a common config.
source $(dirname "${BASH_SOURCE}")/config-common.sh

View File

@ -0,0 +1,22 @@
#!/bin/bash
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The following are test-specific settings.
CLUSTER_NAME="${CLUSTER_NAME:-${USER}-gke-e2e}"
# For ease of maintenance, pieces that do not vary between the default and
# test configs are extracted into a common config.
source $(dirname "${BASH_SOURCE}")/config-common.sh

cluster/gke/util.sh (new file, 247 lines)
View File

@ -0,0 +1,247 @@
#!/bin/bash
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A library of helper functions and constants for the local config.
# Use the config file specified in $KUBE_CONFIG_FILE, or default to
# config-default.sh.
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/cluster/gke/${KUBE_CONFIG_FILE:-config-default.sh}"
# Perform preparations required to run e2e tests
#
# Assumed vars:
# GCLOUD
function prepare-e2e() {
echo "... in prepare-e2e()" >&2
# Ensure GCLOUD is set to some gcloud binary.
if [[ -z "${GCLOUD:-}" ]]; then
echo "GCLOUD environment variable is not set. It should be your gcloud binary. " >&2
echo "A sane default is probably \$ export GCLOUD=gcloud" >&2
exit 1
fi
}
# Use the gcloud defaults to find the project. If it is already set in the
# environment then go with that.
#
# Assumed vars:
# GCLOUD
# Vars set:
# PROJECT
function detect-project() {
echo "... in detect-project()" >&2
if [[ -z "${PROJECT:-}" ]]; then
export PROJECT=$("${GCLOUD}" config list project | tail -n 1 | cut -f 3 -d ' ')
fi
if [[ -z "${PROJECT:-}" ]]; then
echo "Could not detect Google Cloud Platform project. Set the default project using " >&2
echo "'gcloud config set project <PROJECT>'" >&2
exit 1
fi
echo "Project: ${PROJECT}" >&2
}
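The tail/cut pipeline above assumes `gcloud config list project` prints a section header followed by a `project = <id>` line, so the third space-separated field of the last line is the project id. Under that assumption:

$ gcloud config list project
[core]
project = my-gcp-project
# tail -n 1 keeps "project = my-gcp-project";
# cut -f 3 -d ' ' then yields "my-gcp-project".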
# Execute prior to running tests to build a release if required for env.
function test-build-release() {
echo "... in test-build-release()" >&2
# We currently use the Kubernetes version that GKE supports (not testing
# bleeding-edge builds).
}
# Verify needed binaries exist.
function verify-prereqs() {
echo "... in verify-prereqs()" >&2
local cmd
# TODO(mbforbes): This assumes you have a "normal" gcloud installed even if
# you are setting one differently using ${GCLOUD}.
for cmd in gcloud kubectl; do
which "${cmd}" >/dev/null || {
echo "Can't find ${cmd} in PATH, please fix and retry. The Google Cloud "
echo "SDK can be downloaded from https://cloud.google.com/sdk/."
exit 1
}
done
}
# Instantiate a kubernetes cluster
#
# Assumed vars:
# GCLOUD
# CLUSTER_NAME
# ZONE
# CLUSTER_API_VERSION (optional)
# NUM_MINIONS
function kube-up() {
echo "... in kube-up()" >&2
detect-project >&2
"${GCLOUD}" preview container clusters create "${CLUSTER_NAME}" \
--zone="${ZONE}" \
--project="${PROJECT}" \
--cluster-api-version="${CLUSTER_API_VERSION:-}" \
--num-nodes="${NUM_MINIONS}"
}
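With the config-default values, the call above expands to roughly the following (user and project names illustrative):

gcloud preview container clusters create "alice-gke" \
  --zone="us-central1-f" \
  --project="my-gcp-project" \
  --cluster-api-version="" \
  --num-nodes="2"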
# Called during cluster/kube-up.sh
function setup-monitoring() {
echo "... in setup-monitoring()" >&2
# TODO(mbforbes): This isn't currently supported in GKE.
}
# Execute prior to running tests to initialize required structure. This is
# called from hack/e2e.go only when running -up (it is run after kube-up, so
# the cluster already exists at this point).
#
# Assumed vars:
# CLUSTER_NAME
# GCLOUD
# Vars set:
# MINION_TAG
function test-setup() {
echo "... in test-setup()" >&2
# Detect the project into $PROJECT if it isn't set
detect-project >&2
# At this point, CLUSTER_NAME should have been used, so its value is final.
MINION_TAG="k8s-${CLUSTER_NAME}-node"
# Open up port 80 & 8080 so common containers on minions can be reached.
# TODO(mbforbes): Is adding ${USER} necessary, and sufficient, to avoid
# collisions here?
"${GCLOUD}" compute firewall-rules create \
"${MINION_TAG}-${USER}-http-alt" \
--allow tcp:80 tcp:8080 \
--project "${PROJECT}" \
--target-tags "${MINION_TAG}"
}
# Ensure that we have a password created for validating to the master.
#
# Assumed vars:
# ZONE
# CLUSTER_NAME
# Vars set:
# KUBE_USER
# KUBE_PASSWORD
function get-password() {
echo "... in get-password()" >&2
detect-project >&2
KUBE_USER=$("${GCLOUD}" preview container clusters describe \
--project="${PROJECT}" --zone="${ZONE}" "${CLUSTER_NAME}" \
| grep user | cut -f 4 -d ' ')
KUBE_PASSWORD=$("${GCLOUD}" preview container clusters describe \
--project="${PROJECT}" --zone="${ZONE}" "${CLUSTER_NAME}" \
| grep password | cut -f 4 -d ' ')
}
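The grep/cut parsing here (and in detect-master below) assumes describe output shaped like the sketch below. Note the two leading spaces on the masterAuth fields: cut -d ' ' counts them as two empty fields, which makes user and password field 4 while endpoint is field 2.

# Assumed shape of `gcloud preview container clusters describe`
# output (values illustrative):
endpoint: 104.154.113.20
masterAuth:
  user: admin
  password: s3cr3tP4ss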
# Detect the instance name and IP for the master
#
# Assumed vars:
# ZONE
# CLUSTER_NAME
# Vars set:
# KUBE_MASTER
# KUBE_MASTER_IP
function detect-master() {
echo "... in detect-master()" >&2
detect-project >&2
KUBE_MASTER="k8s-${CLUSTER_NAME}-master"
KUBE_MASTER_IP=$("${GCLOUD}" preview container clusters describe \
--project="${PROJECT}" --zone="${ZONE}" "${CLUSTER_NAME}" \
| grep endpoint | cut -f 2 -d ' ')
}
# Assumed vars:
# NUM_MINIONS
# CLUSTER_NAME
# Vars set:
# MINION_NAMES (array)
function detect-minions() {
echo "... in detect-minions()" >&2
# Just get the minion names.
MINION_NAMES=()
for (( i=1; i<=${NUM_MINIONS}; i++)); do
MINION_NAMES+=("k8s-${CLUSTER_NAME}-node-${i}")
done
}
# SSH to a node by name ($1) and run a command ($2).
#
# Assumed vars:
# GCLOUD
# ZONE
function ssh-to-node() {
echo "... in ssh-to-node()" >&2
detect-project >&2
local node="$1"
local cmd="$2"
"${GCLOUD}" compute ssh --ssh-flag="-o LogLevel=quiet" --project "${PROJECT}" \
--zone="${ZONE}" "${node}" --command "${cmd}"
}
# Restart the kube-proxy on a node ($1)
function restart-kube-proxy() {
echo "... in restart-kube-proxy()" >&2
ssh-to-node "$1" "sudo /etc/init.d/kube-proxy restart"
}
# Execute after running tests to perform any required clean-up. This is called
# from hack/e2e-test.sh. It invokes kube-down at the end, so the cluster still
# exists when this function runs.
#
# Assumed vars:
# CLUSTER_NAME
# GCLOUD
# KUBE_ROOT
function test-teardown() {
echo "... in test-teardown()" >&2
detect-project >&2
# At this point, CLUSTER_NAME should have been used, so its value is final.
MINION_TAG="k8s-${CLUSTER_NAME}-node"
# First, remove anything we did with test-setup (currently, the firewall).
# NOTE: Keep in sync with name above in test-setup.
"${GCLOUD}" compute firewall-rules delete "${MINION_TAG}-${USER}-http-alt" \
--project="${PROJECT}" || true
# Then actually turn down the cluster.
"${KUBE_ROOT}/cluster/kube-down.sh"
}
# Tears down monitoring.
function teardown-monitoring() {
echo "... in teardown-monitoring()" >&2
# TODO(mbforbes): This isn't currently supported in GKE.
}
# Actually take down the cluster. This is called from test-teardown.
#
# Assumed vars:
# GCLOUD
# ZONE
# CLUSTER_NAME
function kube-down() {
echo "... in kube-down()" >&2
detect-project >&2
"${GCLOUD}" preview container clusters delete --project="${PROJECT}" \
--zone="${ZONE}" "${CLUSTER_NAME}"
}
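Taken together, a typical e2e run against GKE walks through these functions in roughly this order (a sketch of the driver's sequence, not a literal script):

export KUBERNETES_PROVIDER=gke
cluster/kube-up.sh   # verify-prereqs, kube-up, validate-cluster, setup-monitoring
# hack/e2e.go -up then calls test-setup (opens the http-alt firewall
# rule), runs the tests, and finally calls test-teardown, which removes
# that rule and invokes cluster/kube-down.sh.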

View File

@ -18,7 +18,7 @@
# You can override the default provider by exporting the KUBERNETES_PROVIDER
# variable in your bashrc
#
# The valid values: 'gce', 'aws', 'azure', 'vagrant', 'local', 'vsphere'
# The valid values: 'gce', 'gke', 'aws', 'azure', 'vagrant', 'local', 'vsphere'
KUBERNETES_PROVIDER=${KUBERNETES_PROVIDER:-gce}

View File

@ -28,12 +28,18 @@ KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
source "${KUBE_ROOT}/cluster/kube-env.sh"
source "${KUBE_ROOT}/cluster/${KUBERNETES_PROVIDER}/util.sh"
echo "Starting cluster using provider: $KUBERNETES_PROVIDER"
echo "Starting cluster using provider: $KUBERNETES_PROVIDER" >&2
echo "... calling verify-prereqs" >&2
verify-prereqs
echo "... calling kube-up" >&2
kube-up
echo "... calling validate-cluster" >&2
"${KUBE_ROOT}/cluster/validate-cluster.sh"
echo "... calling setup-monitoring" >&2
setup-monitoring
echo "Done"
echo "Done" >&2

View File

@ -87,6 +87,16 @@ if [[ "$KUBERNETES_PROVIDER" == "vagrant" ]]; then
auth_config=(
"-auth" "$HOME/.kubernetes_vagrant_auth"
)
elif [[ "${KUBERNETES_PROVIDER}" == "gke" ]]; then
# While KUBECFG calls remain, we manually pass the auth and cert info.
detect-project &> /dev/null
cluster_config_dir="${GCLOUD_CONFIG_DIR}/${PROJECT}.${ZONE}.${CLUSTER_NAME}"
auth_config=(
"-certificate_authority=${cluster_config_dir}/ca.crt"
"-client_certificate=${cluster_config_dir}/kubecfg.crt"
"-client_key=${cluster_config_dir}/kubecfg.key"
"-auth=${cluster_config_dir}/kubernetes_auth"
)
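For context, this assumes gcloud wrote the cluster's credentials into a per-cluster directory under GCLOUD_CONFIG_DIR; the expected layout (project, zone, and cluster values illustrative):

~/.config/gcloud/kubernetes/my-gcp-project.us-central1-f.alice-gke/
  ca.crt            # cluster CA certificate
  kubecfg.crt       # client certificate
  kubecfg.key       # client private key
  kubernetes_auth   # basic-auth (user/password) info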
else
auth_config=()
fi
@ -96,4 +106,5 @@ if [[ -n "${KUBE_MASTER_IP-}" && -z "${KUBERNETES_MASTER-}" ]]; then
export KUBERNETES_MASTER=https://${KUBE_MASTER_IP}
fi
"$kubecfg" "${auth_config[@]:+${auth_config[@]}}" "$@"
echo "Running: " "${kubecfg}" "${auth_config[@]:+${auth_config[@]}}" "${@}" >&2
"${kubecfg}" "${auth_config[@]:+${auth_config[@]}}" "${@}"

View File

@ -63,6 +63,7 @@ locations=(
"${KUBE_ROOT}/_output/dockerized/bin/${host_os}/${host_arch}/kubectl"
"${KUBE_ROOT}/_output/local/bin/${host_os}/${host_arch}/kubectl"
"${KUBE_ROOT}/platforms/${host_os}/${host_arch}/kubectl"
"${HOME}/google-cloud-sdk/bin/kubectl"
)
kubectl=$( (ls -t "${locations[@]}" 2>/dev/null || true) | head -1 )
@ -81,19 +82,34 @@ if [[ ! -x "$kubectl" ]]; then
exit 1
fi
# When we are using vagrant it has hard coded auth. We repeat that here so that
# we don't clobber auth that might be used for a publicly facing cluster.
if [[ "$KUBERNETES_PROVIDER" == "vagrant" ]]; then
auth_config=(
"--auth-path=$HOME/.kubernetes_vagrant_auth"
)
else
auth_config=()
# While GKE requires the kubectl binary, it's actually called through gcloud.
if [[ "$KUBERNETES_PROVIDER" == "gke" ]]; then
detect-project &> /dev/null
kubectl="${GCLOUD}"
fi
detect-master > /dev/null
if [[ "$KUBERNETES_PROVIDER" == "vagrant" ]]; then
# When we are using vagrant it has hard coded auth. We repeat that here so that
# we don't clobber auth that might be used for a publicly facing cluster.
config=(
"--auth-path=$HOME/.kubernetes_vagrant_auth"
)
elif [[ "${KUBERNETES_PROVIDER}" == "gke" ]]; then
# GKE runs kubectl through gcloud.
config=(
"preview"
"container"
"kubectl"
"--project=${PROJECT}"
"--zone=${ZONE}"
"--cluster=${CLUSTER_NAME}"
)
fi
detect-master &> /dev/null
if [[ -n "${KUBE_MASTER_IP-}" && -z "${KUBERNETES_MASTER-}" ]]; then
export KUBERNETES_MASTER=https://${KUBE_MASTER_IP}
fi
"$kubectl" "${auth_config[@]:+${auth_config[@]}}" "$@"
echo "Running:" "${kubectl}" "${config[@]:+${config[@]}}" "${@}" >&2
"${kubectl}" "${config[@]:+${config[@]}}" "${@}"

View File

@ -37,6 +37,7 @@ import (
var (
authConfig = flag.String("auth_config", os.Getenv("HOME")+"/.kubernetes_auth", "Path to the auth info file.")
certDir = flag.String("cert_dir", "", "Path to the directory containing the certs. Default is empty, which doesn't use certs.")
host = flag.String("host", "", "The host to connect to")
repoRoot = flag.String("repo_root", "./", "Root directory of kubernetes repository, for finding test files. Default assumes working directory is repository root")
)
@ -88,11 +89,18 @@ func loadClientOrDie() *client.Client {
config := client.Config{
Host: *host,
}
auth, err := clientauth.LoadFromFile(*authConfig)
info, err := clientauth.LoadFromFile(*authConfig)
if err != nil {
glog.Fatalf("Error loading auth: %v", err)
}
config, err = auth.MergeWithConfig(config)
// If the certificate directory is provided, set the cert paths to be there.
if *certDir != "" {
glog.Infof("Expecting certs in %v.", *certDir)
info.CAFile = filepath.Join(*certDir, "ca.crt")
info.CertFile = filepath.Join(*certDir, "kubecfg.crt")
info.KeyFile = filepath.Join(*certDir, "kubecfg.key")
}
config, err = info.MergeWithConfig(config)
if err != nil {
glog.Fatalf("Error creating client")
}
@ -208,12 +216,12 @@ func TestImportantURLs(c *client.Client) bool {
// TestKubeletSendsEvent checks that kubelets and scheduler send events about pods scheduling and running.
func TestKubeletSendsEvent(c *client.Client) bool {
provider := os.Getenv("KUBERNETES_PROVIDER")
if len(provider) > 0 && provider != "gce" {
if len(provider) > 0 && provider != "gce" && provider != "gke" {
glog.Infof("skipping TestKubeletSendsEvent on cloud provider %s", provider)
return true
}
if provider == "" {
glog.Info("KUBERNETES_PROVIDER is unset assuming \"gce\"")
glog.Info("KUBERNETES_PROVIDER is unset; assuming \"gce\"")
}
podClient := c.Pods(api.NamespaceDefault)
@ -406,16 +414,23 @@ func main() {
info := []TestInfo{}
passed := true
for _, test := range tests {
for i, test := range tests {
glog.Infof("Running test %d", i+1)
testPassed := test.test(c)
if !testPassed {
glog.Infof(" test %d failed", i+1)
passed = false
}
} else {
glog.Infof(" test %d passed", i+1)
}
// TODO: clean up objects created during a test after the test, so cases
// are independent.
info = append(info, TestInfo{testPassed, test})
}
outputTAPSummary(info)
if !passed {
glog.Fatalf("Tests failed")
glog.Fatalf("At least one test failed")
} else {
glog.Infof("All tests pass")
}
}

View File

@ -64,12 +64,20 @@ locations=(
)
e2e=$( (ls -t "${locations[@]}" 2>/dev/null || true) | head -1 )
# When we are using vagrant it has hard coded auth. We repeat that here so that
# we don't clobber auth that might be used for a publicly facing cluster.
if [[ "$KUBERNETES_PROVIDER" == "vagrant" ]]; then
# When we are using vagrant it has hard coded auth. We repeat that here so that
# we don't clobber auth that might be used for a publicly facing cluster.
auth_config=(
"--auth_config=$HOME/.kubernetes_vagrant_auth"
)
elif [[ "${KUBERNETES_PROVIDER}" == "gke" ]]; then
# With GKE, our auth and certs are in gcloud's config directory.
detect-project &> /dev/null
cfg_dir="${GCLOUD_CONFIG_DIR}/${PROJECT}.${ZONE}.${CLUSTER_NAME}"
auth_config=(
"--auth_config=${cfg_dir}/kubernetes_auth"
"--cert_dir=${cfg_dir}"
)
else
auth_config=()
fi
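The e2e binary then receives both flags, and the loadClientOrDie change above substitutes the certs from --cert_dir; the resulting invocation looks roughly like this (host and paths illustrative):

cfg_dir="${HOME}/.config/gcloud/kubernetes/my-gcp-project.us-central1-f.alice-gke-e2e"
e2e --host="https://104.154.113.20" \
  --auth_config="${cfg_dir}/kubernetes_auth" \
  --cert_dir="${cfg_dir}"
# With --cert_dir set, the client reads ca.crt, kubecfg.crt, and
# kubecfg.key from that directory instead of the paths recorded in
# kubernetes_auth.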