Use KUBECFG instead of CLOUDCFG in scripts

pull/6/head
Joe Beda 2014-09-02 11:29:22 -07:00
parent 6dd38e2f49
commit c47b405841
5 changed files with 26 additions and 26 deletions
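The change is a mechanical rename: every script that shelled out to the kubecfg binary did so through a variable still called CLOUDCFG, a leftover from the tool's earlier name (cloudcfg), and this commit renames the variable to KUBECFG to match the binary. A hypothetical one-liner that would produce an equivalent rename, shown for illustration only and not taken from the commit:

    git grep -l CLOUDCFG | xargs sed -i 's/CLOUDCFG/KUBECFG/g'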

@@ -17,8 +17,8 @@
 source $(dirname $0)/kube-env.sh
 source $(dirname $0)/$KUBERNETES_PROVIDER/util.sh
 
-CLOUDCFG=$(dirname $0)/../_output/go/bin/kubecfg
-if [ ! -x $CLOUDCFG ]; then
+KUBECFG=$(dirname $0)/../_output/go/bin/kubecfg
+if [ ! -x $KUBECFG ]; then
   echo "Could not find kubecfg binary. Run hack/build-go.sh to build it."
   exit 1
 fi
@@ -40,4 +40,4 @@ if [ "$KUBE_MASTER_IP" != "" ] && [ "$KUBERNETES_MASTER" == "" ]; then
   export KUBERNETES_MASTER=https://${KUBE_MASTER_IP}
 fi
 
-$CLOUDCFG $AUTH_CONFIG "$@"
+$KUBECFG $AUTH_CONFIG "$@"
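This first file looks like the cluster/kubecfg.sh wrapper that the e2e driver at the end of this diff points KUBECFG at: it resolves the locally built binary, derives KUBERNETES_MASTER from the detected master IP when needed, and forwards all arguments. A usage sketch, assuming the binary has been built with hack/build-go.sh (both command forms appear in the tests below):

    cluster/kubecfg.sh list pods
    cluster/kubecfg.sh -p 8080:80 run dockerfile/nginx 2 myNginx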

@@ -24,7 +24,7 @@ source "${KUBE_REPO_ROOT}/cluster/kube-env.sh"
 source "${KUBE_REPO_ROOT}/cluster/$KUBERNETES_PROVIDER/util.sh"
 
 # Launch a container
-$CLOUDCFG -p 8080:80 run dockerfile/nginx 2 myNginx
+$KUBECFG -p 8080:80 run dockerfile/nginx 2 myNginx
 
 function remove-quotes() {
   local in=$1
@@ -35,13 +35,13 @@ function remove-quotes() {
 function teardown() {
   echo "Cleaning up test artifacts"
-  $CLOUDCFG stop myNginx
-  $CLOUDCFG rm myNginx
+  $KUBECFG stop myNginx
+  $KUBECFG rm myNginx
 }
 
 trap "teardown" EXIT
 
-POD_ID_LIST=$($CLOUDCFG '-template={{range.Items}}{{.ID}} {{end}}' -l name=myNginx list pods)
+POD_ID_LIST=$($KUBECFG '-template={{range.Items}}{{.ID}} {{end}}' -l name=myNginx list pods)
 
 # Container turn up on a clean cluster can take a while for the docker image pull.
 ALL_RUNNING=0
 while [ $ALL_RUNNING -ne 1 ]; do
@@ -49,7 +49,7 @@ while [ $ALL_RUNNING -ne 1 ]; do
   sleep 5
   ALL_RUNNING=1
   for id in $POD_ID_LIST; do
-    CURRENT_STATUS=$($CLOUDCFG -template '{{and .CurrentState.Info.mynginx.State.Running .CurrentState.Info.net.State.Running}}' get pods/$id)
+    CURRENT_STATUS=$($KUBECFG -template '{{and .CurrentState.Info.mynginx.State.Running .CurrentState.Info.net.State.Running}}' get pods/$id)
     if [ "$CURRENT_STATUS" != "true" ]; then
       ALL_RUNNING=0
     fi
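Worth noting in the test above: pod state is read through kubecfg's Go template output rather than by parsing human-oriented text, so the readiness check reduces to a string comparison against "true". A self-contained sketch of that polling idiom, assembled from the hunks above (mynginx and net are the v1beta1-era .CurrentState.Info container keys this test relies on):

    # Collect the IDs of the pods the controller created.
    POD_ID_LIST=$($KUBECFG '-template={{range.Items}}{{.ID}} {{end}}' -l name=myNginx list pods)
    ALL_RUNNING=0
    while [ $ALL_RUNNING -ne 1 ]; do
      sleep 5
      ALL_RUNNING=1
      for id in $POD_ID_LIST; do
        # Prints "true" only when both the app container and the network container are running.
        CURRENT_STATUS=$($KUBECFG -template '{{and .CurrentState.Info.mynginx.State.Running .CurrentState.Info.net.State.Running}}' get pods/$id)
        if [ "$CURRENT_STATUS" != "true" ]; then
          ALL_RUNNING=0
        fi
      done
    done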

@@ -26,23 +26,23 @@ source "${KUBE_REPO_ROOT}/cluster/$KUBERNETES_PROVIDER/util.sh"
 GUESTBOOK="${KUBE_REPO_ROOT}/examples/guestbook"
 
 # Launch the guestbook example
-$CLOUDCFG -c "${GUESTBOOK}/redis-master.json" create /pods
-$CLOUDCFG -c "${GUESTBOOK}/redis-master-service.json" create /services
-$CLOUDCFG -c "${GUESTBOOK}/redis-slave-controller.json" create /replicationControllers
+$KUBECFG -c "${GUESTBOOK}/redis-master.json" create /pods
+$KUBECFG -c "${GUESTBOOK}/redis-master-service.json" create /services
+$KUBECFG -c "${GUESTBOOK}/redis-slave-controller.json" create /replicationControllers
 
 sleep 5
 
-POD_LIST_1=$($CLOUDCFG '-template={{range.Items}}{{.ID}} {{end}}' list pods)
+POD_LIST_1=$($KUBECFG '-template={{range.Items}}{{.ID}} {{end}}' list pods)
 echo "Pods running: ${POD_LIST_1}"
 
-$CLOUDCFG stop redisSlaveController
+$KUBECFG stop redisSlaveController
 # Needed until issue #103 gets fixed
 sleep 25
-$CLOUDCFG rm redisSlaveController
-$CLOUDCFG delete services/redismaster
-$CLOUDCFG delete pods/redis-master-2
+$KUBECFG rm redisSlaveController
+$KUBECFG delete services/redismaster
+$KUBECFG delete pods/redis-master-2
 
-POD_LIST_2=$($CLOUDCFG '-template={{range.Items}}{{.ID}} {{end}}' list pods)
+POD_LIST_2=$($KUBECFG '-template={{range.Items}}{{.ID}} {{end}}' list pods)
 echo "Pods running after shutdown: ${POD_LIST_2}"
 
 exit 0
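The ordering in the guestbook teardown is deliberate: as I read the kubecfg of this era, stop sets a replication controller's replica count to 0 while rm deletes the controller object itself, and the sleep 25 papers over issue #103 until the pods are actually gone. A condensed sketch of that sequence:

    $KUBECFG stop redisSlaveController   # scale replicas to 0 (assumption: era semantics of stop)
    sleep 25                             # workaround until issue #103 is fixed
    $KUBECFG rm redisSlaveController     # delete the controller object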

@@ -24,7 +24,7 @@ source "${KUBE_REPO_ROOT}/cluster/kube-env.sh"
 source "${KUBE_REPO_ROOT}/cluster/$KUBERNETES_PROVIDER/util.sh"
 
 function validate() {
-  POD_ID_LIST=$($CLOUDCFG '-template={{range.Items}}{{.ID}} {{end}}' -l name=$controller list pods)
+  POD_ID_LIST=$($KUBECFG '-template={{range.Items}}{{.ID}} {{end}}' -l name=$controller list pods)
   # Container turn up on a clean cluster can take a while for the docker image pull.
   ALL_RUNNING=0
   while [ $ALL_RUNNING -ne 1 ]; do
@@ -32,7 +32,7 @@ function validate() {
     sleep 5
     ALL_RUNNING=1
     for id in $POD_ID_LIST; do
-      CURRENT_STATUS=$($CLOUDCFG -template '{{and .CurrentState.Info.datacontroller.State.Running .CurrentState.Info.net.State.Running}}' get pods/$id)
+      CURRENT_STATUS=$($KUBECFG -template '{{and .CurrentState.Info.datacontroller.State.Running .CurrentState.Info.net.State.Running}}' get pods/$id)
       if [ "$CURRENT_STATUS" != "true" ]; then
        ALL_RUNNING=0
       fi
@@ -49,28 +49,28 @@ function validate() {
 controller=dataController
 
 # Launch a container
-$CLOUDCFG -p 8080:80 run brendanburns/data 2 $controller
+$KUBECFG -p 8080:80 run brendanburns/data 2 $controller
 
 function teardown() {
   echo "Cleaning up test artifacts"
-  $CLOUDCFG stop $controller
-  $CLOUDCFG rm $controller
+  $KUBECFG stop $controller
+  $KUBECFG rm $controller
 }
 
 trap "teardown" EXIT
 
 validate 2
 
-$CLOUDCFG resize $controller 1
+$KUBECFG resize $controller 1
 sleep 2
 validate 1
 
-$CLOUDCFG resize $controller 2
+$KUBECFG resize $controller 2
 sleep 2
 validate 2
 
 # TODO: test rolling update here, but to do so, we need to make the update blocking
-# $CLOUDCFG -u=20s rollingupdate $controller
+# $KUBECFG -u=20s rollingupdate $controller
 #
 # Wait for the replica controller to recreate
 # sleep 10
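The resize test above exercises scaling in both directions: resize rewrites the controller's replica count, and each validate call then polls until the pods settle (the check against the expected count presumably lives in validate lines this diff does not show). The cycle, condensed from the hunks above:

    $KUBECFG -p 8080:80 run brendanburns/data 2 $controller   # start with 2 replicas
    validate 2
    $KUBECFG resize $controller 1                             # scale down
    sleep 2; validate 1
    $KUBECFG resize $controller 2                             # scale back up
    sleep 2; validate 2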

@@ -32,7 +32,7 @@ set -e
 # Use testing config
 export KUBE_CONFIG_FILE="config-test.sh"
 export KUBE_REPO_ROOT="$(dirname $0)/.."
-export CLOUDCFG="${KUBE_REPO_ROOT}/cluster/kubecfg.sh -expect_version_match"
+export KUBECFG="${KUBE_REPO_ROOT}/cluster/kubecfg.sh -expect_version_match"
 
 if [[ $TEAR_DOWN -ne 0 ]]; then
   detect-project
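Exporting KUBECFG here is what ties the suite together: every test above expands $KUBECFG, so pointing the variable at the cluster/kubecfg.sh wrapper (with -expect_version_match, which presumably guards against client/server version skew) routes all of them through one entry point. A sketch of how a driver would then run one test; the test path is hypothetical, since the driver's file list is not part of this diff:

    export KUBECFG="${KUBE_REPO_ROOT}/cluster/kubecfg.sh -expect_version_match"
    "${KUBE_REPO_ROOT}/hack/some-e2e-test.sh"   # hypothetical path, for illustration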