Switch to new gcloud API for GCE MIGs

pull/6/head
Wojciech Tyczynski 2015-07-22 13:40:22 +02:00
parent 6129d3d4eb
commit 2d95cd454c
5 changed files with 29 additions and 60 deletions
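
Summary of the change: every call to the deprecated `gcloud preview managed-instance-groups` command group becomes a `gcloud compute instance-groups managed` call, the hand-rolled wait-for-minions-to-run polling loop is replaced by the CLI's wait-until-stable, and the e2e groupSize helper switches from scraping the `currentSize:` field to counting RUNNING instances. An illustrative before/after of the command surface (flag placement as used in this commit; exact syntax may vary across gcloud releases):

```
# Old (preview) surface, removed by this commit:
gcloud preview managed-instance-groups --project "${PROJECT}" --zone "${ZONE}" \
    describe "${NODE_INSTANCE_PREFIX}-group"

# New surface: the verb follows "managed", and --zone/--project are
# ordinary flags on the subcommand:
gcloud compute instance-groups managed describe \
    "${NODE_INSTANCE_PREFIX}-group" --zone "${ZONE}" --project "${PROJECT}"
```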

View File

@@ -253,24 +253,6 @@ function detect-minion-names {
   echo "MINION_NAMES=${MINION_NAMES[*]}" >&2
 }
 
-# Waits until the number of running nodes in the instance group is equal to NUM_NODES
-#
-# Assumed vars:
-#   NODE_INSTANCE_PREFIX
-#   NUM_MINIONS
-function wait-for-minions-to-run {
-  detect-project
-  local running_minions=0
-  while [[ "${NUM_MINIONS}" != "${running_minions}" ]]; do
-    echo -e -n "${color_yellow}Waiting for minions to run. "
-    echo -e "${running_minions} out of ${NUM_MINIONS} running. Retrying.${color_norm}"
-    sleep 5
-    running_minions=$((gcloud preview --project "${PROJECT}" instance-groups \
-      --zone "${ZONE}" instances --group "${NODE_INSTANCE_PREFIX}-group" list \
-      --running || true) | wc -l | xargs)
-  done
-}
-
 # Detect the information about the minions
 #
 # Assumed vars:
@@ -695,16 +677,17 @@ function kube-up {
   write-node-env
   create-node-instance-template
 
-  gcloud preview managed-instance-groups --zone "${ZONE}" \
+  gcloud compute instance-groups managed \
       create "${NODE_INSTANCE_PREFIX}-group" \
       --project "${PROJECT}" \
+      --zone "${ZONE}" \
       --base-instance-name "${NODE_INSTANCE_PREFIX}" \
       --size "${NUM_MINIONS}" \
       --template "${NODE_INSTANCE_PREFIX}-template" || true;
-  # TODO: this should be true when the above create managed-instance-group
-  # command returns, but currently it returns before the instances come up due
-  # to gcloud's deficiency.
-  wait-for-minions-to-run
+  gcloud compute instance-groups managed wait-until-stable \
+      "${NODE_INSTANCE_PREFIX}-group" \
+      --zone "${ZONE}" \
+      --project "${PROJECT}" || true;
 
   detect-minion-names
   detect-master
@@ -797,8 +780,8 @@ function kube-down {
   # The gcloud APIs don't return machine parseable error codes/retry information. Therefore the best we can
   # do is parse the output and special case particular responses we are interested in.
-  if gcloud preview managed-instance-groups --project "${PROJECT}" --zone "${ZONE}" describe "${NODE_INSTANCE_PREFIX}-group" &>/dev/null; then
-    deleteCmdOutput=$(gcloud preview managed-instance-groups --zone "${ZONE}" delete \
+  if gcloud compute instance-groups managed describe --project "${PROJECT}" --zone "${ZONE}" "${NODE_INSTANCE_PREFIX}-group" &>/dev/null; then
+    deleteCmdOutput=$(gcloud compute instance-groups managed delete --zone "${ZONE}" \
       --project "${PROJECT}" \
       --quiet \
       "${NODE_INSTANCE_PREFIX}-group")
@@ -810,7 +793,7 @@ function kube-down {
     while [[ "$deleteCmdStatus" != "DONE" ]]
     do
       sleep 5
-      deleteCmdOperationOutput=$(gcloud preview managed-instance-groups --zone "${ZONE}" --project "${PROJECT}" get-operation $deleteCmdOperationId)
+      deleteCmdOperationOutput=$(gcloud compute instance-groups managed --zone "${ZONE}" --project "${PROJECT}" get-operation $deleteCmdOperationId)
       deleteCmdStatus=$(echo $deleteCmdOperationOutput | grep -i "status:" | sed "s/.*status:[[:space:]]*\([^[:space:]]*\).*/\1/g")
       echo "Waiting for MIG deletion to complete. Current status: " $deleteCmdStatus
     done
@@ -921,7 +904,7 @@ function kube-down {
 #   $3: managed instance group name
 function get-template {
   # url is set to https://www.googleapis.com/compute/v1/projects/$1/global/instanceTemplates/<template>
-  local url=$(gcloud preview managed-instance-groups --project="${1}" --zone="${2}" describe "${3}" | grep instanceTemplate)
+  local url=$(gcloud compute instance-groups managed describe --project="${1}" --zone="${2}" "${3}" | grep instanceTemplate)
   # template is set to <template> (the pattern strips off all but last slash)
   local template="${url##*/}"
   echo "${template}"
@@ -942,7 +925,7 @@ function check-resources {
   echo "Looking for already existing resources"
   KUBE_RESOURCE_FOUND=""
 
-  if gcloud preview managed-instance-groups --project "${PROJECT}" --zone "${ZONE}" describe "${NODE_INSTANCE_PREFIX}-group" &>/dev/null; then
+  if gcloud compute instance-groups managed describe --project "${PROJECT}" --zone "${ZONE}" "${NODE_INSTANCE_PREFIX}-group" &>/dev/null; then
     KUBE_RESOURCE_FOUND="Managed instance group ${NODE_INSTANCE_PREFIX}-group"
     return 1
   fi
@@ -1039,7 +1022,7 @@ function prepare-push() {
   # being used, create a temp one, then delete the old one and recreate it once again.
   create-node-instance-template "tmp"
 
-  gcloud preview managed-instance-groups --zone "${ZONE}" \
+  gcloud compute instance-groups managed --zone "${ZONE}" \
     set-template "${NODE_INSTANCE_PREFIX}-group" \
     --project "${PROJECT}" \
     --template "${NODE_INSTANCE_PREFIX}-template-tmp" || true;
@@ -1051,7 +1034,7 @@ function prepare-push() {
   create-node-instance-template
 
-  gcloud preview managed-instance-groups --zone "${ZONE}" \
+  gcloud compute instance-groups managed --zone "${ZONE}" \
     set-template "${NODE_INSTANCE_PREFIX}-group" \
     --project "${PROJECT}" \
     --template "${NODE_INSTANCE_PREFIX}-template" || true;

View File

@@ -195,7 +195,7 @@ its own. (In the future, we plan to limit authorization to only allow a kubelet
 If your cluster runs short on resources you can easily add more machines to it if your cluster is running in Node self-registration mode. If you're using GCE or GKE it's done by resizing Instance Group managing your Nodes. It can be accomplished by modifying number of instances on `Compute > Compute Engine > Instance groups > your group > Edit group` [Google Cloud Console page](https://console.developers.google.com) or using gcloud CLI:
 
 ```
-gcloud preview managed-instance-groups --zone compute-zone resize my-cluster-minon-group --new-size 42
+gcloud compute instance-groups managed --zone compute-zone resize my-cluster-minon-group --new-size 42
 ```
 
 Instance Group will take care of putting appropriate image on new machines and start them, while Kubelet will register its Node with API server to make it available for scheduling. If you scale the instance group down, system will randomly choose Nodes to kill.
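
One caveat worth flagging in the hunk above: the docs example keeps `--new-size`, while the e2e test later in this commit passes `--size` to the same resize verb; gcloud spelled this flag differently across releases, so treat the exact name as release-dependent. A hypothetical follow-up check using list-instances (the subcommand the e2e tests below rely on):

```
# Resize the group, then list its instances to confirm they are coming up.
gcloud compute instance-groups managed --zone compute-zone \
    resize my-cluster-minon-group --new-size 42
gcloud compute instance-groups managed list-instances my-cluster-minon-group \
    --zone compute-zone
```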

View File

@@ -526,13 +526,12 @@ func migTemplate() (string, error) {
 	if wait.Poll(poll, singleCallTimeout, func() (bool, error) {
 		// TODO(mbforbes): make this hit the compute API directly instead of
 		// shelling out to gcloud.
-		o, err := exec.Command("gcloud", "preview", "managed-instance-groups",
+		o, err := exec.Command("gcloud", "compute", "instance-groups", "managed",
+			"describe", testContext.CloudConfig.NodeInstanceGroup,
 			fmt.Sprintf("--project=%s", testContext.CloudConfig.ProjectID),
-			fmt.Sprintf("--zone=%s", testContext.CloudConfig.Zone),
-			"describe",
-			testContext.CloudConfig.NodeInstanceGroup).CombinedOutput()
+			fmt.Sprintf("--zone=%s", testContext.CloudConfig.Zone)).CombinedOutput()
 		if err != nil {
-			errLast = fmt.Errorf("gcloud preview managed-instance-groups describe call failed with err: %v", err)
+			errLast = fmt.Errorf("gcloud compute instance-groups managed describe call failed with err: %v", err)
 			return false, nil
 		}
 		output := string(o)
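
Note that both this Go poller and the shell helper get-template scrape human-oriented describe output rather than a structured API response, which is exactly what the TODO about hitting the compute API directly is complaining about. The dependency, sketched in shell (it assumes describe prints the `instanceTemplate:` URL line that the grep in get-template expects; PROJECT, ZONE, and GROUP are placeholders):

```
# describe is expected to emit a line like:
#   instanceTemplate: https://www.googleapis.com/compute/v1/projects/<proj>/global/instanceTemplates/<template>
url=$(gcloud compute instance-groups managed describe \
    --project="${PROJECT}" --zone="${ZONE}" "${GROUP}" | grep instanceTemplate)
# ${url##*/} strips everything up to and including the last slash.
template="${url##*/}"
echo "${template}"
```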

View File

@@ -19,8 +19,7 @@ package e2e
 import (
 	"fmt"
 	"os/exec"
-	"strconv"
-	"strings"
+	"regexp"
 	"time"
 
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
@@ -45,8 +44,9 @@ func resizeGroup(size int) error {
 	if testContext.Provider == "gce" || testContext.Provider == "gke" {
 		// TODO: make this hit the compute API directly instead of shelling out to gcloud.
 		// TODO: make gce/gke implement InstanceGroups, so we can eliminate the per-provider logic
-		output, err := exec.Command("gcloud", "preview", "managed-instance-groups", "--project="+testContext.CloudConfig.ProjectID, "--zone="+testContext.CloudConfig.Zone,
-			"resize", testContext.CloudConfig.NodeInstanceGroup, fmt.Sprintf("--new-size=%v", size)).CombinedOutput()
+		output, err := exec.Command("gcloud", "compute", "instance-groups", "managed", "resize",
+			testContext.CloudConfig.NodeInstanceGroup, fmt.Sprintf("--size=%v", size),
+			"--project="+testContext.CloudConfig.ProjectID, "--zone="+testContext.CloudConfig.Zone).CombinedOutput()
 		if err != nil {
 			Logf("Failed to resize node instance group: %v", string(output))
 		}
@@ -65,27 +65,14 @@ func groupSize() (int, error) {
 	if testContext.Provider == "gce" || testContext.Provider == "gke" {
 		// TODO: make this hit the compute API directly instead of shelling out to gcloud.
 		// TODO: make gce/gke implement InstanceGroups, so we can eliminate the per-provider logic
-		output, err := exec.Command("gcloud", "preview", "managed-instance-groups", "--project="+testContext.CloudConfig.ProjectID,
-			"--zone="+testContext.CloudConfig.Zone, "describe", testContext.CloudConfig.NodeInstanceGroup).CombinedOutput()
+		output, err := exec.Command("gcloud", "compute", "instance-groups", "managed",
+			"list-instances", testContext.CloudConfig.NodeInstanceGroup, "--project="+testContext.CloudConfig.ProjectID,
+			"--zone="+testContext.CloudConfig.Zone).CombinedOutput()
 		if err != nil {
 			return -1, err
 		}
-		pattern := "currentSize: "
-		i := strings.Index(string(output), pattern)
-		if i == -1 {
-			return -1, fmt.Errorf("could not find '%s' in the output '%s'", pattern, output)
-		}
-		truncated := output[i+len(pattern):]
-		j := strings.Index(string(truncated), "\n")
-		if j == -1 {
-			return -1, fmt.Errorf("could not find new line in the truncated output '%s'", truncated)
-		}
-		currentSize, err := strconv.Atoi(string(truncated[:j]))
-		if err != nil {
-			return -1, err
-		}
-		return currentSize, nil
+		re := regexp.MustCompile("RUNNING")
+		return len(re.FindAllString(string(output), -1)), nil
 	} else {
 		// Supported by aws
 		instanceGroups, ok := testContext.CloudConfig.Provider.(aws_cloud.InstanceGroups)
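
The rewritten groupSize gives up on the old `currentSize:` field and instead counts RUNNING rows in list-instances output, which is simpler and closer to what the resize tests actually assert. The shell equivalent of what the Go regexp now computes (a sketch; it assumes list-instances prints a STATUS column containing RUNNING, and PROJECT, ZONE, and GROUP are placeholders):

```
gcloud compute instance-groups managed list-instances "${GROUP}" \
    --project "${PROJECT}" --zone "${ZONE}" | grep -c RUNNING
```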

View File

@@ -232,13 +232,13 @@ func restartNodes(provider string, nt time.Duration) error {
 // done
 //
 // # Step 2: Start the recreate.
-// output=$(gcloud preview managed-instance-groups --project=${PROJECT} --zone=${ZONE} recreate-instances ${GROUP} --instance="${i}")
+// output=$(gcloud compute instance-groups managed --project=${PROJECT} --zone=${ZONE} recreate-instances ${GROUP} --instance="${i}")
 // op=${output##*:}
 //
 // # Step 3: Wait until it's complete.
 // status=""
 // while [[ "${status}" != "DONE" ]]; do
-//   output=$(gcloud preview managed-instance-groups --zone="${ZONE}" get-operation ${op} | grep status)
+//   output=$(gcloud compute instance-groups managed --zone="${ZONE}" get-operation ${op} | grep status)
 //   status=${output##*:}
 // done
 func migRollingUpdateSelf(nt time.Duration) error {
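
Stitched together, the recreate-and-wait pseudocode in the comment above would look roughly like the following (a sketch only: the INSTANCES array, PROJECT, ZONE, and GROUP are assumed to be set by the elided Step 1, and get-operation follows the syntax used elsewhere in this commit):

```
# Step 1 (elided above) is assumed to have filled the INSTANCES array.
for i in "${INSTANCES[@]}"; do
  # Step 2: start the recreate; the operation id is the last ":"-separated field.
  output=$(gcloud compute instance-groups managed --project="${PROJECT}" \
      --zone="${ZONE}" recreate-instances "${GROUP}" --instance="${i}")
  op=${output##*:}

  # Step 3: poll the operation until its status line reports DONE.
  status=""
  while [[ "${status}" != "DONE" ]]; do
    output=$(gcloud compute instance-groups managed --zone="${ZONE}" \
        get-operation ${op} | grep status)
    status=${output##*:}
  done
done
```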