mirror of https://github.com/k3s-io/k3s
AWS: Don't use JSON parsing in kube-up
We use the AWS CLI's --query and --filter support instead; this should be more reliable and clearer. We also set the default output format to text, so we don't have to pass it on every call and don't risk problems if we forget to set it. We do still have to parse JSON in one place: ELB does not support --filter, so we use Python there.

Fixes #16747
parent 33a5874d11
commit d5f62ca67b
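The change applies the same pattern throughout: instead of requesting JSON and post-processing it with a Python helper, the field is extracted by the CLI itself via --query, with text output set once as the default. A minimal before/after sketch of that pattern, based on the create-vpc call in this diff (VPC_CIDR is defined elsewhere in the script; this is an illustration, not an excerpt):

# Before: JSON output piped through the json_val Python helper.
VPC_ID=$(aws --output json ec2 create-vpc --cidr-block ${VPC_CIDR} | \
  python -c 'import json,sys;obj=json.load(sys.stdin);print obj["Vpc"]["VpcId"]')

# After: text output is the default for the script, and --query (a JMESPath
# expression evaluated client-side by the CLI) pulls out the single field.
export AWS_DEFAULT_OUTPUT=text
VPC_ID=$(aws ec2 create-vpc --cidr-block ${VPC_CIDR} --query Vpc.VpcId)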
@@ -64,9 +64,9 @@ esac
 AWS_REGION=${ZONE%?}
 
 export AWS_DEFAULT_REGION=${AWS_REGION}
-AWS_CMD="aws --output json ec2"
-AWS_ELB_CMD="aws --output json elb"
-AWS_ASG_CMD="aws --output json autoscaling"
+export AWS_DEFAULT_OUTPUT=text
+AWS_CMD="aws ec2"
+AWS_ASG_CMD="aws autoscaling"
 
 VPC_CIDR_BASE=172.20
 MASTER_IP_SUFFIX=.9
@@ -88,15 +88,10 @@ BLOCK_DEVICE_MAPPINGS_BASE="{\"DeviceName\": \"/dev/sdc\",\"VirtualName\":\"ephe
 MASTER_BLOCK_DEVICE_MAPPINGS="[{\"DeviceName\":\"/dev/sda1\",\"Ebs\":{\"DeleteOnTermination\":true,\"VolumeSize\":${MASTER_ROOT_DISK_SIZE},\"VolumeType\":\"${MASTER_ROOT_DISK_TYPE}\"}}, ${BLOCK_DEVICE_MAPPINGS_BASE}]"
 MINION_BLOCK_DEVICE_MAPPINGS="[{\"DeviceName\":\"/dev/sda1\",\"Ebs\":{\"DeleteOnTermination\":true,\"VolumeSize\":${MINION_ROOT_DISK_SIZE},\"VolumeType\":\"${MINION_ROOT_DISK_TYPE}\"}}, ${BLOCK_DEVICE_MAPPINGS_BASE}]"
 
-function json_val {
-  python -c 'import json,sys;obj=json.load(sys.stdin);print obj'$1''
-}
-
-# TODO (ayurchuk) Refactor the get_* functions to use filters
 # TODO (bburns) Parameterize this for multiple cluster per project
 
 function get_vpc_id {
-  $AWS_CMD --output text describe-vpcs \
+  $AWS_CMD describe-vpcs \
            --filters Name=tag:Name,Values=kubernetes-vpc \
                      Name=tag:KubernetesCluster,Values=${CLUSTER_ID} \
            --query Vpcs[].VpcId
@@ -105,7 +100,7 @@ function get_vpc_id {
 function get_subnet_id {
   local vpc_id=$1
   local az=$2
-  $AWS_CMD --output text describe-subnets \
+  $AWS_CMD describe-subnets \
            --filters Name=tag:KubernetesCluster,Values=${CLUSTER_ID} \
                      Name=availabilityZone,Values=${az} \
                      Name=vpc-id,Values=${vpc_id} \
@@ -114,24 +109,20 @@ function get_subnet_id {
 
 function get_igw_id {
   local vpc_id=$1
-  $AWS_CMD --output text describe-internet-gateways \
+  $AWS_CMD describe-internet-gateways \
            --filters Name=attachment.vpc-id,Values=${vpc_id} \
            --query InternetGateways[].InternetGatewayId
 }
 
 function get_elbs_in_vpc {
   # ELB doesn't seem to be on the same platform as the rest of AWS; doesn't support filtering
-  $AWS_ELB_CMD describe-load-balancers | \
+  aws elb --output json describe-load-balancers | \
     python -c "import json,sys; lst = [str(lb['LoadBalancerName']) for lb in json.load(sys.stdin)['LoadBalancerDescriptions'] if lb['VPCId'] == '$1']; print '\n'.join(lst)"
 }
 
-function expect_instance_states {
-  python -c "import json,sys; lst = [str(instance['InstanceId']) for reservation in json.load(sys.stdin)['Reservations'] for instance in reservation['Instances'] if instance['State']['Name'] != '$1']; print ' '.join(lst)"
-}
-
 function get_instanceid_from_name {
   local tagName=$1
-  $AWS_CMD --output text describe-instances \
+  $AWS_CMD describe-instances \
            --filters Name=tag:Name,Values=${tagName} \
                      Name=instance-state-name,Values=running \
                      Name=tag:KubernetesCluster,Values=${CLUSTER_ID} \
@@ -140,14 +131,14 @@ function get_instanceid_from_name {
 
 function get_instance_public_ip {
   local instance_id=$1
-  $AWS_CMD --output text describe-instances \
+  $AWS_CMD describe-instances \
            --instance-ids ${instance_id} \
            --query Reservations[].Instances[].NetworkInterfaces[0].Association.PublicIp
 }
 
 function get_instance_private_ip {
   local instance_id=$1
-  $AWS_CMD --output text describe-instances \
+  $AWS_CMD describe-instances \
            --instance-ids ${instance_id} \
            --query Reservations[].Instances[].NetworkInterfaces[0].PrivateIpAddress
 }
@@ -155,7 +146,7 @@ function get_instance_private_ip {
 # Gets a security group id, by name ($1)
 function get_security_group_id {
   local name=$1
-  $AWS_CMD --output text describe-security-groups \
+  $AWS_CMD describe-security-groups \
            --filters Name=vpc-id,Values=${VPC_ID} \
                      Name=group-name,Values=${name} \
                      Name=tag:KubernetesCluster,Values=${CLUSTER_ID} \
@@ -185,7 +176,7 @@ function detect-master () {
 
 function query-running-minions () {
   local query=$1
-  $AWS_CMD --output text describe-instances \
+  $AWS_CMD describe-instances \
            --filters Name=instance-state-name,Values=running \
                      Name=vpc-id,Values=${VPC_ID} \
                      Name=tag:KubernetesCluster,Values=${CLUSTER_ID} \
@@ -391,7 +382,7 @@ function create-security-group {
   local sgid=$(get_security_group_id "${name}")
   if [[ -z "$sgid" ]]; then
     echo "Creating security group ${name}."
-    sgid=$($AWS_CMD create-security-group --group-name "${name}" --description "${description}" --vpc-id "${VPC_ID}" --query GroupId --output text)
+    sgid=$($AWS_CMD create-security-group --group-name "${name}" --description "${description}" --vpc-id "${VPC_ID}" --query GroupId)
     add-tag $sgid KubernetesCluster ${CLUSTER_ID}
   fi
 }
@@ -421,7 +412,7 @@ function authorize-security-group-ingress {
 function find-master-pd {
   local name=${MASTER_NAME}-pd
   if [[ -z "${MASTER_DISK_ID}" ]]; then
-    MASTER_DISK_ID=`$AWS_CMD --output text describe-volumes \
+    MASTER_DISK_ID=`$AWS_CMD describe-volumes \
                     --filters Name=availability-zone,Values=${ZONE} \
                               Name=tag:Name,Values=${name} \
                               Name=tag:KubernetesCluster,Values=${CLUSTER_ID} \
@@ -438,7 +429,7 @@ function ensure-master-pd {
 
   if [[ -z "${MASTER_DISK_ID}" ]]; then
     echo "Creating master disk: size ${MASTER_DISK_SIZE}GB, type ${MASTER_DISK_TYPE}"
-    MASTER_DISK_ID=`$AWS_CMD create-volume --availability-zone ${ZONE} --volume-type ${MASTER_DISK_TYPE} --size ${MASTER_DISK_SIZE} --query VolumeId --output text`
+    MASTER_DISK_ID=`$AWS_CMD create-volume --availability-zone ${ZONE} --volume-type ${MASTER_DISK_TYPE} --size ${MASTER_DISK_SIZE} --query VolumeId`
     add-tag ${MASTER_DISK_ID} Name ${name}
     add-tag ${MASTER_DISK_ID} KubernetesCluster ${CLUSTER_ID}
   fi
@@ -456,12 +447,12 @@ function create-dhcp-option-set () {
       OPTION_SET_DOMAIN="${AWS_REGION}.compute.internal"
   esac
 
-  DHCP_OPTION_SET_ID=$($AWS_CMD create-dhcp-options --dhcp-configuration Key=domain-name,Values=${OPTION_SET_DOMAIN} Key=domain-name-servers,Values=AmazonProvidedDNS | json_val '["DhcpOptions"]["DhcpOptionsId"]')
+  DHCP_OPTION_SET_ID=$($AWS_CMD create-dhcp-options --dhcp-configuration Key=domain-name,Values=${OPTION_SET_DOMAIN} Key=domain-name-servers,Values=AmazonProvidedDNS --query DhcpOptions.DhcpOptionsId)
 
   add-tag ${DHCP_OPTION_SET_ID} Name kubernetes-dhcp-option-set
   add-tag ${DHCP_OPTION_SET_ID} KubernetesCluster ${CLUSTER_ID}
 
-  $AWS_CMD associate-dhcp-options --dhcp-options-id ${DHCP_OPTION_SET_ID} --vpc-id ${VPC_ID}
+  $AWS_CMD associate-dhcp-options --dhcp-options-id ${DHCP_OPTION_SET_ID} --vpc-id ${VPC_ID} > $LOG
 
   echo "Using DHCP option set ${DHCP_OPTION_SET_ID}"
 }
@@ -561,7 +552,7 @@ function upload-server-tars() {
     done
   fi
 
-  local s3_bucket_location=$(aws --output text s3api get-bucket-location --bucket ${AWS_S3_BUCKET})
+  local s3_bucket_location=$(aws s3api get-bucket-location --bucket ${AWS_S3_BUCKET})
   local s3_url_base=https://s3-${s3_bucket_location}.amazonaws.com
   if [[ "${s3_bucket_location}" == "None" ]]; then
     # "US Classic" does not follow the pattern
@@ -641,15 +632,17 @@ function ensure-iam-profiles {
   }
 }
 
-# Wait for instance to be in running state
-function wait-for-instance-running {
+# Wait for instance to be in specified state
+function wait-for-instance-state {
   instance_id=$1
+  state=$2
+
   while true; do
-    instance_state=$($AWS_CMD describe-instances --instance-ids ${instance_id} | expect_instance_states running)
-    if [[ "$instance_state" == "" ]]; then
+    instance_state=$($AWS_CMD describe-instances --instance-ids ${instance_id} --query Reservations[].Instances[].State.Name)
+    if [[ "$instance_state" == "${state}" ]]; then
       break
     else
-      echo "Waiting for instance ${instance_id} to spawn"
+      echo "Waiting for instance ${instance_id} to be ${state} (currently ${instance_state})"
       echo "Sleeping for 3 seconds..."
       sleep 3
     fi
@@ -659,22 +652,16 @@ function wait-for-instance-running {
 # Allocates new Elastic IP from Amazon
 # Output: allocated IP address
 function allocate-elastic-ip {
-  $AWS_CMD allocate-address --domain vpc --output text | cut -f3
+  $AWS_CMD allocate-address --domain vpc --query PublicIp
 }
 
 function assign-ip-to-instance {
   local ip_address=$1
   local instance_id=$2
-  local fallback_ip=$3
 
-  local elastic_ip_allocation_id=$($AWS_CMD describe-addresses --public-ips $ip_address --output text | cut -f2)
-  local association_result=$($AWS_CMD associate-address --instance-id ${master_instance_id} --allocation-id ${elastic_ip_allocation_id} > /dev/null && echo "success" || echo "failure")
-
-  if [[ $association_result = "success" ]]; then
-    echo "${ip_address}"
-  else
-    echo "${fallback_ip}"
-  fi
+  local elastic_ip_allocation_id=$($AWS_CMD describe-addresses --public-ips $ip_address --query Addresses[].AllocationId)
+  echo "Attaching IP ${ip_address} to instance ${instance_id}"
+  $AWS_CMD associate-address --instance-id ${instance_id} --allocation-id ${elastic_ip_allocation_id} > $LOG
 }
 
 # If MASTER_RESERVED_IP looks like IP address, will try to assign it to master instance
@@ -732,7 +719,7 @@ function kube-up {
   fi
   if [[ -z "$VPC_ID" ]]; then
     echo "Creating vpc."
-    VPC_ID=$($AWS_CMD create-vpc --cidr-block ${VPC_CIDR} | json_val '["Vpc"]["VpcId"]')
+    VPC_ID=$($AWS_CMD create-vpc --cidr-block ${VPC_CIDR} --query Vpc.VpcId)
     $AWS_CMD modify-vpc-attribute --vpc-id $VPC_ID --enable-dns-support '{"Value": true}' > $LOG
     $AWS_CMD modify-vpc-attribute --vpc-id $VPC_ID --enable-dns-hostnames '{"Value": true}' > $LOG
     add-tag $VPC_ID Name kubernetes-vpc
@@ -749,12 +736,12 @@ function kube-up {
 
   if [[ -z "$SUBNET_ID" ]]; then
     echo "Creating subnet."
-    SUBNET_ID=$($AWS_CMD create-subnet --cidr-block ${SUBNET_CIDR} --vpc-id $VPC_ID --availability-zone ${ZONE} | json_val '["Subnet"]["SubnetId"]')
+    SUBNET_ID=$($AWS_CMD create-subnet --cidr-block ${SUBNET_CIDR} --vpc-id $VPC_ID --availability-zone ${ZONE} --query Subnet.SubnetId)
     add-tag $SUBNET_ID KubernetesCluster ${CLUSTER_ID}
   else
-    EXISTING_CIDR=$($AWS_CMD describe-subnets --subnet-ids ${SUBNET_ID} --query Subnets[].CidrBlock --output text)
+    EXISTING_CIDR=$($AWS_CMD describe-subnets --subnet-ids ${SUBNET_ID} --query Subnets[].CidrBlock)
     echo "Using existing subnet with CIDR $EXISTING_CIDR"
-    VPC_CIDR=$($AWS_CMD describe-vpcs --vpc-ids ${VPC_ID} --query Vpcs[].CidrBlock --output text)
+    VPC_CIDR=$($AWS_CMD describe-vpcs --vpc-ids ${VPC_ID} --query Vpcs[].CidrBlock)
     echo "VPC CIDR is $VPC_CIDR"
     VPC_CIDR_BASE=${VPC_CIDR%.*.*}
     MASTER_INTERNAL_IP=${VPC_CIDR_BASE}.0${MASTER_IP_SUFFIX}
@@ -766,20 +753,20 @@ function kube-up {
   IGW_ID=$(get_igw_id $VPC_ID)
   if [[ -z "$IGW_ID" ]]; then
     echo "Creating Internet Gateway."
-    IGW_ID=$($AWS_CMD create-internet-gateway | json_val '["InternetGateway"]["InternetGatewayId"]')
+    IGW_ID=$($AWS_CMD create-internet-gateway --query InternetGateway.InternetGatewayId)
     $AWS_CMD attach-internet-gateway --internet-gateway-id $IGW_ID --vpc-id $VPC_ID > $LOG
   fi
 
   echo "Using Internet Gateway $IGW_ID"
 
   echo "Associating route table."
-  ROUTE_TABLE_ID=$($AWS_CMD --output text describe-route-tables \
+  ROUTE_TABLE_ID=$($AWS_CMD describe-route-tables \
                    --filters Name=vpc-id,Values=${VPC_ID} \
                              Name=tag:KubernetesCluster,Values=${CLUSTER_ID} \
                    --query RouteTables[].RouteTableId)
   if [[ -z "${ROUTE_TABLE_ID}" ]]; then
     echo "Creating route table"
-    ROUTE_TABLE_ID=$($AWS_CMD --output text create-route-table \
+    ROUTE_TABLE_ID=$($AWS_CMD create-route-table \
                      --vpc-id=${VPC_ID} \
                      --query RouteTable.RouteTableId)
     add-tag ${ROUTE_TABLE_ID} KubernetesCluster ${CLUSTER_ID}
@@ -920,7 +907,8 @@ function start-master() {
     --security-group-ids ${MASTER_SG_ID} \
     --associate-public-ip-address \
     --block-device-mappings "${MASTER_BLOCK_DEVICE_MAPPINGS}" \
-    --user-data file://${KUBE_TEMP}/master-start.sh | json_val '["Instances"][0]["InstanceId"]')
+    --user-data file://${KUBE_TEMP}/master-start.sh \
+    --query Instances[].InstanceId)
   add-tag $master_id Name $MASTER_NAME
   add-tag $master_id Role $MASTER_TAG
   add-tag $master_id KubernetesCluster ${CLUSTER_ID}
@@ -941,7 +929,7 @@ function start-master() {
     fi
   else
     # We are not able to add an elastic ip, a route or volume to the instance until that instance is in "running" state.
-    wait-for-instance-running $master_id
+    wait-for-instance-state ${master_id} "running"
 
     KUBE_MASTER=${MASTER_NAME}
     KUBE_MASTER_IP=$(assign-elastic-ip $ip $master_id)
@@ -1185,7 +1173,7 @@ function kube-down {
   if [[ -n "${elb_ids}" ]]; then
     echo "Deleting ELBs in: ${vpc_id}"
     for elb_id in ${elb_ids}; do
-      $AWS_ELB_CMD delete-load-balancer --load-balancer-name=${elb_id}
+      aws elb delete-load-balancer --load-balancer-name=${elb_id} >$LOG
     done
 
     echo "Waiting for ELBs to be deleted"
@@ -1203,21 +1191,21 @@ function kube-down {
   fi
 
   echo "Deleting instances in VPC: ${vpc_id}"
-  instance_ids=$($AWS_CMD --output text describe-instances \
+  instance_ids=$($AWS_CMD describe-instances \
                  --filters Name=vpc-id,Values=${vpc_id} \
                            Name=tag:KubernetesCluster,Values=${CLUSTER_ID} \
                  --query Reservations[].Instances[].InstanceId)
 
   if [[ -n "${instance_ids}" ]]; then
-    asg_groups=$($AWS_CMD --output text describe-instances \
+    asg_groups=$($AWS_CMD describe-instances \
                  --query 'Reservations[].Instances[].Tags[?Key==`aws:autoscaling:groupName`].Value[]' \
                  --instance-ids ${instance_ids})
     for asg_group in ${asg_groups}; do
-      if [[ -n $(${AWS_ASG_CMD} --output text describe-auto-scaling-groups --auto-scaling-group-names ${asg_group} --query AutoScalingGroups[].AutoScalingGroupName) ]]; then
+      if [[ -n $(${AWS_ASG_CMD} describe-auto-scaling-groups --auto-scaling-group-names ${asg_group} --query AutoScalingGroups[].AutoScalingGroupName) ]]; then
        echo "Deleting auto-scaling group: ${asg_group}"
        ${AWS_ASG_CMD} delete-auto-scaling-group --force-delete --auto-scaling-group-name ${asg_group}
      fi
-      if [[ -n $(${AWS_ASG_CMD} --output text describe-launch-configurations --launch-configuration-names ${asg_group} --query LaunchConfigurations[].LaunchConfigurationName) ]]; then
+      if [[ -n $(${AWS_ASG_CMD} describe-launch-configurations --launch-configuration-names ${asg_group} --query LaunchConfigurations[].LaunchConfigurationName) ]]; then
        echo "Deleting auto-scaling launch configuration: ${asg_group}"
        ${AWS_ASG_CMD} delete-launch-configuration --launch-configuration-name ${asg_group}
      fi
@@ -1225,26 +1213,19 @@ function kube-down {
 
     $AWS_CMD terminate-instances --instance-ids ${instance_ids} > $LOG
     echo "Waiting for instances to be deleted"
-    while true; do
-      local instance_states=$($AWS_CMD describe-instances --instance-ids ${instance_ids} | expect_instance_states terminated)
-      if [[ -z "${instance_states}" ]]; then
-        echo "All instances deleted"
-        break
-      else
-        echo "Instances not yet deleted: ${instance_states}"
-        echo "Sleeping for 3 seconds..."
-        sleep 3
-      fi
+    for instance_id in ${instance_ids}; do
+      wait-for-instance-state ${instance_id} "terminated"
     done
+    echo "All instances deleted"
   fi
 
-  echo "Deleting VPC: ${vpc_id}"
-  default_sg_id=$($AWS_CMD --output text describe-security-groups \
+  echo "Cleaning up resources in VPC: ${vpc_id}"
+  default_sg_id=$($AWS_CMD describe-security-groups \
                   --filters Name=vpc-id,Values=${vpc_id} \
                             Name=group-name,Values=default \
                   --query SecurityGroups[].GroupId \
                   | tr "\t" "\n")
-  sg_ids=$($AWS_CMD --output text describe-security-groups \
+  sg_ids=$($AWS_CMD describe-security-groups \
            --filters Name=vpc-id,Values=${vpc_id} \
                      Name=tag:KubernetesCluster,Values=${CLUSTER_ID} \
            --query SecurityGroups[].GroupId \
@@ -1258,7 +1239,7 @@ function kube-down {
     fi
 
     echo "Cleaning up security group: ${sg_id}"
-    other_sgids=$(aws ec2 describe-security-groups --group-id "${sg_id}" --query SecurityGroups[].IpPermissions[].UserIdGroupPairs[].GroupId --output text)
+    other_sgids=$(${AWS_CMD} describe-security-groups --group-id "${sg_id}" --query SecurityGroups[].IpPermissions[].UserIdGroupPairs[].GroupId)
     for other_sgid in ${other_sgids}; do
       $AWS_CMD revoke-security-group-ingress --group-id "${sg_id}" --source-group "${other_sgid}" --protocol all > $LOG
     done
@@ -1274,7 +1255,7 @@ function kube-down {
     $AWS_CMD delete-security-group --group-id ${sg_id} > $LOG
   done
 
-  subnet_ids=$($AWS_CMD --output text describe-subnets \
+  subnet_ids=$($AWS_CMD describe-subnets \
                --filters Name=vpc-id,Values=${vpc_id} \
                          Name=tag:KubernetesCluster,Values=${CLUSTER_ID} \
                --query Subnets[].SubnetId \
@@ -1283,7 +1264,7 @@ function kube-down {
     $AWS_CMD delete-subnet --subnet-id ${subnet_id} > $LOG
   done
 
-  igw_ids=$($AWS_CMD --output text describe-internet-gateways \
+  igw_ids=$($AWS_CMD describe-internet-gateways \
             --filters Name=attachment.vpc-id,Values=${vpc_id} \
             --query InternetGateways[].InternetGatewayId \
             | tr "\t" "\n")
@@ -1292,7 +1273,7 @@ function kube-down {
     $AWS_CMD delete-internet-gateway --internet-gateway-id $igw_id > $LOG
   done
 
-  route_table_ids=$($AWS_CMD --output text describe-route-tables \
+  route_table_ids=$($AWS_CMD describe-route-tables \
                     --filters Name=vpc-id,Values=$vpc_id \
                               Name=route.destination-cidr-block,Values=0.0.0.0/0 \
                     --query RouteTables[].RouteTableId \
@@ -1300,7 +1281,7 @@ function kube-down {
   for route_table_id in ${route_table_ids}; do
     $AWS_CMD delete-route --route-table-id $route_table_id --destination-cidr-block 0.0.0.0/0 > $LOG
   done
-  route_table_ids=$($AWS_CMD --output text describe-route-tables \
+  route_table_ids=$($AWS_CMD describe-route-tables \
                     --filters Name=vpc-id,Values=$vpc_id \
                               Name=tag:KubernetesCluster,Values=${CLUSTER_ID} \
                     --query RouteTables[].RouteTableId \