#!/bin/bash
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# exit on any error
set -e

function release_not_found() {
  echo "It looks as if you don't have a compiled version of Kubernetes. If you" >&2
  echo "are running from a clone of the git repo, please run ./build/release.sh." >&2
  echo "Note that this requires having Docker installed. If you are running" >&2
  echo "from a release tarball, something is wrong. Look at" >&2
  echo "http://kubernetes.io/ for information on how to contact the development team for help." >&2
  exit 1
}

# Look for our precompiled binary releases. When running from a source repo,
# these are generated under _output. When running from a release tarball these
# are under ./server.
server_binary_tar="/vagrant/server/kubernetes-server-linux-amd64.tar.gz"
if [[ ! -f "$server_binary_tar" ]]; then
  server_binary_tar="/vagrant/_output/release-tars/kubernetes-server-linux-amd64.tar.gz"
fi
if [[ ! -f "$server_binary_tar" ]]; then
  release_not_found
fi

salt_tar="/vagrant/server/kubernetes-salt.tar.gz"
if [[ ! -f "$salt_tar" ]]; then
  salt_tar="/vagrant/_output/release-tars/kubernetes-salt.tar.gz"
fi
if [[ ! -f "$salt_tar" ]]; then
  release_not_found
fi

# Setup hosts file to support ping by hostname to each minion in the cluster from apiserver
for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
  minion=${MINION_NAMES[$i]}
  ip=${MINION_IPS[$i]}
  if ! grep -q "$minion" /etc/hosts; then
    echo "Adding $minion to hosts file"
    echo "$ip $minion" >> /etc/hosts
  fi
done
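
# Each iteration appends an "<ip> <hostname>" pair, e.g. something like
# "10.245.2.2 minion-1" depending on how MINION_IPS and MINION_NAMES are set
# by the caller, so the apiserver can resolve minions by name.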

# Update salt configuration
mkdir -p /etc/salt/minion.d
cat <<EOF >/etc/salt/minion.d/master.conf
master: '$(echo "$MASTER_NAME" | sed -e "s/'/''/g")'
auth_timeout: 10
auth_tries: 2
auth_safemode: True
ping_interval: 1
random_reauth_delay: 3
state_aggregate:
  - pkg
EOF
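
# The $(echo "$VAR" | sed -e "s/'/''/g") pattern doubles any embedded single
# quotes so the value stays a valid YAML single-quoted scalar (in YAML, ''
# inside a single-quoted string is an escaped quote).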

cat <<EOF >/etc/salt/minion.d/grains.conf
grains:
  node_ip: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")'
  master_ip: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")'
  publicAddressOverride: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")'
  network_mode: openvswitch
  networkInterfaceName: eth1
  etcd_servers: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")'
  api_servers: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")'
  cloud: vagrant
  cloud_provider: vagrant
  roles:
    - kubernetes-master
  runtime_config: '$(echo "$RUNTIME_CONFIG" | sed -e "s/'/''/g")'
EOF
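
# These grains are static, minion-local metadata; the kubernetes-master entry
# under roles: is what the Salt top file is expected to match on when deciding
# which states to apply to this machine.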

mkdir -p /srv/salt-overlay/pillar
cat <<EOF >/srv/salt-overlay/pillar/cluster-params.sls
portal_net: '$(echo "$PORTAL_NET" | sed -e "s/'/''/g")'
cert_ip: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")'
enable_cluster_monitoring: '$(echo "$ENABLE_CLUSTER_MONITORING" | sed -e "s/'/''/g")'
enable_node_monitoring: '$(echo "$ENABLE_NODE_MONITORING" | sed -e "s/'/''/g")'
enable_cluster_logging: '$(echo "$ENABLE_CLUSTER_LOGGING" | sed -e "s/'/''/g")'
enable_node_logging: '$(echo "$ENABLE_NODE_LOGGING" | sed -e "s/'/''/g")'
logging_destination: '$(echo "$LOGGING_DESTINATION" | sed -e "s/'/''/g")'
elasticsearch_replicas: '$(echo "$ELASTICSEARCH_LOGGING_REPLICAS" | sed -e "s/'/''/g")'
enable_cluster_dns: '$(echo "$ENABLE_CLUSTER_DNS" | sed -e "s/'/''/g")'
dns_replicas: '$(echo "$DNS_REPLICAS" | sed -e "s/'/''/g")'
dns_server: '$(echo "$DNS_SERVER_IP" | sed -e "s/'/''/g")'
dns_domain: '$(echo "$DNS_DOMAIN" | sed -e "s/'/''/g")'
instance_prefix: '$(echo "$INSTANCE_PREFIX" | sed -e "s/'/''/g")'
admission_control: '$(echo "$ADMISSION_CONTROL" | sed -e "s/'/''/g")'
EOF
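
# Values in cluster-params.sls become Salt pillar data; a state or template can
# then read them with something like {{ pillar['portal_net'] }}.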

# Configure the salt-master
# Auto accept all keys from minions that try to join
mkdir -p /etc/salt/master.d
cat <<EOF >/etc/salt/master.d/auto-accept.conf
open_mode: True
auto_accept: True
EOF
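
# open_mode disables key verification and auto_accept takes new minion keys
# without a manual "salt-key -a", which is convenient for a throwaway Vagrant
# cluster but would be unsafe on an untrusted network.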

cat <<EOF >/etc/salt/master.d/reactor.conf
# React to new minions starting by running highstate on them.
reactor:
  - 'salt/minion/*/start':
    - /srv/reactor/highstate-new.sls
EOF

cat <<EOF >/etc/salt/master.d/salt-output.conf
# Minimize the amount of output to terminal
state_verbose: False
state_output: mixed
log_level: debug
log_level_logfile: debug
EOF

cat <<EOF >/etc/salt/minion.d/log-level-debug.conf
log_level: debug
log_level_logfile: debug
EOF

# Generate and distribute a shared secret (bearer token) to
# apiserver and kubelet so that kubelet can authenticate to
# apiserver to send events.
known_tokens_file="/srv/salt-overlay/salt/kube-apiserver/known_tokens.csv"
if [[ ! -f "${known_tokens_file}" ]]; then
  kubelet_token=$(cat /dev/urandom | base64 | tr -d "=+/" | dd bs=32 count=1 2> /dev/null)

  mkdir -p /srv/salt-overlay/salt/kube-apiserver
  known_tokens_file="/srv/salt-overlay/salt/kube-apiserver/known_tokens.csv"
  (umask u=rw,go= ; echo "$kubelet_token,kubelet,kubelet" > $known_tokens_file)

  mkdir -p /srv/salt-overlay/salt/kubelet
  kubelet_auth_file="/srv/salt-overlay/salt/kubelet/kubernetes_auth"
  (umask u=rw,go= ; echo "{\"BearerToken\": \"$kubelet_token\", \"Insecure\": true }" > $kubelet_auth_file)
fi
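
# The generated token is 32 characters (dd cuts the base64 stream, stripped of
# "=+/", at 32 bytes); each known_tokens.csv row above has the form
# "<token>,<user>,<uid>", which is the format the apiserver's token auth file expects.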

# Configure nginx authorization
mkdir -p /srv/salt-overlay/salt/nginx
if [[ ! -f /srv/salt-overlay/salt/nginx/htpasswd ]]; then
  python "${KUBE_ROOT}/third_party/htpasswd/htpasswd.py" \
    -b -c "/srv/salt-overlay/salt/nginx/htpasswd" \
    "$MASTER_USER" "$MASTER_PASSWD"
fi
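
# htpasswd.py mirrors Apache's htpasswd: -c creates the file and -b takes the
# password from the command line instead of prompting. The existence check
# above means the nginx basic-auth credentials are only seeded once.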

echo "Running release install script"
rm -rf /kube-install
mkdir -p /kube-install
pushd /kube-install
  tar xzf "$salt_tar"
  cp "$server_binary_tar" .
  ./kubernetes/saltbase/install.sh "${server_binary_tar##*/}"
popd
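
# Roughly speaking, install.sh from the salt tarball lays down the Salt state
# tree that the rest of this provisioning drives and stages the server binary
# tarball where those states can find it.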

# We will run provision to update code each time we test, so we do not want to
# redo the Salt installs each time.
if ! which salt-master &>/dev/null; then

  # Configure the salt-api
  cat <<EOF >/etc/salt/master.d/salt-api.conf
# Set vagrant user as REST API user
external_auth:
  pam:
    vagrant:
      - .*
rest_cherrypy:
  port: 8000
  host: 127.0.0.1
  disable_ssl: True
  webhook_disable_auth: True
EOF

  # Install Salt Master
  #
  # -M installs the master
  # -N does not install the minion
  curl -sS -L --connect-timeout 20 --retry 6 --retry-delay 10 https://bootstrap.saltstack.com | sh -s -- -M -N

  # Install salt-api
  #
  # python-cherrypy provides the network transport for salt-api
  yum install -y python-cherrypy
  # salt-api is used to provide cluster information to the vagrant cloud provider
  yum install -y salt-api
  # Set log level to a level higher than "info" to prevent the message about
  # enabling the service (which is not an error) from being printed to stderr.
  SYSTEMD_LOG_LEVEL=notice systemctl enable salt-api
  systemctl start salt-api
fi
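
# With the salt-api.conf written above, salt-api serves its REST interface on
# http://127.0.0.1:8000 without TLS and authenticates the vagrant user through
# PAM (the '.*' entry grants that user access to everything).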

if ! which salt-minion >/dev/null 2>&1; then

  # Install Salt minion
  curl -sS -L --connect-timeout 20 --retry 6 --retry-delay 10 https://bootstrap.saltstack.com | sh -s

else
  # Only run highstate when updating the config. In the first-run case, Salt is
  # set up to run highstate as new minions join for the first time.
  echo "Executing configuration"
  salt '*' mine.update
  salt --show-timeout --force-color '*' state.highstate
fi
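
# On re-provisioning, "salt '*' mine.update" refreshes the data minions publish
# to the Salt mine and "state.highstate" re-applies the full configured state
# tree to every minion that has already joined.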