mirror of https://github.com/k3s-io/k3s
Merge pull request #11358 from ZJU-SEL/upgradek8s
[WIP] upgrade ubuntu bare-metal k8s to 1.0.1
commit 13c5761268
cluster/ubuntu/.gitignore
@@ -1 +1,2 @@
 binaries
+skydns*
cluster/ubuntu/build.sh
@@ -55,7 +55,7 @@ cp $ETCD/etcd $ETCD/etcdctl binaries/minion
 # k8s
 echo "Download kubernetes release ..."
-K8S_VERSION=${K8S_VERSION:-"0.19.3"}
+K8S_VERSION=${K8S_VERSION:-"1.0.1"}
 
 if [ ! -f kubernetes.tar.gz ] ; then
     curl -L https://github.com/GoogleCloudPlatform/kubernetes/releases/download/v${K8S_VERSION}/kubernetes.tar.gz -o kubernetes.tar.gz
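Since `K8S_VERSION` uses the `${VAR:-default}` form, this bump only changes the fallback; callers can still pin a version from the environment. A minimal sketch of how that expansion behaves (the final invocation line is hypothetical usage, not from the script):

```sh
# ${K8S_VERSION:-"1.0.1"} keeps any value already in the environment
# and falls back to 1.0.1 only when the variable is unset or empty.
unset K8S_VERSION
echo "${K8S_VERSION:-1.0.1}"   # -> 1.0.1 (fallback)
K8S_VERSION=1.0.3
echo "${K8S_VERSION:-1.0.1}"   # -> 1.0.3 (environment wins)
# so, for example:  K8S_VERSION=1.0.3 ./build.sh
```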
cluster/ubuntu/config-default.sh
@@ -20,10 +20,10 @@
 # And separated with blank space like <user_1@ip_1> <user_2@ip_2> <user_3@ip_3>
 export nodes=${nodes:-"vcap@10.10.103.250 vcap@10.10.103.162 vcap@10.10.103.223"}
 # Define all your nodes role: a(master) or i(minion) or ai(both master and minion), must be the order same
-roles=${roles:-"ai i i"}
+role=${role:-"ai i i"}
 # If it practically impossible to set an array as an environment variable
 # from a script, so assume variable is a string then convert it to an array
-export roles=($roles)
+export roles=($role)
 
 # Define minion numbers
 export NUM_MINIONS=${NUM_MINIONS:-3}
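The rename from `roles` to `role` keeps the pattern the comment describes: the scalar travels as a string and is re-split into an array. A self-contained sketch of that conversion:

```sh
# Exported environment variables cannot carry bash arrays across
# processes, so the role list is passed as a whitespace-separated string.
role="ai i i"
roles=($role)          # unquoted expansion word-splits into an array
echo "${roles[0]}"     # -> ai  (first node acts as master + minion)
echo "${#roles[@]}"    # -> 3   (one entry per node, matching NUM_MINIONS)
```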
@@ -32,8 +32,7 @@ export NUM_MINIONS=${NUM_MINIONS:-3}
 export SERVICE_CLUSTER_IP_RANGE=${SERVICE_CLUSTER_IP_RANGE:-192.168.3.0/24} # formerly PORTAL_NET
 # define the IP range used for flannel overlay network, should not conflict with above SERVICE_CLUSTER_IP_RANGE
 export FLANNEL_NET=${FLANNEL_NET:-172.16.0.0/16}
+echo "FLANNEL_NET"
+echo $FLANNEL_NET
-
-export FLANNEL_OPTS=${FLANNEL_OPTS:-"Network": 172.16.0.0/16}
-
 # Admission Controllers to invoke prior to persisting objects in cluster
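The dropped `FLANNEL_OPTS` line was a malformed fragment (unquoted JSON shoved into a shell default). Flannel 0.4 actually takes its overlay definition as a JSON document from etcd, which these scripts seed elsewhere with something like the following (a sketch, not the exact line from reconfDocker.sh):

```sh
# flannel reads its network config as JSON from this etcd key:
etcdctl set /coreos.com/network/config '{ "Network": "172.16.0.0/16" }'
```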
cluster/ubuntu/deployAddons.sh
@@ -19,14 +19,11 @@
 set -e
 
+KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
 source "util.sh"
-detect-master
-KUBE_SERVER="http:\/\/${KUBE_MASTER_IP}:8080"
 
 source "config-default.sh"
 if [ "${ENABLE_CLUSTER_DNS}" == true ]; then
   echo "Deploying DNS on kubernetes"
-  sed -e "s/{{ pillar\['dns_replicas'\] }}/${DNS_REPLICAS}/g;s/{{ pillar\['dns_domain'\] }}/${DNS_DOMAIN}/g;s/kube_server_url/${KUBE_SERVER}/g;" skydns-rc.yaml.template > skydns-rc.yaml
-  sed -e "s/{{ pillar\['dns_server'\] }}/${DNS_SERVER_IP}/g" skydns-svc.yaml.template > skydns-svc.yaml
+  sed -e "s/{{ pillar\['dns_replicas'\] }}/${DNS_REPLICAS}/g;s/{{ pillar\['dns_domain'\] }}/${DNS_DOMAIN}/g;" "${KUBE_ROOT}/cluster/addons/dns/skydns-rc.yaml.in" > skydns-rc.yaml
+  sed -e "s/{{ pillar\['dns_server'\] }}/${DNS_SERVER_IP}/g" "${KUBE_ROOT}/cluster/addons/dns/skydns-svc.yaml.in" > skydns-svc.yaml
 
   # use kubectl to create skydns rc and service
   "${KUBE_ROOT}/cluster/kubectl.sh" --namespace=kube-system create -f skydns-rc.yaml
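The sed pipelines above do plain placeholder substitution on the salt-style templates. A reduced, runnable sketch of the same idea (file names hypothetical):

```sh
# Render 'replicas: {{ pillar['dns_replicas'] }}' -> 'replicas: 1'
DNS_REPLICAS=1
DNS_DOMAIN=cluster.local
printf "replicas: {{ pillar['dns_replicas'] }}\ndomain: {{ pillar['dns_domain'] }}\n" > example.yaml.in
sed -e "s/{{ pillar\['dns_replicas'\] }}/${DNS_REPLICAS}/g" \
    -e "s/{{ pillar\['dns_domain'\] }}/${DNS_DOMAIN}/g" \
    example.yaml.in > example.yaml
cat example.yaml   # -> replicas: 1 / domain: cluster.local
```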
cluster/ubuntu/skydns-rc.yaml.template (deleted)
@@ -1,61 +0,0 @@
-apiVersion: v1
-kind: ReplicationController
-metadata:
-  name: kube-dns-v4
-  namespace: kube-system
-  labels:
-    k8s-app: kube-dns
-    version: v4
-    kubernetes.io/cluster-service: "true"
-spec:
-  replicas: {{ pillar['dns_replicas'] }}
-  selector:
-    k8s-app: kube-dns
-    version: v4
-  template:
-    metadata:
-      labels:
-        k8s-app: kube-dns
-        version: v4
-        kubernetes.io/cluster-service: "true"
-    spec:
-      containers:
-      - name: etcd
-        image: gcr.io/google_containers/etcd:2.0.9
-        command:
-        - /usr/local/bin/etcd
-        - -listen-client-urls
-        - http://127.0.0.1:2379,http://127.0.0.1:4001
-        - -advertise-client-urls
-        - http://127.0.0.1:2379,http://127.0.0.1:4001
-        - -initial-cluster-token
-        - skydns-etcd
-      - name: kube2sky
-        image: gcr.io/google_containers/kube2sky:1.10
-        args:
-        # command = "/kube2sky"
-        - -domain={{ pillar['dns_domain'] }}
-        - -kube_master_url=kube_server_url
-      - name: skydns
-        image: gcr.io/google_containers/skydns:2015-03-11-001
-        args:
-        # command = "/skydns"
-        - -machines=http://localhost:4001
-        - -addr=0.0.0.0:53
-        - -domain={{ pillar['dns_domain'] }}.
-        ports:
-        - containerPort: 53
-          name: dns
-          protocol: UDP
-        - containerPort: 53
-          name: dns-tcp
-          protocol: TCP
-        livenessProbe:
-          exec:
-            command:
-            - /bin/sh
-            - -c
-            - nslookup kubernetes.default.svc.{{ pillar['dns_domain'] }} localhost >/dev/null
-          initialDelaySeconds: 30
-          timeoutSeconds: 5
-      dnsPolicy: Default  # Don't use cluster DNS.
cluster/ubuntu/skydns-svc.yaml.template (deleted)
@@ -1,20 +0,0 @@
-apiVersion: v1
-kind: Service
-metadata:
-  name: kube-dns
-  namespace: kube-system
-  labels:
-    k8s-app: kube-dns
-    kubernetes.io/cluster-service: "true"
-    kubernetes.io/name: "KubeDNS"
-spec:
-  selector:
-    k8s-app: kube-dns
-  clusterIP: {{ pillar['dns_server'] }}
-  ports:
-  - name: dns
-    port: 53
-    protocol: UDP
-  - name: dns-tcp
-    port: 53
-    protocol: TCP
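Both local templates go away because 1.0.1 ships canonical versions under `cluster/addons/dns` (the `.yaml.in` files now referenced in deployAddons.sh above). Once deployed, the result can be spot-checked with the bundled kubectl wrapper; a hypothetical verification:

```sh
# List the DNS replication controller, service, and pods created above
cluster/kubectl.sh --namespace=kube-system get rc,services
cluster/kubectl.sh --namespace=kube-system get pods -l k8s-app=kube-dns
```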
cluster/ubuntu/util.sh
@@ -139,7 +139,7 @@ function verify-cluster {
   echo
   echo "Kubernetes cluster is running. The master is running at:"
   echo
-  echo "  http://${MASTER_IP}"
+  echo "  http://${MASTER_IP}:8080"
   echo
 
 }
@@ -183,16 +183,6 @@ function verify-minion(){
   printf "\n"
 }
 
-function genServiceAccountsKey() {
-  SERVICE_ACCOUNT_LOOKUP=${SERVICE_ACCOUNT_LOOKUP:-false}
-  SERVICE_ACCOUNT_KEY=${SERVICE_ACCOUNT_KEY:-"/tmp/kube-serviceaccount.key"}
-  # Generate ServiceAccount key if needed
-  if [[ ! -f "${SERVICE_ACCOUNT_KEY}" ]]; then
-    mkdir -p "$(dirname ${SERVICE_ACCOUNT_KEY})"
-    openssl genrsa -out "${SERVICE_ACCOUNT_KEY}" 2048 2>/dev/null
-  fi
-}
-
 function create-etcd-opts(){
   cat <<EOF > ~/kube/default/etcd
 ETCD_OPTS="-name $1 \
@@ -212,15 +202,17 @@ KUBE_APISERVER_OPTS="--address=0.0.0.0 \
 --logtostderr=true \
 --service-cluster-ip-range=${1} \
 --admission_control=${2} \
---service_account_key_file=/tmp/kube-serviceaccount.key \
---service_account_lookup=false "
+--client-ca-file=/srv/kubernetes/ca.crt \
+--tls-cert-file=/srv/kubernetes/server.cert \
+--tls-private-key-file=/srv/kubernetes/server.key"
 EOF
 }
 
 function create-kube-controller-manager-opts(){
   cat <<EOF > ~/kube/default/kube-controller-manager
 KUBE_CONTROLLER_MANAGER_OPTS="--master=127.0.0.1:8080 \
---service_account_private_key_file=/tmp/kube-serviceaccount.key \
+--root-ca-file=/srv/kubernetes/ca.crt \
+--service-account-private-key-file=/srv/kubernetes/server.key \
 --logtostderr=true"
 EOF
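These helpers write the upstart `/etc/default` files through an unquoted heredoc, so `${1}` and `${2}` expand at generation time rather than when the service starts. A minimal sketch of the pattern (function name and path hypothetical):

```sh
create-example-opts() {
  # Unquoted EOF: ${1} and ${2} are expanded while the file is written.
  cat <<EOF > /tmp/example-default
EXAMPLE_OPTS="--service-cluster-ip-range=${1} \
--admission-control=${2}"
EOF
}
create-example-opts "192.168.3.0/24" "NamespaceLifecycle,LimitRanger"
cat /tmp/example-default
```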
@@ -371,19 +363,22 @@ function provision-master() {
   echo "Deploying master on machine ${MASTER_IP}"
   echo
   ssh $SSH_OPTS $MASTER "mkdir -p ~/kube/default"
-  scp -r $SSH_OPTS ubuntu/config-default.sh ubuntu/util.sh ubuntu/master/* ubuntu/binaries/master/ "${MASTER}:~/kube"
+  scp -r $SSH_OPTS saltbase/salt/generate-cert/make-ca-cert.sh ubuntu/config-default.sh ubuntu/util.sh ubuntu/master/* ubuntu/binaries/master/ "${MASTER}:~/kube"
 
   # remote login to MASTER and use sudo to configue k8s master
   ssh $SSH_OPTS -t $MASTER "source ~/kube/util.sh; \
-  genServiceAccountsKey; \
+  groupadd -f -r kube-cert; \
+  ~/kube/make-ca-cert ${MASTER_IP} IP:${MASTER_IP},IP:${SERVICE_CLUSTER_IP_RANGE%.*}.1,DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.cluster.local; \
   setClusterInfo; \
   create-etcd-opts "${mm[${MASTER_IP}]}" "${MASTER_IP}" "${CLUSTER}"; \
   create-kube-apiserver-opts "${SERVICE_CLUSTER_IP_RANGE}" "${ADMISSION_CONTROL}"; \
   create-kube-controller-manager-opts "${MINION_IPS}"; \
   create-kube-scheduler-opts; \
   create-flanneld-opts; \
-  sudo -p '[sudo] password to copy files and start master: ' cp ~/kube/default/* /etc/default/ && sudo cp ~/kube/init_conf/* /etc/init/ && sudo cp ~/kube/init_scripts/* /etc/init.d/ \
-  && sudo mkdir -p /opt/bin/ && sudo cp ~/kube/master/* /opt/bin/; \
+  sudo -p '[sudo] password to copy files and start master: ' cp ~/kube/default/* /etc/default/ && sudo cp ~/kube/init_conf/* /etc/init/ && sudo cp ~/kube/init_scripts/* /etc/init.d/ ;\
+  sudo groupadd -f -r kube-cert; \
+  sudo ~/kube/make-ca-cert.sh ${MASTER_IP} IP:${MASTER_IP},IP:${SERVICE_CLUSTER_IP_RANGE%.*}.1,DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.cluster.local; \
+  sudo mkdir -p /opt/bin/ && sudo cp ~/kube/master/* /opt/bin/; \
   sudo service etcd start;"
 }
 
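The first address of the service range must appear in the certificate SANs because it becomes the in-cluster `kubernetes` service VIP; `${SERVICE_CLUSTER_IP_RANGE%.*}.1` derives it via suffix stripping. A quick illustration:

```sh
SERVICE_CLUSTER_IP_RANGE=192.168.3.0/24
# '%.*' removes the shortest suffix starting at the last '.', i.e. '.0/24'
echo "${SERVICE_CLUSTER_IP_RANGE%.*}.1"   # -> 192.168.3.1
```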
@@ -412,12 +407,11 @@ function provision-masterandminion() {
   echo "Deploying master and minion on machine ${MASTER_IP}"
   echo
   ssh $SSH_OPTS $MASTER "mkdir -p ~/kube/default"
-  scp -r $SSH_OPTS ubuntu/config-default.sh ubuntu/util.sh ubuntu/master/* ubuntu/reconfDocker.sh ubuntu/minion/* ubuntu/binaries/master/ ubuntu/binaries/minion "${MASTER}:~/kube"
+  scp -r $SSH_OPTS saltbase/salt/generate-cert/make-ca-cert.sh ubuntu/config-default.sh ubuntu/util.sh ubuntu/master/* ubuntu/reconfDocker.sh ubuntu/minion/* ubuntu/binaries/master/ ubuntu/binaries/minion "${MASTER}:~/kube"
 
   # remote login to the node and use sudo to configue k8s
   ssh $SSH_OPTS -t $MASTER "source ~/kube/util.sh; \
   setClusterInfo; \
-  genServiceAccountsKey; \
   create-etcd-opts "${mm[${MASTER_IP}]}" "${MASTER_IP}" "${CLUSTER}"; \
   create-kube-apiserver-opts "${SERVICE_CLUSTER_IP_RANGE}" "${ADMISSION_CONTROL}"; \
   create-kube-controller-manager-opts "${MINION_IPS}"; \
@@ -425,8 +419,10 @@
   create-kubelet-opts "${MASTER_IP}" "${MASTER_IP}" "${DNS_SERVER_IP}" "${DNS_DOMAIN}";
   create-kube-proxy-opts "${MASTER_IP}";\
   create-flanneld-opts; \
-  sudo -p '[sudo] password to copy files and start node: ' cp ~/kube/default/* /etc/default/ && sudo cp ~/kube/init_conf/* /etc/init/ && sudo cp ~/kube/init_scripts/* /etc/init.d/ \
-  && sudo mkdir -p /opt/bin/ && sudo cp ~/kube/master/* /opt/bin/ && sudo cp ~/kube/minion/* /opt/bin/; \
+  sudo -p '[sudo] password to copy files and start node: ' cp ~/kube/default/* /etc/default/ && sudo cp ~/kube/init_conf/* /etc/init/ && sudo cp ~/kube/init_scripts/* /etc/init.d/ ; \
+  sudo groupadd -f -r kube-cert; \
+  sudo ~/kube/make-ca-cert.sh ${MASTER_IP} IP:${MASTER_IP},IP:${SERVICE_CLUSTER_IP_RANGE%.*}.1,DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.cluster.local; \
+  sudo mkdir -p /opt/bin/ && sudo cp ~/kube/master/* /opt/bin/ && sudo cp ~/kube/minion/* /opt/bin/; \
   sudo service etcd start; \
   sudo -b ~/kube/reconfDocker.sh"
 }
docs/getting-started-guides/ubuntu.md
@@ -55,7 +55,7 @@ This document describes how to deploy Kubernetes on ubuntu nodes, including 1 Ku
 
 *3 These guide is tested OK on Ubuntu 14.04 LTS 64bit server, but it can not work with Ubuntu 15 which use systemd instead of upstart and we are fixing this*
 
-*4 Dependencies of this guide: etcd-2.0.12, flannel-0.4.0, k8s-0.19.3, but it may work with higher versions*
+*4 Dependencies of this guide: etcd-2.0.12, flannel-0.4.0, k8s-1.0.1, but it may work with higher versions*
 
 *5 All the remote servers can be ssh logged in without a password by using key authentication*
 
@@ -64,12 +64,13 @@ This document describes how to deploy Kubernetes on ubuntu nodes, including 1 Ku
 
 #### Make *kubernetes* , *etcd* and *flanneld* binaries
 
-First clone the Kubernetes github repo, `$ git clone https://github.com/GoogleCloudPlatform/kubernetes.git`
+First clone the kubernetes github repo, `$ git clone https://github.com/GoogleCloudPlatform/kubernetes.git`
 
 then `$ cd kubernetes/cluster/ubuntu`.
 
 Then run `$ ./build.sh`, this will download all the needed binaries into `./binaries`.
 
-You can customize your etcd version, flannel version, k8s version by changing variable `ETCD_VERSION` , `FLANNEL_VERSION` and `K8S_VERSION` in build.sh, default etcd version is 2.0.12, flannel version is 0.4.0 and K8s version is 0.19.3.
+You can customize your etcd version, flannel version, k8s version by changing variable `ETCD_VERSION` , `FLANNEL_VERSION` and `K8S_VERSION` in build.sh, default etcd version is 2.0.12, flannel version is 0.4.0 and K8s version is 1.0.1.
 
 Please make sure that there are `kube-apiserver`, `kube-controller-manager`, `kube-scheduler`, `kubelet`, `kube-proxy`, `etcd`, `etcdctl` and `flannel` in the binaries/master or binaries/minion directory.
 
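Because those three variables are ordinary `${VAR:-...}` defaults in build.sh, they can also be overridden for a single run instead of editing the file; a hypothetical invocation:

```sh
# Pin all three tool versions for one build without touching build.sh
ETCD_VERSION=2.0.12 FLANNEL_VERSION=0.4.0 K8S_VERSION=1.0.1 ./build.sh
```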
@@ -90,11 +91,11 @@ First configure the cluster information in cluster/ubuntu/config-default.sh, bel
 ```sh
 export nodes="vcap@10.10.103.250 vcap@10.10.103.162 vcap@10.10.103.223"
 
-export roles="ai i i"
+export role="ai i i"
 
 export NUM_MINIONS=${NUM_MINIONS:-3}
 
-export SERVICE_CLUSTER_IP_RANGE=11.1.1.0/24
+export SERVICE_CLUSTER_IP_RANGE=192.168.3.0/24
 
 export FLANNEL_NET=172.16.0.0/16
 ```
@@ -103,7 +104,7 @@ The first variable `nodes` defines all your cluster nodes, MASTER node comes fir
 
 Then the `roles ` variable defines the role of above machine in the same order, "ai" stands for machine acts as both master and node, "a" stands for master, "i" stands for node. So they are just defined the k8s cluster as the table above described.
 
-The `NUM_MINIONS` variable defines the total number of nodes.
+The `NUM_MINIONS` variable defines the total number of minion nodes.
 
 The `SERVICE_CLUSTER_IP_RANGE` variable defines the Kubernetes service IP range. Please make sure that you do have a valid private ip range defined here, because some IaaS provider may reserve private ips. You can use below three private network range according to rfc1918. Besides you'd better not choose the one that conflicts with your own private network range.
 
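For reference, the three RFC 1918 blocks the context line alludes to, shown as illustrative values for the variables in question (the chosen subnets are examples, not requirements):

```sh
# RFC 1918 private ranges: 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16.
# Pick a service range inside one of them that collides with neither
# FLANNEL_NET nor your own LAN, e.g.:
export SERVICE_CLUSTER_IP_RANGE=192.168.3.0/24   # from 192.168.0.0/16
export FLANNEL_NET=172.16.0.0/16                 # from 172.16.0.0/12
```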