mirror of https://github.com/k3s-io/k3s
Remove nginx and replace basic auth with bearer token auth for GCE.
- Configure the apiserver to listen securely on 443 instead of 6443.
- Configure the kubelet to connect to 443 instead of 6443.
- Update documentation to refer to bearer tokens instead of basic auth.
parent 4ca8fbbec6
commit dc45f7f9e6
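
The practical effect of the change is easiest to see in the health-check request that `kube-up` makes against the master. A minimal before/after sketch using the same variables the scripts below rely on (illustrative only, not an excerpt from the diff):

```bash
# Before: HTTP basic auth against the apiserver (proxied by nginx on 443).
curl --insecure --user "${KUBE_USER}:${KUBE_PASSWORD}" \
  "https://${KUBE_MASTER_IP}/api/v1beta1/pods"

# After: bearer token auth, with the apiserver itself listening on 443 on GCE.
curl --insecure -H "Authorization: Bearer ${KUBE_BEARER_TOKEN}" \
  "https://${KUBE_MASTER_IP}/api/v1beta1/pods"
```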

@@ -28,6 +28,7 @@ DEFAULT_KUBECONFIG="${HOME}/.kube/config"
# Assumed vars:
# KUBE_USER
# KUBE_PASSWORD
# KUBE_BEARER_TOKEN
# KUBE_MASTER_IP
# KUBECONFIG
# CONTEXT
@@ -56,10 +57,17 @@ function create-kubeconfig() {
"--embed-certs=true"
)
fi
local user_args=(
local user_args=()
if [[ -z "${KUBE_USER:-}" || -z "${KUBE_PASSWORD:-}" ]]; then
user_args+=(
"--token=${KUBE_BEARER_TOKEN}"
)
else
user_args+=(
"--username=${KUBE_USER}"
"--password=${KUBE_PASSWORD}"
)
)
fi
if [[ ! -z "${KUBE_CERT:-}" && ! -z "${KUBE_KEY:-}" ]]; then
user_args+=(
"--client-certificate=${KUBE_CERT}"
@@ -124,3 +132,28 @@ function get-kubeconfig-basicauth() {
KUBE_PASSWORD=''
fi
}

# Get the bearer token for the current-context in kubeconfig if one exists.
# Assumed vars:
# KUBECONFIG # if unset, defaults to global
#
# Vars set:
# KUBE_BEARER_TOKEN
#
# KUBE_BEARER_TOKEN will be empty if no current-context is set, or the
# current-context user does not exist or contain a bearer token entry.
function get-kubeconfig-bearertoken() {
export KUBECONFIG=${KUBECONFIG:-$DEFAULT_KUBECONFIG}
# Template to safely extract the token for the current-context user.
# The long chain of 'with' commands avoids indexing nil if any of the
# entries ("current-context", "contexts"."current-context", "users", etc)
# is missing.
# Note: we save dot ('.') to $root because the 'with' action overrides it.
# See http://golang.org/pkg/text/template/.
local token='{{$dot := .}}{{with $ctx := index $dot "current-context"}}{{range $element := (index $dot "contexts")}}{{ if eq .name $ctx }}{{ with $user := .context.user }}{{range $element := (index $dot "users")}}{{ if eq .name $user }}{{ index . "user" "token" }}{{end}}{{end}}{{end}}{{end}}{{end}}{{end}}'
KUBE_BEARER_TOKEN=$("${KUBE_ROOT}/cluster/kubectl.sh" config view -o template --template="${token}")
# Handle empty/missing token
if [[ "${KUBE_BEARER_TOKEN}" == '<no value>' ]]; then
KUBE_BEARER_TOKEN=''
fi
}
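
A minimal usage sketch for the helper above, assuming it has been sourced into the current shell from the cluster scripts (the calling pattern, not the function itself, is the assumption here):

```bash
# Read the token for the current kubeconfig context, if any (sketch, not part of the diff).
get-kubeconfig-bearertoken
if [[ -n "${KUBE_BEARER_TOKEN}" ]]; then
  echo "reusing bearer token from the current kubeconfig context"
else
  echo "no bearer token found; get-bearer-token below will generate a fresh one"
fi
```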

@@ -251,23 +251,17 @@ EOF
}

# This should only happen on cluster initialization. Uses
# MASTER_HTPASSWORD to generate the nginx/htpasswd file, and the
# KUBELET_TOKEN, plus /dev/urandom, to generate known_tokens.csv
# (KNOWN_TOKENS_FILE). After the first boot and on upgrade, these
# files exist on the master-pd and should never be touched again
# (except perhaps an additional service account, see NB below.)
# KUBE_BEARER_TOKEN, KUBELET_TOKEN, and /dev/urandom to generate
# known_tokens.csv (KNOWN_TOKENS_FILE). After the first boot and
# on upgrade, this file exists on the master-pd and should never
# be touched again (except perhaps an additional service account,
# see NB below.)
function create-salt-auth() {
local -r htpasswd_file="/srv/salt-overlay/salt/nginx/htpasswd"

if [ ! -e "${htpasswd_file}" ]; then
mkdir -p /srv/salt-overlay/salt/nginx
echo "${MASTER_HTPASSWD}" > "${htpasswd_file}"
fi

if [ ! -e "${KNOWN_TOKENS_FILE}" ]; then
mkdir -p /srv/salt-overlay/salt/kube-apiserver
(umask 077;
echo "${KUBELET_TOKEN},kubelet,kubelet" > "${KNOWN_TOKENS_FILE}")
echo "${KUBE_BEARER_TOKEN},admin,admin" > "${KNOWN_TOKENS_FILE}";
echo "${KUBELET_TOKEN},kubelet,kubelet" >> "${KNOWN_TOKENS_FILE}")

mkdir -p /srv/salt-overlay/salt/kubelet
kubelet_auth_file="/srv/salt-overlay/salt/kubelet/kubernetes_auth"
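
For context, the apiserver's token file takes one `token,user,uid` line per identity, so after `create-salt-auth` runs the generated file looks roughly like the sketch below (token values are placeholders):

```bash
# Hypothetical contents of ${KNOWN_TOKENS_FILE} after this change (placeholder tokens):
cat <<'EOF'
AbCdEf0123456789AbCdEf0123456789,admin,admin
ZyXwVu9876543210ZyXwVu9876543210,kubelet,kubelet
EOF
```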

@@ -272,30 +272,21 @@ function get-password {
fi
}

# Set MASTER_HTPASSWD
function set-master-htpasswd {
python "${KUBE_ROOT}/third_party/htpasswd/htpasswd.py" \
-b -c "${KUBE_TEMP}/htpasswd" "$KUBE_USER" "$KUBE_PASSWORD"
local htpasswd
MASTER_HTPASSWD=$(cat "${KUBE_TEMP}/htpasswd")
}

# Generate authentication token for admin user. Will
# read from $HOME/.kubernetes_auth if available.
# Ensure that we have a bearer token created for validating to the master.
# Will read from kubeconfig for the current context if available.
#
# Assumed vars
# KUBE_ROOT
#
# Vars set:
# KUBE_ADMIN_TOKEN
function get-admin-token {
local file="$HOME/.kubernetes_auth"
if [[ -r "$file" ]]; then
KUBE_ADMIN_TOKEN=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["BearerToken"]')
return
# KUBE_BEARER_TOKEN
function get-bearer-token() {
get-kubeconfig-bearertoken
if [[ -z "${KUBE_BEARER_TOKEN:-}" ]]; then
KUBE_BEARER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
fi
KUBE_ADMIN_TOKEN=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(32))')
}


# Wait for background jobs to finish. Exit with
# an error status if any of the jobs failed.
function wait-for-jobs {
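
The random token in `get-bearer-token` comes from a small shell pipeline; an annotated sketch of what each stage does (illustration only):

```bash
#!/usr/bin/env bash
# 1. dd reads 128 bytes of kernel randomness.
# 2. base64 maps them to printable characters.
# 3. tr strips '=' padding plus '+' and '/', leaving only [A-Za-z0-9].
# 4. the second dd keeps the first 32 characters as the bearer token.
token=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
echo "generated bearer token: ${token}"
```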

@@ -482,7 +473,7 @@ ENABLE_CLUSTER_DNS: $(yaml-quote ${ENABLE_CLUSTER_DNS:-false})
DNS_REPLICAS: $(yaml-quote ${DNS_REPLICAS:-})
DNS_SERVER_IP: $(yaml-quote ${DNS_SERVER_IP:-})
DNS_DOMAIN: $(yaml-quote ${DNS_DOMAIN:-})
MASTER_HTPASSWD: $(yaml-quote ${MASTER_HTPASSWD})
KUBE_BEARER_TOKEN: $(yaml-quote ${KUBE_BEARER_TOKEN})
ADMISSION_CONTROL: $(yaml-quote ${ADMISSION_CONTROL:-})
MASTER_IP_RANGE: $(yaml-quote ${MASTER_IP_RANGE})
EOF
@@ -516,8 +507,7 @@ function write-node-env {
# variables are set:
# ensure-temp-dir
# detect-project
# get-password
# set-master-htpasswd
# get-bearer-token
#
function create-master-instance {
local address_opt=""
@@ -550,8 +540,7 @@ function kube-up {
ensure-temp-dir
detect-project

get-password
set-master-htpasswd
get-bearer-token

# Make sure we have the tar files staged on Google Storage
find-release-tars
@@ -679,8 +668,9 @@ function kube-up {
echo " up."
echo

until curl --insecure --user "${KUBE_USER}:${KUBE_PASSWORD}" --max-time 5 \
--fail --output /dev/null --silent "https://${KUBE_MASTER_IP}/api/v1beta1/pods"; do
until curl --insecure -H "Authorization: Bearer ${KUBE_BEARER_TOKEN}" \
--max-time 5 --fail --output /dev/null --silent \
"https://${KUBE_MASTER_IP}/api/v1beta1/pods"; do
printf "."
sleep 2
done
@@ -859,8 +849,7 @@ function kube-push {
detect-project
detect-master
detect-minion-names
get-password
set-master-htpasswd
get-bearer-token

# Make sure we have the tar files staged on Google Storage
find-release-tars

@@ -44,7 +44,11 @@
{% set cert_file = "--tls_cert_file=/srv/kubernetes/server.cert" -%}
{% set key_file = "--tls_private_key_file=/srv/kubernetes/server.key" -%}

{% set secure_port = "--secure_port=6443" -%}
{% set secure_port = "6443" -%}
{% if grains['cloud'] is defined and grains['cloud'] == 'gce' %}
{% set secure_port = "443" -%}
{% endif -%}

{% set token_auth_file = "--token_auth_file=/dev/null" -%}

{% if grains.cloud is defined -%}
@@ -77,24 +81,24 @@
"/kube-apiserver",
"{{address}}",
"{{etcd_servers}}",
"{{ cloud_provider }}",
"{{ cloud_config }}",
"{{ runtime_config }}",
"{{cloud_provider}}",
"{{cloud_config}}",
"{{runtime_config}}",
"{{admission_control}}",
"--allow_privileged={{pillar['allow_privileged']}}",
"{{portal_net}}",
"{{cluster_name}}",
"{{cert_file}}",
"{{key_file}}",
"{{secure_port}}",
"--secure_port={{secure_port}}",
"{{token_auth_file}}",
"{{publicAddressOverride}}",
"{{pillar['log_level']}}"
],
"ports":[
{ "name": "https",
"containerPort": 6443,
"hostPort": 6443},{
"containerPort": {{secure_port}},
"hostPort": {{secure_port}}},{
"name": "http",
"containerPort": 7080,
"hostPort": 7080},{

@@ -4,15 +4,22 @@
{% endif -%}

{% if grains.api_servers is defined -%}
{% set api_servers = "--api_servers=https://" + grains.api_servers + ":6443" -%}
{% set api_servers = "--api_servers=https://" + grains.api_servers -%}
{% elif grains.apiservers is defined -%} # TODO(remove after 0.16.0): Deprecated form
{% set api_servers = "--api_servers=https://" + grains.apiservers + ":6443" -%}
{% set api_servers = "--api_servers=https://" + grains.apiservers -%}
{% elif grains['roles'][0] == 'kubernetes-master' -%}
{% set master_ipv4 = salt['grains.get']('fqdn_ip4')[0] -%}
{% set api_servers = "--api_servers=https://" + master_ipv4 + ":6443" -%}
{% set api_servers = "--api_servers=https://" + master_ipv4 -%}
{% else -%}
{% set ips = salt['mine.get']('roles:kubernetes-master', 'network.ip_addrs', 'grain').values() -%}
{% set api_servers = "--api_servers=https://" + ips[0][0] + ":6443" -%}
{% set api_servers = "--api_servers=https://" + ips[0][0] -%}
{% endif -%}

# TODO: remove nginx for other cloud providers.
{% if grains['cloud'] is defined and grains['cloud'] == 'gce' -%}
{% set api_servers_with_port = api_servers -%}
{% else -%}
{% set api_servers_with_port = api_servers + ":6443" -%}
{% endif -%}

{% set config = "--config=/etc/kubernetes/manifests" -%}
@@ -33,4 +40,4 @@
{% set docker_root = " --docker_root=" + grains.docker_root -%}
{% endif -%}

DAEMON_ARGS="{{daemon_args}} {{api_servers}} {{hostname_override}} {{config}} --allow_privileged={{pillar['allow_privileged']}} {{pillar['log_level']}} {{cluster_dns}} {{cluster_domain}} {{docker_root}}"
DAEMON_ARGS="{{daemon_args}} {{api_servers_with_port}} {{hostname_override}} {{config}} --allow_privileged={{pillar['allow_privileged']}} {{pillar['log_level']}} {{cluster_dns}} {{cluster_domain}} {{docker_root}}"
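
To see what the Jinja logic above renders to, a small shell sketch of the `api_servers_with_port` value the kubelet ends up with (the address is a placeholder, and the shell form is only a stand-in for the template):

```bash
# Sketch of the template's effect (placeholder values; not part of the diff).
cloud="gce"
master_ip="203.0.113.10"
api_servers="--api_servers=https://${master_ip}"
if [[ "${cloud}" != "gce" ]]; then
  api_servers+=":6443"   # non-GCE clusters still talk to the apiserver's 6443 secure port
fi
echo "${api_servers}"    # on GCE this is https://203.0.113.10, i.e. implicit port 443
```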

@@ -33,7 +33,9 @@ base:
- kube-controller-manager
- kube-scheduler
- monit
{% if grains['cloud'] is defined and grains['cloud'] != 'gce' %}
- nginx
{% endif %}
- cadvisor
- kube-client-tools
- kube-master-addons

@@ -20,18 +20,20 @@ HTTP on 3 ports:
- only GET requests are allowed.
- requests are rate limited
3. Secure Port
- default is port 6443, change with `-secure_port`
- default is port 443, change with `-secure_port`
- default IP is first non-localhost network interface, change with `-public_address_override`
- serves HTTPS. Set cert with `-tls_cert_file` and key with `-tls_private_key_file`.
- uses token-file based [authentication](./authentication.md).
- uses token-file or client-certificate based [authentication](./authentication.md).
- uses policy-based [authorization](./authorization.md).

## Proxies and Firewall rules

Additionally, in typical configurations (i.e. GCE), there is a proxy (nginx) running
Additionally, in some configurations there is a proxy (nginx) running
on the same machine as the apiserver process. The proxy serves HTTPS protected
by Basic Auth on port 443, and proxies to the apiserver on localhost:8080.
Typically, firewall rules will allow HTTPS access to port 443.
by Basic Auth on port 443, and proxies to the apiserver on localhost:8080. In
these configurations the secure port is typically set to 6443.

A firewall rule is typically configured to allow external HTTPS access to port 443.

The above are defaults and reflect how Kubernetes is deployed to GCE using
kube-up.sh. Other cloud providers may vary.
@@ -42,15 +44,15 @@ There are three differently configured serving ports because there are a
variety of uses cases:
1. Clients outside of a Kubernetes cluster, such as human running `kubectl`
on desktop machine. Currently, accesses the Localhost Port via a proxy (nginx)
running on the `kubernetes-master` machine. Proxy uses Basic Auth.
running on the `kubernetes-master` machine. Proxy uses bearer token authentication.
2. Processes running in Containers on Kubernetes that need to do read from
the apiserver. Currently, these can use Readonly Port.
3. Scheduler and Controller-manager processes, which need to do read-write
API operations. Currently, these have to run on the
API operations. Currently, these have to run on the
operations on the apiserver. Currently, these have to run on the same
host as the apiserver and use the Localhost Port.
4. Kubelets, which need to do read-write API operations and are necessarily
on different machines than the apiserver. Kubelet uses the Secure Port
4. Kubelets, which need to do read-write API operations and are necessarily
on different machines than the apiserver. Kubelet uses the Secure Port
to get their pods, to find the services that a pod can see, and to
write events. Credentials are distributed to kubelets at cluster
setup time.
@@ -59,13 +61,14 @@ variety of uses cases:
- Policy will limit the actions kubelets can do via the authed port.
- Kube-proxy currently uses the readonly port to read services and endpoints,
but will eventually use the auth port.
- Kubelets may change from token-based authentication to cert-based-auth.
- Kubelets will change from token-based authentication to cert-based-auth.
- Scheduler and Controller-manager will use the Secure Port too. They
will then be able to run on different machines than the apiserver.
- A general mechanism will be provided for [giving credentials to
pods](
https://github.com/GoogleCloudPlatform/kubernetes/issues/1907).
- The Readonly Port will no longer be needed and will be removed.
- The Readonly Port will no longer be needed and [will be removed](
https://github.com/GoogleCloudPlatform/kubernetes/issues/5921).
- Clients, like kubectl, will all support token-based auth, and the
Localhost will no longer be needed, and will not be the default.
However, the localhost port may continue to be an option for
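
To make the port split concrete, a hedged sketch of hitting the read-only and secure ports described above; the master address and certificate path are placeholders, and 7080 is assumed from the "http" port in the GCE apiserver manifest earlier in this diff:

```bash
APISERVER="kubernetes-master.example.com"   # placeholder address

# Read-only port: plain HTTP, GET only, no credentials.
curl "http://${APISERVER}:7080/api/v1beta1/pods"

# Secure port (443 on GCE after this change): HTTPS plus a bearer token from known_tokens.csv.
curl --cacert /path/to/ca.crt \
  -H "Authorization: Bearer ${KUBE_BEARER_TOKEN}" \
  "https://${APISERVER}:443/api/v1beta1/pods"
```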

@@ -2,40 +2,40 @@

Client access to a running kubernetes cluster can be shared by copying
the `kubectl` client config bundle ([.kubeconfig](kubeconfig-file.md)).
This config bundle lives in `$HOME/.kube/.kubeconfig`, and is generated
by `cluster/kube-up.sh`. Sample steps for sharing `.kubeconfig` below.
This config bundle lives in `$HOME/.kube/config`, and is generated
by `cluster/kube-up.sh`. Sample steps for sharing `kubeconfig` below.

**1. Create a cluster**
```bash
cluster/kube-up.sh
```
**2. Copy .kubeconfig to new host**
**2. Copy `kubeconfig` to new host**
```bash
scp $HOME/.kube/.kubeconfig user@remotehost:/path/to/.kubeconfig
scp $HOME/.kube/config user@remotehost:/path/to/.kube/config
```

**3. On new host, make copied `.kubeconfig` available to `kubectl`**
**3. On new host, make copied `config` available to `kubectl`**

* Option A: copy to default location
```bash
mv /path/to/.kubeconfig $HOME/.kube/.kubeconfig
mv /path/to/.kube/config $HOME/.kube/config
```
* Option B: copy to working directory (from which kubectl is run)
```bash
mv /path/to/.kubeconfig $PWD
mv /path/to/.kube/config $PWD
```
* Option C: manually pass `.kubeconfig` location to `.kubectl`
* Option C: manually pass `kubeconfig` location to `.kubectl`
```bash
# via environment variable
export KUBECONFIG=/path/to/.kubeconfig
export KUBECONFIG=/path/to/.kube/config

# via commandline flag
kubectl ... --kubeconfig=/path/to/.kubeconfig
kubectl ... --kubeconfig=/path/to/.kube/config
```

## Manually Generating `.kubeconfig`
## Manually Generating `kubeconfig`

`.kubeconfig` is generated by `kube-up` but you can generate your own
`kubeconfig` is generated by `kube-up` but you can generate your own
using (any desired subset of) the following commands.

```bash
@@ -46,15 +46,15 @@ kubectl config set-cluster $CLUSTER_NICK
--embed-certs=true \
# Or if tls not needed, replace --certificate-authority and --embed-certs with
--insecure-skip-tls-verify=true
--kubeconfig=/path/to/standalone/.kubeconfig
--kubeconfig=/path/to/standalone/.kube/config

# create user entry
kubectl config set-credentials $USER_NICK
# basic auth credentials, generated on kube master
# bearer token credentials, generated on kube master
--token=$token \
# use either username|password or token, not both
--username=$username \
--password=$password \
# use either username|password or token, not both
--token=$token \
--client-certificate=/path/to/crt_file \
--client-key=/path/to/key_file \
--embed-certs=true
@@ -65,42 +65,42 @@ kubectl config set-context $CONTEXT_NAME --cluster=$CLUSTER_NICKNAME --user=$USE
```
Notes:
* The `--embed-certs` flag is needed to generate a standalone
`.kubeconfig`, that will work as-is on another host.
`kubeconfig`, that will work as-is on another host.
* `--kubeconfig` is both the preferred file to load config from and the file to
save config too. In the above commands the `--kubeconfig` file could be
omitted if you first run
```bash
export KUBECONFIG=/path/to/standalone/.kubeconfig
export KUBECONFIG=/path/to/standalone/.kube/config
```
* The ca_file, key_file, and cert_file referrenced above are generated on the
kube master at cluster turnup. They can be found on the master under
`/srv/kubernetes`. Basic auth/token are also generated on the kube master.
`/srv/kubernetes`. Bearer token/basic auth are also generated on the kube master.

For more details on `.kubeconfig` see [kubeconfig-file.md](kubeconfig-file.md),
For more details on `kubeconfig` see [kubeconfig-file.md](kubeconfig-file.md),
and/or run `kubectl config -h`.

## Merging `.kubeconfig` Example
## Merging `kubeconfig` Example

`kubectl` loads and merges config from the following locations (in order)

1. `--kubeconfig=path/to/kubeconfig` commandline flag
2. `KUBECONFIG=path/to/kubeconfig` env variable
1. `--kubeconfig=path/to/.kube/config` commandline flag
2. `KUBECONFIG=path/to/.kube/config` env variable
3. `$PWD/.kubeconfig`
4. `$HOME/.kube/.kubeconfig`
4. `$HOME/.kube/config`

If you create clusters A, B on host1, and clusters C, D on host2, you can
make all four clusters available on both hosts by running

```bash
# on host2, copy host1's default kubeconfig, and merge it from env
scp host1:/path/to/home1/.kube/.kubeconfig path/to/other/.kubeconfig
scp host1:/path/to/home1/.kube/config path/to/other/.kube/config

export $KUBECONFIG=path/to/other/.kubeconfig
export $KUBECONFIG=path/to/other/.kube/config

# on host1, copy host2's default kubeconfig and merge it from env
scp host2:/path/to/home2/.kube/.kubeconfig path/to/other/.kubeconfig
scp host2:/path/to/home2/.kube/config path/to/other/.kube/config

export $KUBECONFIG=path/to/other/.kubeconfig
export $KUBECONFIG=path/to/other/.kube/config
```
Detailed examples and explanation of `.kubeconfig` loading/merging rules can be found in [kubeconfig-file.md](https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/kubeconfig-file.md).
Detailed examples and explanation of `kubeconfig` loading/merging rules can be found in [kubeconfig-file.md](https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/kubeconfig-file.md).
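
As a final sanity check after copying or merging a config, something like the following (illustrative commands only) confirms the shared credentials still work:

```bash
export KUBECONFIG=$HOME/.kube/config
kubectl config view   # inspect the merged clusters, users, and contexts
kubectl get pods      # should authenticate with the current context's token or certs
```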