Remove nginx and replace basic auth with bearer token auth for GCE.

- Configure the apiserver to listen securely on 443 instead of 6443.
- Configure the kubelet to connect to 443 instead of 6443.
- Update documentation to refer to bearer tokens instead of basic auth.
Robert Bailey 2015-04-17 14:04:14 -07:00
parent 4ca8fbbec6
commit dc45f7f9e6
8 changed files with 127 additions and 95 deletions
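
The practical effect for a GCE client: authentication now goes straight to the apiserver's secure port with a bearer token instead of through the nginx basic-auth proxy. A minimal sketch, mirroring the curl probe added to kube-up below (master IP and credential variables are placeholders):

```bash
# Before this change: basic auth against the nginx proxy on port 443.
curl --insecure --user "${KUBE_USER}:${KUBE_PASSWORD}" \
    "https://${KUBE_MASTER_IP}/api/v1beta1/pods"

# After this change: bearer token sent straight to the apiserver, which now
# serves port 443 itself on GCE.
curl --insecure -H "Authorization: Bearer ${KUBE_BEARER_TOKEN}" \
    "https://${KUBE_MASTER_IP}/api/v1beta1/pods"
```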


@@ -28,6 +28,7 @@ DEFAULT_KUBECONFIG="${HOME}/.kube/config"
 # Assumed vars:
 #   KUBE_USER
 #   KUBE_PASSWORD
+#   KUBE_BEARER_TOKEN
 #   KUBE_MASTER_IP
 #   KUBECONFIG
 #   CONTEXT
@@ -56,10 +57,17 @@ function create-kubeconfig() {
       "--embed-certs=true"
     )
   fi
-  local user_args=(
-    "--username=${KUBE_USER}"
-    "--password=${KUBE_PASSWORD}"
-  )
+  local user_args=()
+  if [[ -z "${KUBE_USER:-}" || -z "${KUBE_PASSWORD:-}" ]]; then
+    user_args+=(
+      "--token=${KUBE_BEARER_TOKEN}"
+    )
+  else
+    user_args+=(
+      "--username=${KUBE_USER}"
+      "--password=${KUBE_PASSWORD}"
+    )
+  fi
   if [[ ! -z "${KUBE_CERT:-}" && ! -z "${KUBE_KEY:-}" ]]; then
     user_args+=(
       "--client-certificate=${KUBE_CERT}"
@@ -124,3 +132,28 @@ function get-kubeconfig-basicauth() {
     KUBE_PASSWORD=''
   fi
 }
+
+# Get the bearer token for the current-context in kubeconfig if one exists.
+# Assumed vars:
+#   KUBECONFIG  # if unset, defaults to global
+#
+# Vars set:
+#   KUBE_BEARER_TOKEN
+#
+# KUBE_BEARER_TOKEN will be empty if no current-context is set, or the
+# current-context user does not exist or contain a bearer token entry.
+function get-kubeconfig-bearertoken() {
+  export KUBECONFIG=${KUBECONFIG:-$DEFAULT_KUBECONFIG}
+  # Template to safely extract the token for the current-context user.
+  # The long chain of 'with' commands avoids indexing nil if any of the
+  # entries ("current-context", "contexts"."current-context", "users", etc)
+  # is missing.
+  # Note: we save dot ('.') to $root because the 'with' action overrides it.
+  # See http://golang.org/pkg/text/template/.
+  local token='{{$dot := .}}{{with $ctx := index $dot "current-context"}}{{range $element := (index $dot "contexts")}}{{ if eq .name $ctx }}{{ with $user := .context.user }}{{range $element := (index $dot "users")}}{{ if eq .name $user }}{{ index . "user" "token" }}{{end}}{{end}}{{end}}{{end}}{{end}}{{end}}'
+  KUBE_BEARER_TOKEN=$("${KUBE_ROOT}/cluster/kubectl.sh" config view -o template --template="${token}")
+  # Handle empty/missing token
+  if [[ "${KUBE_BEARER_TOKEN}" == '<no value>' ]]; then
+    KUBE_BEARER_TOKEN=''
+  fi
+}
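
A quick way to sanity-check the new helper is to point it at a throwaway kubeconfig whose current-context user already carries a token; everything below is an illustrative sketch (the checkout path, cluster/user names, and token value are assumptions, not part of the change):

```bash
# Build a minimal kubeconfig with a token user, then confirm the helper reads it.
export KUBE_ROOT=/path/to/kubernetes   # checkout containing cluster/common.sh
export KUBECONFIG=$(mktemp)

kubectl config set-cluster test --server=https://127.0.0.1 --insecure-skip-tls-verify=true
kubectl config set-credentials test-admin --token=abc123
kubectl config set-context test --cluster=test --user=test-admin
kubectl config use-context test

source "${KUBE_ROOT}/cluster/common.sh"   # the helper shells out to cluster/kubectl.sh
get-kubeconfig-bearertoken
echo "${KUBE_BEARER_TOKEN}"               # expected output: abc123
```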


@@ -251,23 +251,17 @@ EOF
 }

 # This should only happen on cluster initialization. Uses
-# MASTER_HTPASSWD to generate the nginx/htpasswd file, and the
-# KUBELET_TOKEN, plus /dev/urandom, to generate known_tokens.csv
-# (KNOWN_TOKENS_FILE). After the first boot and on upgrade, these
-# files exist on the master-pd and should never be touched again
-# (except perhaps an additional service account, see NB below.)
+# KUBE_BEARER_TOKEN, KUBELET_TOKEN, and /dev/urandom to generate
+# known_tokens.csv (KNOWN_TOKENS_FILE). After the first boot and
+# on upgrade, this file exists on the master-pd and should never
+# be touched again (except perhaps an additional service account,
+# see NB below.)
 function create-salt-auth() {
-  local -r htpasswd_file="/srv/salt-overlay/salt/nginx/htpasswd"
-
-  if [ ! -e "${htpasswd_file}" ]; then
-    mkdir -p /srv/salt-overlay/salt/nginx
-    echo "${MASTER_HTPASSWD}" > "${htpasswd_file}"
-  fi
-
   if [ ! -e "${KNOWN_TOKENS_FILE}" ]; then
     mkdir -p /srv/salt-overlay/salt/kube-apiserver
     (umask 077;
-      echo "${KUBELET_TOKEN},kubelet,kubelet" > "${KNOWN_TOKENS_FILE}")
+      echo "${KUBE_BEARER_TOKEN},admin,admin" > "${KNOWN_TOKENS_FILE}";
+      echo "${KUBELET_TOKEN},kubelet,kubelet" >> "${KNOWN_TOKENS_FILE}")

     mkdir -p /srv/salt-overlay/salt/kubelet
     kubelet_auth_file="/srv/salt-overlay/salt/kubelet/kubernetes_auth"
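
For context, the file written here is the apiserver's `--token_auth_file` input, one `token,user,uid` triple per line. With made-up token values, the result on the master looks roughly like this (the concrete path is an assumption):

```bash
# Illustrative contents only -- real tokens are random values created at turn-up.
cat "${KNOWN_TOKENS_FILE}"   # e.g. /srv/salt-overlay/salt/kube-apiserver/known_tokens.csv
# Ylr9gBnPq2kTAfX0c2JyFhD5wG8eSaUo,admin,admin
# Q7wLm3ZdKs8vRtJ1tQvX0mZr7LpNc2Jy,kubelet,kubelet
```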


@@ -272,30 +272,21 @@ function get-password {
   fi
 }

-# Set MASTER_HTPASSWD
-function set-master-htpasswd {
-  python "${KUBE_ROOT}/third_party/htpasswd/htpasswd.py" \
-    -b -c "${KUBE_TEMP}/htpasswd" "$KUBE_USER" "$KUBE_PASSWORD"
-  local htpasswd
-  MASTER_HTPASSWD=$(cat "${KUBE_TEMP}/htpasswd")
-}
-
-# Generate authentication token for admin user. Will
-# read from $HOME/.kubernetes_auth if available.
+# Ensure that we have a bearer token created for validating to the master.
+# Will read from kubeconfig for the current context if available.
+#
+# Assumed vars
+#   KUBE_ROOT
 #
 # Vars set:
-#   KUBE_ADMIN_TOKEN
-function get-admin-token {
-  local file="$HOME/.kubernetes_auth"
-  if [[ -r "$file" ]]; then
-    KUBE_ADMIN_TOKEN=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["BearerToken"]')
-    return
+#   KUBE_BEARER_TOKEN
+function get-bearer-token() {
+  get-kubeconfig-bearertoken
+  if [[ -z "${KUBE_BEARER_TOKEN:-}" ]]; then
+    KUBE_BEARER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
   fi
-  KUBE_ADMIN_TOKEN=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(32))')
 }

 # Wait for background jobs to finish. Exit with
 # an error status if any of the jobs failed.
 function wait-for-jobs {
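
The fallback generator in get-bearer-token can be exercised on its own; it emits a 32-character token with `=`, `+`, and `/` stripped, so it drops cleanly into known_tokens.csv:

```bash
# Stand-alone run of the same pipeline get-bearer-token falls back to.
dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null
# prints something like: 3bK9tQvX0mZr7LpNc2JyFhD5wG8eSaUo (no trailing newline)
```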
@@ -482,7 +473,7 @@ ENABLE_CLUSTER_DNS: $(yaml-quote ${ENABLE_CLUSTER_DNS:-false})
 DNS_REPLICAS: $(yaml-quote ${DNS_REPLICAS:-})
 DNS_SERVER_IP: $(yaml-quote ${DNS_SERVER_IP:-})
 DNS_DOMAIN: $(yaml-quote ${DNS_DOMAIN:-})
-MASTER_HTPASSWD: $(yaml-quote ${MASTER_HTPASSWD})
+KUBE_BEARER_TOKEN: $(yaml-quote ${KUBE_BEARER_TOKEN})
 ADMISSION_CONTROL: $(yaml-quote ${ADMISSION_CONTROL:-})
 MASTER_IP_RANGE: $(yaml-quote ${MASTER_IP_RANGE})
 EOF
@@ -516,8 +507,7 @@ function write-node-env {
 #   variables are set:
 #     ensure-temp-dir
 #     detect-project
-#     get-password
-#     set-master-htpasswd
+#     get-bearer-token
 #
 function create-master-instance {
   local address_opt=""
@@ -550,8 +540,7 @@ function kube-up {
   ensure-temp-dir
   detect-project
-  get-password
-  set-master-htpasswd
+  get-bearer-token

   # Make sure we have the tar files staged on Google Storage
   find-release-tars
@@ -679,8 +668,9 @@ function kube-up {
   echo "  up."
   echo

-  until curl --insecure --user "${KUBE_USER}:${KUBE_PASSWORD}" --max-time 5 \
-        --fail --output /dev/null --silent "https://${KUBE_MASTER_IP}/api/v1beta1/pods"; do
+  until curl --insecure -H "Authorization: Bearer ${KUBE_BEARER_TOKEN}" \
+        --max-time 5 --fail --output /dev/null --silent \
+        "https://${KUBE_MASTER_IP}/api/v1beta1/pods"; do
     printf "."
     sleep 2
   done
@@ -859,8 +849,7 @@ function kube-push {
   detect-project
   detect-master
   detect-minion-names
-  get-password
-  set-master-htpasswd
+  get-bearer-token

   # Make sure we have the tar files staged on Google Storage
   find-release-tars


@@ -44,7 +44,11 @@
 {% set cert_file = "--tls_cert_file=/srv/kubernetes/server.cert" -%}
 {% set key_file = "--tls_private_key_file=/srv/kubernetes/server.key" -%}
-{% set secure_port = "--secure_port=6443" -%}
+{% set secure_port = "6443" -%}
+{% if grains['cloud'] is defined and grains['cloud'] == 'gce' %}
+  {% set secure_port = "443" -%}
+{% endif -%}

 {% set token_auth_file = "--token_auth_file=/dev/null" -%}

 {% if grains.cloud is defined -%}
@@ -86,15 +90,15 @@
                 "{{cluster_name}}",
                 "{{cert_file}}",
                 "{{key_file}}",
-                "{{secure_port}}",
+                "--secure_port={{secure_port}}",
                 "{{token_auth_file}}",
                 "{{publicAddressOverride}}",
                 "{{pillar['log_level']}}"
               ],
               "ports":[
                 { "name": "https",
-                  "containerPort": 6443,
-                  "hostPort": 6443},{
+                  "containerPort": {{secure_port}},
+                  "hostPort": {{secure_port}}},{
                   "name": "http",
                   "containerPort": 7080,
                   "hostPort": 7080},{

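A rough way to confirm the rewiring on a GCE master once the manifest above is rendered (the manifest path and tool availability on the node image are assumptions):

```bash
# The rendered static manifest should now carry 443 for the secure port.
grep -E 'secure_port|hostPort' /etc/kubernetes/manifests/kube-apiserver.manifest
# And the host should be listening on 443 rather than 6443.
sudo netstat -ntlp | grep -E ':(443|6443) '
```
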

@@ -4,15 +4,22 @@
 {% endif -%}

 {% if grains.api_servers is defined -%}
-  {% set api_servers = "--api_servers=https://" + grains.api_servers + ":6443" -%}
+  {% set api_servers = "--api_servers=https://" + grains.api_servers -%}
 {% elif grains.apiservers is defined -%} # TODO(remove after 0.16.0): Deprecated form
-  {% set api_servers = "--api_servers=https://" + grains.apiservers + ":6443" -%}
+  {% set api_servers = "--api_servers=https://" + grains.apiservers -%}
 {% elif grains['roles'][0] == 'kubernetes-master' -%}
   {% set master_ipv4 = salt['grains.get']('fqdn_ip4')[0] -%}
-  {% set api_servers = "--api_servers=https://" + master_ipv4 + ":6443" -%}
+  {% set api_servers = "--api_servers=https://" + master_ipv4 -%}
 {% else -%}
   {% set ips = salt['mine.get']('roles:kubernetes-master', 'network.ip_addrs', 'grain').values() -%}
-  {% set api_servers = "--api_servers=https://" + ips[0][0] + ":6443" -%}
+  {% set api_servers = "--api_servers=https://" + ips[0][0] -%}
+{% endif -%}
+
+# TODO: remove nginx for other cloud providers.
+{% if grains['cloud'] is defined and grains['cloud'] == 'gce' -%}
+  {% set api_servers_with_port = api_servers -%}
+{% else -%}
+  {% set api_servers_with_port = api_servers + ":6443" -%}
 {% endif -%}

 {% set config = "--config=/etc/kubernetes/manifests" -%}
@@ -33,4 +40,4 @@
   {% set docker_root = " --docker_root=" + grains.docker_root -%}
 {% endif -%}

-DAEMON_ARGS="{{daemon_args}} {{api_servers}} {{hostname_override}} {{config}} --allow_privileged={{pillar['allow_privileged']}} {{pillar['log_level']}} {{cluster_dns}} {{cluster_domain}} {{docker_root}}"
+DAEMON_ARGS="{{daemon_args}} {{api_servers_with_port}} {{hostname_override}} {{config}} --allow_privileged={{pillar['allow_privileged']}} {{pillar['log_level']}} {{cluster_dns}} {{cluster_domain}} {{docker_root}}"
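
Rendered out, the only difference `api_servers_with_port` makes is whether the kubelet's `--api_servers` URL carries an explicit `:6443`; with illustrative placeholder addresses:

```bash
# On GCE the apiserver now serves 443 directly, so no port suffix is appended:
DAEMON_ARGS="... --api_servers=https://146.148.x.x --config=/etc/kubernetes/manifests ..."
# On other providers the apiserver still listens on 6443, so the suffix is kept:
DAEMON_ARGS="... --api_servers=https://10.240.x.x:6443 --config=/etc/kubernetes/manifests ..."
```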


@@ -33,7 +33,9 @@ base:
     - kube-controller-manager
     - kube-scheduler
     - monit
+{% if grains['cloud'] is defined and grains['cloud'] != 'gce' %}
     - nginx
+{% endif %}
     - cadvisor
     - kube-client-tools
     - kube-master-addons


@@ -20,18 +20,20 @@ HTTP on 3 ports:
   - only GET requests are allowed.
   - requests are rate limited
 3. Secure Port
-  - default is port 6443, change with `-secure_port`
+  - default is port 443, change with `-secure_port`
   - default IP is first non-localhost network interface, change with `-public_address_override`
   - serves HTTPS. Set cert with `-tls_cert_file` and key with `-tls_private_key_file`.
-  - uses token-file based [authentication](./authentication.md).
+  - uses token-file or client-certificate based [authentication](./authentication.md).
   - uses policy-based [authorization](./authorization.md).

 ## Proxies and Firewall rules

-Additionally, in typical configurations (i.e. GCE), there is a proxy (nginx) running
+Additionally, in some configurations there is a proxy (nginx) running
 on the same machine as the apiserver process. The proxy serves HTTPS protected
-by Basic Auth on port 443, and proxies to the apiserver on localhost:8080.
-Typically, firewall rules will allow HTTPS access to port 443.
+by Basic Auth on port 443, and proxies to the apiserver on localhost:8080. In
+these configurations the secure port is typically set to 6443.
+A firewall rule is typically configured to allow external HTTPS access to port 443.

 The above are defaults and reflect how Kubernetes is deployed to GCE using
 kube-up.sh. Other cloud providers may vary.
@@ -42,7 +44,7 @@ There are three differently configured serving ports because there are a
 variety of uses cases:
 1. Clients outside of a Kubernetes cluster, such as human running `kubectl`
    on desktop machine. Currently, accesses the Localhost Port via a proxy (nginx)
-   running on the `kubernetes-master` machine. Proxy uses Basic Auth.
+   running on the `kubernetes-master` machine. Proxy uses bearer token authentication.
 2. Processes running in Containers on Kubernetes that need to do read from
    the apiserver. Currently, these can use Readonly Port.
 3. Scheduler and Controller-manager processes, which need to do read-write
@@ -59,13 +61,14 @@ variety of uses cases:
   - Policy will limit the actions kubelets can do via the authed port.
   - Kube-proxy currently uses the readonly port to read services and endpoints,
     but will eventually use the auth port.
-  - Kubelets may change from token-based authentication to cert-based-auth.
+  - Kubelets will change from token-based authentication to cert-based-auth.
   - Scheduler and Controller-manager will use the Secure Port too. They
     will then be able to run on different machines than the apiserver.
   - A general mechanism will be provided for [giving credentials to
     pods](
     https://github.com/GoogleCloudPlatform/kubernetes/issues/1907).
-  - The Readonly Port will no longer be needed and will be removed.
+  - The Readonly Port will no longer be needed and [will be removed](
+    https://github.com/GoogleCloudPlatform/kubernetes/issues/5921).
   - Clients, like kubectl, will all support token-based auth, and the
     Localhost will no longer be needed, and will not be the default.
     However, the localhost port may continue to be an option for


@@ -2,40 +2,40 @@
 Client access to a running kubernetes cluster can be shared by copying
 the `kubectl` client config bundle ([.kubeconfig](kubeconfig-file.md)).
-This config bundle lives in `$HOME/.kube/.kubeconfig`, and is generated
-by `cluster/kube-up.sh`. Sample steps for sharing `.kubeconfig` below.
+This config bundle lives in `$HOME/.kube/config`, and is generated
+by `cluster/kube-up.sh`. Sample steps for sharing `kubeconfig` below.

 **1. Create a cluster**
 ```bash
 cluster/kube-up.sh
 ```

-**2. Copy `.kubeconfig` to new host**
+**2. Copy `kubeconfig` to new host**
 ```bash
-scp $HOME/.kube/.kubeconfig user@remotehost:/path/to/.kubeconfig
+scp $HOME/.kube/config user@remotehost:/path/to/.kube/config
 ```

-**3. On new host, make copied `.kubeconfig` available to `kubectl`**
+**3. On new host, make copied `config` available to `kubectl`**

 * Option A: copy to default location
 ```bash
-mv /path/to/.kubeconfig $HOME/.kube/.kubeconfig
+mv /path/to/.kube/config $HOME/.kube/config
 ```
 * Option B: copy to working directory (from which kubectl is run)
 ```bash
-mv /path/to/.kubeconfig $PWD
+mv /path/to/.kube/config $PWD
 ```
-* Option C: manually pass `.kubeconfig` location to `.kubectl`
+* Option C: manually pass `kubeconfig` location to `.kubectl`
 ```bash
 # via environment variable
-export KUBECONFIG=/path/to/.kubeconfig
+export KUBECONFIG=/path/to/.kube/config
 # via commandline flag
-kubectl ... --kubeconfig=/path/to/.kubeconfig
+kubectl ... --kubeconfig=/path/to/.kube/config
 ```

-## Manually Generating `.kubeconfig`
-`.kubeconfig` is generated by `kube-up` but you can generate your own
+## Manually Generating `kubeconfig`
+`kubeconfig` is generated by `kube-up` but you can generate your own
 using (any desired subset of) the following commands.

 ```bash
@@ -46,15 +46,15 @@ kubectl config set-cluster $CLUSTER_NICK
     --embed-certs=true \
 # Or if tls not needed, replace --certificate-authority and --embed-certs with
     --insecure-skip-tls-verify=true
-    --kubeconfig=/path/to/standalone/.kubeconfig
+    --kubeconfig=/path/to/standalone/.kube/config

 # create user entry
 kubectl config set-credentials $USER_NICK
-    # basic auth credentials, generated on kube master
+    # bearer token credentials, generated on kube master
+    --token=$token \
+    # use either username|password or token, not both
     --username=$username \
     --password=$password \
-    # use either username|password or token, not both
-    --token=$token \
     --client-certificate=/path/to/crt_file \
     --client-key=/path/to/key_file \
     --embed-certs=true
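
Once a standalone config like the one above exists, pointing `kubectl` at it is enough to exercise the bearer-token path (the path below is a placeholder):

```bash
export KUBECONFIG=/path/to/standalone/.kube/config
kubectl config view   # inspect the generated cluster/user/context entries
kubectl get pods      # requests are sent with the token (or certs) from the user entry
```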
@@ -65,42 +65,42 @@ kubectl config set-context $CONTEXT_NAME --cluster=$CLUSTER_NICKNAME --user=$USE
 ```

 Notes:
 * The `--embed-certs` flag is needed to generate a standalone
-`.kubeconfig`, that will work as-is on another host.
+`kubeconfig`, that will work as-is on another host.
 * `--kubeconfig` is both the preferred file to load config from and the file to
 save config too. In the above commands the `--kubeconfig` file could be
 omitted if you first run
 ```bash
-export KUBECONFIG=/path/to/standalone/.kubeconfig
+export KUBECONFIG=/path/to/standalone/.kube/config
 ```
 * The ca_file, key_file, and cert_file referrenced above are generated on the
 kube master at cluster turnup. They can be found on the master under
-`/srv/kubernetes`. Basic auth/token are also generated on the kube master.
+`/srv/kubernetes`. Bearer token/basic auth are also generated on the kube master.

-For more details on `.kubeconfig` see [kubeconfig-file.md](kubeconfig-file.md),
+For more details on `kubeconfig` see [kubeconfig-file.md](kubeconfig-file.md),
 and/or run `kubectl config -h`.

-## Merging `.kubeconfig` Example
+## Merging `kubeconfig` Example

 `kubectl` loads and merges config from the following locations (in order)

-1. `--kubeconfig=path/to/kubeconfig` commandline flag
-2. `KUBECONFIG=path/to/kubeconfig` env variable
+1. `--kubeconfig=path/to/.kube/config` commandline flag
+2. `KUBECONFIG=path/to/.kube/config` env variable
 3. `$PWD/.kubeconfig`
-4. `$HOME/.kube/.kubeconfig`
+4. `$HOME/.kube/config`

 If you create clusters A, B on host1, and clusters C, D on host2, you can
 make all four clusters available on both hosts by running

 ```bash
 # on host2, copy host1's default kubeconfig, and merge it from env
-scp host1:/path/to/home1/.kube/.kubeconfig path/to/other/.kubeconfig
-export $KUBECONFIG=path/to/other/.kubeconfig
+scp host1:/path/to/home1/.kube/config path/to/other/.kube/config
+export $KUBECONFIG=path/to/other/.kube/config

 # on host1, copy host2's default kubeconfig and merge it from env
-scp host2:/path/to/home2/.kube/.kubeconfig path/to/other/.kubeconfig
-export $KUBECONFIG=path/to/other/.kubeconfig
+scp host2:/path/to/home2/.kube/config path/to/other/.kube/config
+export $KUBECONFIG=path/to/other/.kube/config
 ```

-Detailed examples and explanation of `.kubeconfig` loading/merging rules can be found in [kubeconfig-file.md](https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/kubeconfig-file.md).
+Detailed examples and explanation of `kubeconfig` loading/merging rules can be found in [kubeconfig-file.md](https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/kubeconfig-file.md).