Get Vagrant to start using TLS client certs.

Also fix up cert generation. It was failing during the first salt highstate because it tried to chown the certs to the apiserver user before that user existed. Fix this by creating a 'kube-cert' group and chgrp'ing the cert files to it, then making the apiserver user a member of that group.

Fixes #2365
Fixes #2368
Joe Beda 2014-11-13 22:14:56 -08:00
parent 7a6743808a
commit 5a0159ea00
11 changed files with 61 additions and 31 deletions
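
The permission scheme the commit message describes boils down to roughly the following shell sketch (the group name, paths, and modes come from the diffs below; the actual work is done by the salt states and cert scripts, so treat this as an illustration only):

# Create the system group up front, so nothing depends on the apiserver user existing yet.
groupadd --system kube-cert

# Hand the cert files to the group instead of chown'ing them to apiserver directly.
chgrp kube-cert /srv/kubernetes/server.key /srv/kubernetes/server.cert /srv/kubernetes/ca.crt
chmod 660 /srv/kubernetes/server.key /srv/kubernetes/server.cert /srv/kubernetes/ca.crt

# Whenever the apiserver user is created, its kube-cert membership grants read access.
usermod --append --groups kube-cert apiserver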


@ -83,16 +83,9 @@ fi
# When we are using vagrant it has hard coded auth. We repeat that here so that
# we don't clobber auth that might be used for a publicly facing cluster.
if [ "$KUBERNETES_PROVIDER" == "vagrant" ]; then
cat >~/.kubernetes_vagrant_auth <<EOF
{
"User": "vagrant",
"Password": "vagrant"
}
EOF
if [[ "$KUBERNETES_PROVIDER" == "vagrant" ]]; then
auth_config=(
"-auth" "$HOME/.kubernetes_vagrant_auth"
"-insecure_skip_tls_verify"
)
else
auth_config=()
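
The auth_config array built here is expanded later when the script invokes the client; the "${auth_config[@]:+${auth_config[@]}}" guard used in the e2e change at the bottom of this commit keeps set -u happy when the array is empty on non-vagrant providers. A hypothetical call site (binary variable and subcommand assumed, not shown in this hunk) would look like:

# Hypothetical: pass the auth flags through only when they were populated above.
"${kubecfg}" "${auth_config[@]:+${auth_config[@]}}" list pods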


@ -84,15 +84,8 @@ fi
# When we are using vagrant it has hard coded auth. We repeat that here so that
# we don't clobber auth that might be used for a publicly facing cluster.
if [[ "$KUBERNETES_PROVIDER" == "vagrant" ]]; then
cat >~/.kubernetes_vagrant_auth <<EOF
{
"User": "vagrant",
"Password": "vagrant"
}
EOF
auth_config=(
"--auth-path=$HOME/.kubernetes_vagrant_auth"
"--insecure-skip-tls-verify=true"
)
else
auth_config=()


@ -44,6 +44,8 @@ apiserver:
  user.present:
    - system: True
    - gid_from_name: True
    - groups:
      - kube-cert
    - shell: /sbin/nologin
    - home: /var/apiserver
    - require:


@ -6,7 +6,7 @@
{% set cert_ip='_use_aws_external_ip_' %}
{% endif %}
{% if grains.cloud == 'vagrant' %}
{% set cert_ip=grains.fqdn_ip4 %}
{% set cert_ip=grains.ip_interfaces.eth1[0] %}
{% endif %}
{% if grains.cloud == 'vsphere' %}
{% set cert_ip=grains.ip_interfaces.eth0[0] %}
@ -23,6 +23,10 @@
{% set certgen="make-ca-cert.sh" %}
{% endif %}
kube-cert:
  group.present:
    - system: True

kubernetes-cert:
  cmd.script:
    - unless: test -f /srv/kubernetes/server.cert


@ -20,7 +20,7 @@ set -o pipefail
cert_ip=$1
cert_dir=/srv/kubernetes
cert_file_owner=apiserver.apiserver
cert_group=kube-cert
mkdir -p "$cert_dir"
@ -63,4 +63,5 @@ cp -p pki/ca.crt "${cert_dir}/ca.crt"
cp -p pki/issued/kubecfg.crt "${cert_dir}/kubecfg.crt"
cp -p pki/private/kubecfg.key "${cert_dir}/kubecfg.key"
# Make server certs accessible to apiserver.
chown $cert_file_owner "${cert_dir}/server.key" "${cert_dir}/server.cert" "${cert_dir}/ca.cert"
chgrp $cert_group "${cert_dir}/server.key" "${cert_dir}/server.cert" "${cert_dir}/ca.crt"
chmod 660 "${cert_dir}/server.key" "${cert_dir}/server.cert" "${cert_dir}/ca.crt"
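
Assuming the highstate runs this script as root, the resulting layout can be sanity-checked on the master with something like the following (a hypothetical check, not part of the commit):

# Expect group kube-cert and mode 660 on the server key after provisioning.
stat -c '%U:%G %a %n' /srv/kubernetes/server.key
# The apiserver user should be able to read it via its kube-cert membership.
sudo -u apiserver test -r /srv/kubernetes/server.key && echo 'apiserver can read server.key'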


@ -15,11 +15,12 @@
# limitations under the License.
cert_dir=/srv/kubernetes
cert_file_owner=apiserver.apiserver
cert_group=kube-cert
mkdir -p "$cert_dir"
openssl req -new -newkey rsa:4096 -days 365 -nodes -x509 \
-subj "/CN=kubernetes.invalid/O=Kubernetes" \
-keyout "${cert_dir}/server.key" -out "${cert_dir}/server.cert"
chown $cert_file_owner "${cert_dir}/server.key" "${cert_dir}/server.cert"
chgrp $cert_group "${cert_dir}/server.key" "${cert_dir}/server.cert"
chmod 660 "${cert_dir}/server.key" "${cert_dir}/server.cert"


@ -1,14 +1,6 @@
nginx:
  pkg:
    - installed
  service:
    - running
    - watch:
      - pkg: nginx
      - file: /etc/nginx/nginx.conf
      - file: /etc/nginx/sites-enabled/default
      - file: /usr/share/nginx/htpasswd
      - cmd: kubernetes-cert
/etc/nginx/nginx.conf:
  file:
@ -36,3 +28,13 @@ nginx:
    - group: root
    - mode: 644
nginx-service:
  service:
    - running
    - name: nginx
    - watch:
      - pkg: nginx
      - file: /etc/nginx/nginx.conf
      - file: /etc/nginx/sites-enabled/default
      - file: /usr/share/nginx/htpasswd
      - cmd: kubernetes-cert


@ -70,6 +70,7 @@ grains:
  master_ip: $MASTER_IP
  network_mode: openvswitch
  etcd_servers: $MASTER_IP
  cloud: vagrant
  cloud_provider: vagrant
  roles:
    - kubernetes-master
@ -78,6 +79,7 @@ EOF
mkdir -p /srv/salt-overlay/pillar
cat <<EOF >/srv/salt-overlay/pillar/cluster-params.sls
portal_net: $PORTAL_NET
cert_ip: $MASTER_IP
EOF
# Configure the salt-master


@ -43,6 +43,29 @@ function kube-up {
get-password
vagrant up
local kube_cert=".kubecfg.vagrant.crt"
local kube_key=".kubecfg.vagrant.key"
local ca_cert=".kubernetes.vagrant.ca.crt"
(umask 077
vagrant ssh master -- sudo cat /srv/kubernetes/kubecfg.crt >"${HOME}/${kube_cert}" 2>/dev/null
vagrant ssh master -- sudo cat /srv/kubernetes/kubecfg.key >"${HOME}/${kube_key}" 2>/dev/null
vagrant ssh master -- sudo cat /srv/kubernetes/ca.crt >"${HOME}/${ca_cert}" 2>/dev/null
cat << EOF > ~/.kubernetes_vagrant_auth
{
"User": "$KUBE_USER",
"Password": "$KUBE_PASSWORD",
"CAFile": "$HOME/$ca_cert",
"CertFile": "$HOME/$kube_cert",
"KeyFile": "$HOME/$kube_key"
}
EOF
chmod 0600 ~/.kubernetes_auth "${HOME}/${kube_cert}" \
"${HOME}/${kube_key}" "${HOME}/${ca_cert}"
)
echo "Each machine instance has been created."
echo " Now waiting for the Salt provisioning process to complete on each machine."
echo " This can take some time based on your network, disk, and cpu speed."
@ -108,7 +131,7 @@ function kube-up {
echo
echo " https://${KUBE_MASTER_IP}"
echo
echo "The user name and password to use is located in ~/.kubernetes_auth."
echo "The user name and password to use is located in ~/.kubernetes_vagrant_auth."
echo
}
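
The material pulled down here can also be exercised outside of kubecfg; a hypothetical smoke test against the master (the API path and the basic-auth requirement are assumptions) would be:

# Hypothetical: hit the master over TLS with the vagrant client cert and CA.
curl --cacert "$HOME/.kubernetes.vagrant.ca.crt" \
  --cert "$HOME/.kubecfg.vagrant.crt" --key "$HOME/.kubecfg.vagrant.key" \
  -u "$KUBE_USER:$KUBE_PASSWORD" \
  "https://${KUBE_MASTER_IP}/api/v1beta1/pods"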


@ -52,7 +52,7 @@ The following enumerates the set of defined key/value pairs that are supported t
Key | Value
------------- | -------------
`cbr-cidr` | (Optional) The minion IP address range used for the docker container bridge.
`cloud` | (Optional) Which IaaS platform is used to host kubernetes, *gce*, *azure*
`cloud` | (Optional) Which IaaS platform is used to host kubernetes, *gce*, *azure*, *aws*, *vagrant*
`cloud_provider` | (Optional) The cloud_provider used by apiserver: *gce*, *azure*, *vagrant*
`etcd_servers` | (Optional) Comma-delimited list of IP addresses the apiserver and kubelet use to reach etcd. Uses the IP of the first machine in the kubernetes_master role.
`hostnamef` | (Optional) The full host name of the machine, i.e. hostname -f


@ -63,5 +63,14 @@ locations=(
)
e2e=$( (ls -t "${locations[@]}" 2>/dev/null || true) | head -1 )
# When we are using vagrant it has hard coded auth. We repeat that here so that
# we don't clobber auth that might be used for a publicly facing cluster.
if [[ "$KUBERNETES_PROVIDER" == "vagrant" ]]; then
auth_config=(
"--auth_config=$HOME/.kubernetes_vagrant_auth"
)
else
auth_config=()
fi
"${e2e}" -host="https://${KUBE_MASTER_IP-}"
"${e2e}" "${auth_config[@]:+${auth_config[@]}}" -host="https://${KUBE_MASTER_IP-}"