Kubelet talks securely to apiserver.

Configure apiserver to serve securely on port 6443.
Generate token for kubelets during master VM startup.
Put the token into a file the apiserver can read, and another file the kubelets can read.
Added e2e test.
pull/6/head
Eric Tune 2014-11-13 15:45:34 -08:00
parent df0981bc01
commit 46dcacfa93
6 changed files with 114 additions and 2 deletions

View File

@ -28,3 +28,19 @@ EOF
mkdir -p /srv/salt-overlay/salt/nginx
echo $MASTER_HTPASSWD > /srv/salt-overlay/salt/nginx/htpasswd
# TODO: do aws.
# Generate and distribute a shared secret (bearer token) to
# apiserver and kubelet so that kubelet can authenticate to
# apiserver to send events.
# This works on CoreOS, so it should work on a lot of distros.
# 32 characters of random data, with base64 padding/symbol chars ("=+/")
# stripped so the token embeds cleanly in the CSV and JSON files below.
kubelet_token=$(cat /dev/urandom | base64 | tr -d "=+/" | dd bs=32 count=1 2> /dev/null)
mkdir -p /srv/salt-overlay/salt/kube-apiserver
known_tokens_file="/srv/salt-overlay/salt/kube-apiserver/known_tokens.csv"
# umask u=rw,go= creates the token file owner-read/write only.
(umask u=rw,go= ; echo "$kubelet_token,kubelet,kubelet" > $known_tokens_file)
mkdir -p /srv/salt-overlay/salt/kubelet
kubelet_auth_file="/srv/salt-overlay/salt/kubelet/kubernetes_auth"
# NOTE(review): Insecure=true presumably because the master serves a
# self-signed cert the kubelet cannot verify — confirm before tightening.
(umask u=rw,go= ; echo "{\"BearerToken\": \"$kubelet_token\", \"Insecure\": true }" > $kubelet_auth_file)

View File

@ -29,5 +29,7 @@
{% set cert_file = "-tls_cert_file=/srv/kubernetes/server.cert" %}
{% set key_file = "-tls_private_key_file=/srv/kubernetes/server.key" %}
{% set secure_port = "-secure_port=6443" %}
{% set token_auth_file = "-token_auth_file=/srv/kubernetes/known_tokens.csv" %}
DAEMON_ARGS="{{daemon_args}} {{address}} {{etcd_servers}} {{ cloud_provider }} --allow_privileged={{pillar['allow_privileged']}} {{portal_net}} {{cert_file}} {{key_file}}"
DAEMON_ARGS="{{daemon_args}} {{address}} {{etcd_servers}} {{ cloud_provider }} --allow_privileged={{pillar['allow_privileged']}} {{portal_net}} {{cert_file}} {{key_file}} {{secure_port}} {{token_auth_file}}"

View File

@ -38,6 +38,13 @@
{% endif %}
/srv/kubernetes/known_tokens.csv:
file.managed:
- source: salt://kube-apiserver/known_tokens.csv
- user: kube-apiserver
- group: kube-apiserver
- mode: 400
kube-apiserver:
group.present:
- system: True

View File

@ -9,6 +9,13 @@
{% set etcd_servers = "-etcd_servers=http://" + ips[0][0] + ":4001" %}
{% endif %}
{% if grains.apiservers is defined %}
{% set apiservers = "-api_servers=https://" + grains.apiservers + ":6443" %}
{% else %}
{% set ips = salt['mine.get']('roles:kubernetes-master', 'network.ip_addrs', 'grain').values() %}
{% set apiservers = "-api_servers=https://" + ips[0][0] + ":6443" %}
{% endif %}
{% set address = "-address=0.0.0.0" %}
{% set config = "-config=/etc/kubernetes/manifests" %}
{% set hostname_override = "" %}
@ -16,5 +23,6 @@
{% set hostname_override = " -hostname_override=" + grains.minion_ip %}
{% endif %}
{% set auth_path = "-auth_path=/var/lib/kubelet/kubernetes_auth" %}
DAEMON_ARGS="{{daemon_args}} {{etcd_servers}} {{hostname_override}} {{address}} {{config}} --allow_privileged={{pillar['allow_privileged']}}"
DAEMON_ARGS="{{daemon_args}} {{etcd_servers}} {{apiservers}} {{auth_path}} {{hostname_override}} {{address}} {{config}} --allow_privileged={{pillar['allow_privileged']}}"

View File

@ -38,6 +38,14 @@
{% endif %}
# Kubelet will run without this file but will not be able to send events to the apiserver.
/var/lib/kubelet/kubernetes_auth:
file.managed:
- source: salt://kubelet/kubernetes_auth
- user: root
- group: root
- mode: 400
kubelet:
group.present:
- system: True
@ -57,4 +65,5 @@ kubelet:
{% if grains['os_family'] != 'RedHat' %}
- file: /etc/init.d/kubelet
{% endif %}
- file: /var/lib/kubelet/kubernetes_auth

View File

@ -169,6 +169,75 @@ func TestPodUpdate(c *client.Client) bool {
return true
}
// TestKubeletSendsEvent checks that kubelets and scheduler send events about pods scheduling and running.
func TestKubeletSendsEvent(c *client.Client) bool {
	podClient := c.Pods(api.NamespaceDefault)
	pod := loadPodOrDie("./api/examples/pod.json")

	// Tag the pod with a unique label so the List below matches exactly this pod.
	value := strconv.Itoa(time.Now().Nanosecond())
	pod.Labels["time"] = value

	_, err := podClient.Create(pod)
	if err != nil {
		glog.Errorf("Failed to create pod: %v", err)
		return false
	}
	defer podClient.Delete(pod.Name)
	waitForPodRunning(c, pod.Name)

	pods, err := podClient.List(labels.SelectorFromSet(labels.Set(map[string]string{"time": value})))
	if err != nil {
		// BUG FIX: this error was previously ignored; a failed List would
		// have dereferenced a nil pods below.
		glog.Errorf("Failed to list pods: %v", err)
		return false
	}
	if len(pods.Items) != 1 {
		glog.Errorf("Failed to find the correct pod")
		return false
	}
	_, err = podClient.Get(pod.Name)
	if err != nil {
		glog.Errorf("Failed to get pod: %v", err)
		return false
	}

	// Check for scheduler event about the pod.
	if !sawEventFromSource(c, pod.Name, "Pod", "scheduler") {
		return false
	}
	glog.Info("Saw scheduler event for our pod.")

	// Check for kubelet event about the pod.
	if !sawEventFromSource(c, pod.Name, "BoundPod", "kubelet") {
		return false
	}
	glog.Info("Saw kubelet event for our pod.")
	return true
}

// sawEventFromSource returns true if at least one event from the given source
// exists for the named object of the given kind in the default namespace.
// It logs and returns false on a listing error or when no events are found.
func sawEventFromSource(c *client.Client, name, kind, source string) bool {
	events, err := c.Events(api.NamespaceDefault).List(
		labels.Everything(),
		labels.Set{
			"involvedObject.name":      name,
			"involvedObject.kind":      kind,
			"involvedObject.namespace": api.NamespaceDefault,
			"source":                   source,
		}.AsSelector(),
	)
	if err != nil {
		glog.Error("Error while listing events:", err)
		return false
	}
	if len(events.Items) == 0 {
		glog.Errorf("Didn't see any %v events even though pod was running.", source)
		return false
	}
	return true
}
func main() {
flag.Parse()
runtime.GOMAXPROCS(runtime.NumCPU())
@ -186,6 +255,7 @@ func main() {
tests := []func(c *client.Client) bool{
TestKubernetesROService,
TestKubeletSendsEvent,
// TODO(brendandburns): fix this test and re-add it: TestPodUpdate,
}