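# Kubemark hollow-node ReplicationController template. The {{...}}
# placeholders are (presumably) substituted by the kubemark start-up
# scripts before this manifest is applied.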
apiVersion: v1
kind: ReplicationController
metadata:
  name: hollow-node
  labels:
    name: hollow-node
    {{kubemark_mig_config}}
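# One pod is created per simulated node; {{numreplicas}} is assumed to be
# set to the desired hollow-node count.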
spec:
  replicas: {{numreplicas}}
  selector:
    name: hollow-node
  template:
    metadata:
      labels:
        name: hollow-node
        {{kubemark_mig_config}}
    spec:
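      # Many hollow nodes share each real node and each runs inotify
      # watchers, so the default fs.inotify.max_user_instances limit
      # (typically 128) is raised before the main containers start.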
      initContainers:
      - name: init-inotify-limit
        image: busybox
        command: ['sysctl', '-w', 'fs.inotify.max_user_instances=200']
        securityContext:
          privileged: true
      volumes:
      - name: kubeconfig-volume
        secret:
          secretName: kubeconfig
      - name: kernelmonitorconfig-volume
        configMap:
          name: node-configmap
      - name: logs-volume
        hostPath:
          path: /var/log
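      # Mounted over the default serviceaccount token path so the hollow
      # components cannot pick up credentials for the real master.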
      - name: no-serviceaccount-access-to-real-master
        emptyDir: {}
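      # Two of the containers below are built from the same kubemark binary,
      # which --morph turns into either a hollow kubelet or a hollow
      # kube-proxy; the third is a real node-problem-detector.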
      containers:
      - name: hollow-kubelet
        image: {{kubemark_image_registry}}/kubemark:{{kubemark_image_tag}}
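        # Standard kubelet ports: 4194 (cAdvisor), 10250 (kubelet API),
        # 10255 (read-only kubelet API).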
        ports:
        - containerPort: 4194
        - containerPort: 10250
        - containerPort: 10255
        env:
        - name: CONTENT_TYPE
          valueFrom:
            configMapKeyRef:
              name: node-configmap
              key: content.type
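        # CONTENT_TYPE expands unquoted in the command below; its configmap
        # value is assumed to be either empty or a complete flag such as
        # --kube-api-content-type=... .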
        - name: NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        command:
        - /bin/sh
        - -c
        - /kubemark --morph=kubelet --name=$(NODE_NAME) {{hollow_kubelet_params}} --kubeconfig=/kubeconfig/kubelet.kubeconfig $(CONTENT_TYPE) --alsologtostderr 1>>/var/log/kubelet-$(NODE_NAME).log 2>&1
        volumeMounts:
        - name: kubeconfig-volume
          mountPath: /kubeconfig
          readOnly: true
        - name: logs-volume
          mountPath: /var/log
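        # Requests are kept deliberately small so that many hollow nodes can
        # be packed onto each real node.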
        resources:
          requests:
            cpu: 40m
            memory: 100M
        securityContext:
          privileged: true
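      # The same kubemark image, morphed into a hollow kube-proxy; its
      # resource requests are templated so they can be tuned per cluster
      # size.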
      - name: hollow-proxy
        image: {{kubemark_image_registry}}/kubemark:{{kubemark_image_tag}}
        env:
        - name: CONTENT_TYPE
          valueFrom:
            configMapKeyRef:
              name: node-configmap
              key: content.type
        - name: NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        command:
        - /bin/sh
        - -c
        - /kubemark --morph=proxy --name=$(NODE_NAME) {{hollow_proxy_params}} --kubeconfig=/kubeconfig/kubeproxy.kubeconfig $(CONTENT_TYPE) --alsologtostderr 1>>/var/log/kubeproxy-$(NODE_NAME).log 2>&1
        volumeMounts:
        - name: kubeconfig-volume
          mountPath: /kubeconfig
          readOnly: true
        - name: logs-volume
          mountPath: /var/log
        resources:
          requests:
            cpu: {{HOLLOW_PROXY_CPU}}m
            memory: {{HOLLOW_PROXY_MEM}}Ki
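      # A real node-problem-detector, but configured (below) to talk to the
      # kubemark master instead of the cluster it actually runs in.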
      - name: hollow-node-problem-detector
        image: k8s.gcr.io/node-problem-detector:v0.4.1
        env:
        - name: NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        command:
        - /bin/sh
        - -c
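        # --apiserver-override points NPD at the external kubemark master,
        # authenticating with the mounted npd.kubeconfig rather than the
        # in-cluster serviceaccount (inClusterConfig=false).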
        - /node-problem-detector --system-log-monitors=/config/kernel.monitor --apiserver-override="https://{{master_ip}}:443?inClusterConfig=false&auth=/kubeconfig/npd.kubeconfig" --alsologtostderr 1>>/var/log/npd-$(NODE_NAME).log 2>&1
        volumeMounts:
        - name: kubeconfig-volume
          mountPath: /kubeconfig
          readOnly: true
        - name: kernelmonitorconfig-volume
          mountPath: /config
          readOnly: true
        - name: no-serviceaccount-access-to-real-master
          mountPath: /var/run/secrets/kubernetes.io/serviceaccount
          readOnly: true
        - name: logs-volume
          mountPath: /var/log
        resources:
          requests:
            cpu: 20m
            memory: 20Mi
        securityContext:
          privileged: true
      # Keep the pod running on an unreachable node for 15 minutes.
      # This should be long enough for a VM reboot and avoids
      # recreating the hollow node from scratch.
      # See https://github.com/kubernetes/kubernetes/issues/67120 for context.
      tolerations:
      - key: "node.kubernetes.io/unreachable"
        operator: "Exists"
        effect: "NoExecute"
        tolerationSeconds: 900