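# node-problem-detector addon: a ServiceAccount for the detector, a
# ClusterRoleBinding granting it the permissions it needs, and a
# DaemonSet that runs one detector pod on every node.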
apiVersion: v1
kind: ServiceAccount
metadata:
  name: node-problem-detector
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
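# Bind the ServiceAccount to the system:node-problem-detector
# ClusterRole; the role itself is expected to be defined by the
# cluster's bootstrap RBAC policy, not by this file.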
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: npd-binding
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:node-problem-detector
subjects:
- kind: ServiceAccount
  name: node-problem-detector
  namespace: kube-system
---
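# The detector itself, as a DaemonSet so each node runs exactly one pod.
# Note: extensions/v1beta1 DaemonSets are deprecated; on current
# clusters this would be apps/v1, which also requires spec.selector.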
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: npd-v0.4.1
  namespace: kube-system
  labels:
    k8s-app: node-problem-detector
    version: v0.4.1
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  template:
    metadata:
      labels:
        k8s-app: node-problem-detector
        version: v0.4.1
        kubernetes.io/cluster-service: "true"
    spec:
      containers:
      - name: node-problem-detector
        image: k8s.gcr.io/node-problem-detector:v0.4.1
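        # Wrap the binary in a shell so its output can be appended to a
        # log file on the host (the /var/log hostPath mounted below).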
        command:
        - "/bin/sh"
        - "-c"
        # Pass both the journald and the filelog (syslog) variants of
        # each monitor config so either log source is supported.
        - "exec /node-problem-detector --logtostderr --system-log-monitors=/config/kernel-monitor.json,/config/kernel-monitor-filelog.json,/config/docker-monitor.json,/config/docker-monitor-filelog.json >>/var/log/node-problem-detector.log 2>&1"
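        # Privileged so the detector can read host-level log sources.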
        securityContext:
          privileged: true
        resources:
          limits:
            cpu: "200m"
            memory: "100Mi"
          requests:
            cpu: "20m"
            memory: "20Mi"
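        # Tell the detector which node it runs on via the downward API.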
        env:
        - name: NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
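        # /var/log provides both the host logs the filelog monitors read
        # and a place for the detector's own log file; /etc/localtime
        # keeps its timestamps in the node's timezone.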
        volumeMounts:
        - name: log
          mountPath: /var/log
        - name: localtime
          mountPath: /etc/localtime
          readOnly: true
      volumes:
      - name: log
        hostPath:
          path: /var/log/
      - name: localtime
        hostPath:
          path: /etc/localtime
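          # FileOrCreate mounts an empty file rather than failing the
          # pod on hosts that have no /etc/localtime.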
          type: "FileOrCreate"
      serviceAccountName: node-problem-detector
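      # Keep running through NoExecute taints (e.g. nodes flagged with
      # problems) and schedule onto nodes reserved for critical addons.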
      tolerations:
      - operator: "Exists"
        effect: "NoExecute"
      - key: "CriticalAddonsOnly"
        operator: "Exists"