apiVersion: v1
kind: ServiceAccount
metadata:
  name: metadata-proxy
  namespace: kube-system
  labels:
    k8s-app: metadata-proxy
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
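# metadata-proxy runs as a per-node DaemonSet; on GCE it is intended to sit in
# front of the node's metadata server and filter what pods can read from it
# (concealing sensitive values such as kube-env).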
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: metadata-proxy-v0.1
  namespace: kube-system
  labels:
    k8s-app: metadata-proxy
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    version: v0.1
spec:
  updateStrategy:
    type: RollingUpdate
  template:
    metadata:
      labels:
        k8s-app: metadata-proxy
        kubernetes.io/cluster-service: "true"
        version: v0.1
      # This annotation ensures that the proxy does not get evicted if the
      # node supports the critical-pod-annotation-based priority scheme.
      # Note that this does not guarantee admission on the nodes (#40573).
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
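      # priorityClassName is the successor to the critical-pod annotation
      # above; system-node-critical is the highest built-in priority class, so
      # the scheduler and kubelet should treat this pod as more important than
      # any ordinary workload on the node.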
      priorityClassName: system-node-critical
      serviceAccountName: metadata-proxy
      hostNetwork: true
      dnsPolicy: Default
      containers:
      - name: metadata-proxy
        image: k8s.gcr.io/metadata-proxy:v0.1.9
        securityContext:
          privileged: true
        # Request and limit resources to get guaranteed QoS.
        resources:
          requests:
            memory: "25Mi"
            cpu: "30m"
          limits:
            memory: "25Mi"
            cpu: "30m"
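        # (Guaranteed QoS requires limits == requests for every container in
        # the pod; both containers here satisfy that, so this pod should be
        # among the last evicted under node resource pressure.)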
      # BEGIN_PROMETHEUS_TO_SD
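      # prometheus-to-sd is a small sidecar that scrapes the proxy's local
      # Prometheus endpoint (127.0.0.1:989, see --source below) and pushes
      # the whitelisted request_count metric to Stackdriver.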
      - name: prometheus-to-sd-exporter
        image: k8s.gcr.io/prometheus-to-sd:v0.2.4
        # Request and limit resources to get guaranteed QoS.
        resources:
          requests:
            memory: "20Mi"
            cpu: "2m"
          limits:
            memory: "20Mi"
            cpu: "2m"
        command:
        - /monitor
        - --stackdriver-prefix={{ prometheus_to_sd_prefix }}/addons
        - --api-override={{ prometheus_to_sd_endpoint }}
        - --source=metadata_proxy:http://127.0.0.1:989?whitelisted=request_count
        - --pod-id=$(POD_NAME)
        - --namespace-id=$(POD_NAMESPACE)
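        # {{ prometheus_to_sd_prefix }} and {{ prometheus_to_sd_endpoint }} are
        # placeholders filled in at deploy time by the cluster setup tooling,
        # not by Kubernetes; $(POD_NAME) and $(POD_NAMESPACE) are expanded by
        # Kubernetes from the env vars declared below.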
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
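        # fieldRef is the downward API: it injects this pod's own name and
        # namespace, presumably so exported metrics can be attributed to the
        # per-node daemon instance that produced them.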
      # END_PROMETHEUS_TO_SD
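      # Only nodes opted in to metadata concealment are expected to carry this
      # label (on GCE the node startup scripts set it), so the DaemonSet
      # schedules nowhere else.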
      nodeSelector:
        beta.kubernetes.io/metadata-proxy-ready: "true"
      terminationGracePeriodSeconds: 30