Merge pull request #33774 from MrHohn/kubedns-graceful

Automatic merge from submit-queue

Bump up addon kube-dns to v20 for graceful termination

The following images have been built and pushed:
- gcr.io/google_containers/kubedns-amd64:1.8
- gcr.io/google_containers/kubedns-arm:1.8
- gcr.io/google_containers/kubedns-arm64:1.8
- gcr.io/google_containers/kubedns-ppc64le:1.8

Both the kubedns and dnsmasq image versions are bumped in the manifest files.
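
For quick reference, the image bumps in the updated amd64 manifests boil down to the following (a minimal abridged sketch of the pod template; resources, probes, ports, and args are omitted here and appear in the full diffs below):

```yaml
# Abridged pod template excerpt; only the image bumps are shown.
spec:
  containers:
  - name: kubedns
    image: gcr.io/google_containers/kubedns-amd64:1.8       # was 1.7
  - name: dnsmasq
    image: gcr.io/google_containers/kube-dnsmasq-amd64:1.4  # was 1.3
```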

@thockin @bprashanth
Kubernetes Submit Queue, 2016-09-30 00:15:13 -07:00 (committed by GitHub)
commit b71def750f
7 changed files with 52 additions and 51 deletions

View File

@@ -15,3 +15,6 @@
## Version 1.7 (Wed August 24 2016 Zihong Zheng <zihongz@google.com>)
- Add support for ExternalName services (pr #31159)
## Version 1.8 (Thu September 29 2016 Zihong Zheng <zihongz@google.com>)
- Add support for graceful termination (issue #31807)

View File

@@ -22,7 +22,7 @@
# Default registry, arch and tag. This can be overwritten by arguments to make
PLATFORM?=linux
ARCH?=amd64
TAG?=1.7
TAG?=1.8
REGISTRY?=gcr.io/google_containers
GOLANG_VERSION=1.6

View File

@@ -19,29 +19,29 @@
apiVersion: v1
kind: ReplicationController
metadata:
name: kube-dns-v19
name: kube-dns-v20
namespace: kube-system
labels:
k8s-app: kube-dns
version: v19
version: v20
kubernetes.io/cluster-service: "true"
spec:
replicas: __PILLAR__DNS__REPLICAS__
selector:
k8s-app: kube-dns
version: v19
version: v20
template:
metadata:
labels:
k8s-app: kube-dns
version: v19
version: v20
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
spec:
containers:
- name: kubedns
image: gcr.io/google_containers/kubedns-amd64:1.7
image: gcr.io/google_containers/kubedns-amd64:1.8
resources:
# TODO: Set memory limits when we've profiled the container for large
# clusters, then set request = limit to keep this container in
@@ -83,7 +83,7 @@ spec:
name: dns-tcp-local
protocol: TCP
- name: dnsmasq
image: gcr.io/google_containers/kube-dnsmasq-amd64:1.3
image: gcr.io/google_containers/kube-dnsmasq-amd64:1.4
livenessProbe:
httpGet:
path: /healthz-dnsmasq

View File

@@ -19,30 +19,29 @@
apiVersion: v1
kind: ReplicationController
metadata:
name: kube-dns-v19
name: kube-dns-v20
namespace: kube-system
labels:
k8s-app: kube-dns
version: v19
version: v20
kubernetes.io/cluster-service: "true"
spec:
replicas: {{ pillar['dns_replicas'] }}
selector:
k8s-app: kube-dns
version: v19
version: v20
template:
metadata:
labels:
k8s-app: kube-dns
version: v19
kubernetes.io/cluster-service: "true"
version: v20
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
spec:
containers:
- name: kubedns
image: gcr.io/google_containers/kubedns-amd64:1.7
image: gcr.io/google_containers/kubedns-amd64:1.8
resources:
# TODO: Set memory limits when we've profiled the container for large
# clusters, then set request = limit to keep this container in
@@ -84,7 +83,16 @@ spec:
name: dns-tcp-local
protocol: TCP
- name: dnsmasq
image: gcr.io/google_containers/kube-dnsmasq-amd64:1.3
image: gcr.io/google_containers/kube-dnsmasq-amd64:1.4
livenessProbe:
httpGet:
path: /healthz-dnsmasq
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
args:
- --cache-size=1000
- --no-resolv
@@ -97,15 +105,6 @@ spec:
- containerPort: 53
name: dns-tcp
protocol: TCP
livenessProbe:
httpGet:
path: /healthz-dnsmasq
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
- name: healthz
image: gcr.io/google_containers/exechealthz-amd64:1.2
resources:
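
Besides the image bump, the two hunks above relocate the dnsmasq livenessProbe so that it sits directly after the image field rather than after the ports section; the probe settings themselves are unchanged. A rough sketch of the resulting dnsmasq container block (abridged; remaining args and port entries are elided):

```yaml
- name: dnsmasq
  image: gcr.io/google_containers/kube-dnsmasq-amd64:1.4
  livenessProbe:
    httpGet:
      path: /healthz-dnsmasq
      port: 8080
      scheme: HTTP
    initialDelaySeconds: 60
    timeoutSeconds: 5
    successThreshold: 1
    failureThreshold: 5
  args:
  - --cache-size=1000
  - --no-resolv
  # ... remaining args elided ...
  ports:
  - containerPort: 53
    name: dns-tcp
    protocol: TCP
    # ... other port entries elided ...
```

The next manifest below receives the same relocation.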

View File

@@ -19,30 +19,29 @@
apiVersion: v1
kind: ReplicationController
metadata:
name: kube-dns-v19
name: kube-dns-v20
namespace: kube-system
labels:
k8s-app: kube-dns
version: v19
version: v20
kubernetes.io/cluster-service: "true"
spec:
replicas: $DNS_REPLICAS
selector:
k8s-app: kube-dns
version: v19
version: v20
template:
metadata:
labels:
k8s-app: kube-dns
version: v19
kubernetes.io/cluster-service: "true"
version: v20
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
spec:
containers:
- name: kubedns
image: gcr.io/google_containers/kubedns-amd64:1.7
image: gcr.io/google_containers/kubedns-amd64:1.8
resources:
# TODO: Set memory limits when we've profiled the container for large
# clusters, then set request = limit to keep this container in
@@ -83,7 +82,16 @@ spec:
name: dns-tcp-local
protocol: TCP
- name: dnsmasq
image: gcr.io/google_containers/kube-dnsmasq-amd64:1.3
image: gcr.io/google_containers/kube-dnsmasq-amd64:1.4
livenessProbe:
httpGet:
path: /healthz-dnsmasq
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
args:
- --cache-size=1000
- --no-resolv
@@ -96,15 +104,6 @@ spec:
- containerPort: 53
name: dns-tcp
protocol: TCP
livenessProbe:
httpGet:
path: /healthz-dnsmasq
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
- name: healthz
image: gcr.io/google_containers/exechealthz-amd64:1.2
resources:

View File

@@ -1,29 +1,29 @@
apiVersion: v1
kind: ReplicationController
metadata:
name: kube-dns-v19
name: kube-dns-v20
namespace: kube-system
labels:
k8s-app: kube-dns
version: v19
version: v20
kubernetes.io/cluster-service: "true"
spec:
replicas: ${DNS_REPLICAS}
selector:
k8s-app: kube-dns
version: v19
version: v20
template:
metadata:
labels:
k8s-app: kube-dns
version: v19
version: v20
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
spec:
containers:
- name: kubedns
image: gcr.io/google_containers/kubedns-amd64:1.7
image: gcr.io/google_containers/kubedns-amd64:1.8
resources:
# TODO: Set memory limits when we've profiled the container for large
# clusters, then set request = limit to keep this container in
@@ -64,7 +64,7 @@ spec:
name: dns-tcp-local
protocol: TCP
- name: dnsmasq
image: gcr.io/google_containers/kube-dnsmasq-amd64:1.3
image: gcr.io/google_containers/kube-dnsmasq-amd64:1.4
livenessProbe:
httpGet:
path: /healthz-dnsmasq

View File

@@ -17,29 +17,29 @@
apiVersion: v1
kind: ReplicationController
metadata:
name: kube-dns-v19
name: kube-dns-v20
namespace: kube-system
labels:
k8s-app: kube-dns
version: v19
version: v20
kubernetes.io/cluster-service: "true"
spec:
replicas: {{ pillar['dns_replicas'] }}
selector:
k8s-app: kube-dns
version: v19
version: v20
template:
metadata:
labels:
k8s-app: kube-dns
version: v19
version: v20
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
spec:
containers:
- name: kubedns
image: gcr.io/google_containers/kubedns-{{ arch }}:1.7
image: gcr.io/google_containers/kubedns-{{ arch }}:1.8
resources:
# TODO: Set memory limits when we've profiled the container for large
# clusters, then set request = limit to keep this container in
@@ -82,7 +82,7 @@ spec:
name: dns-tcp-local
protocol: TCP
- name: dnsmasq
image: gcr.io/google_containers/kube-dnsmasq-{{ arch }}:1.3
image: gcr.io/google_containers/kube-dnsmasq-{{ arch }}:1.4
livenessProbe:
httpGet:
path: /healthz-dnsmasq