Merge pull request #36008 from MrHohn/addon-rc-migrate

Automatic merge from submit-queue

Migrates addons from RCs to Deployments

Fixes #33698.

The following addons are being migrated:
- kube-dns
- GLBC default backend
- Dashboard UI
- Kibana

For the new Deployments, the version suffixes are removed from their names. Version-related labels are also removed, because they are confusing and no longer needed given how Deployments and the new Addon Manager work.
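For example, the GLBC default backend controller goes from a versioned ReplicationController to an unversioned Deployment; the abridged header below is taken from the diff further down:

```yaml
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  # No more "-v1.0" suffix in the name and no "version" label.
  name: l7-default-backend
  namespace: kube-system
  labels:
    k8s-app: glbc
    kubernetes.io/name: "GLBC"
    kubernetes.io/cluster-service: "true"
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: glbc
  template:
    metadata:
      labels:
        k8s-app: glbc
```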

The `replicas` field is removed from the `kube-dns` Deployment manifest in preparation for the upcoming DNS horizontal autoscaling feature (#33239).
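Concretely, the new kube-dns Deployments leave `replicas` unset and document the reasoning inline; abridged from the manifests in this diff:

```yaml
spec:
  # replicas is intentionally not specified:
  # 1. so the Addon Manager does not reconcile this field,
  # 2. the API default is 1, and
  # 3. the DNS horizontal autoscaler will tune it at runtime when enabled.
  selector:
    matchLabels:
      k8s-app: kube-dns
```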

The `replicas` field is also removed from the Dashboard Deployment manifest because the rescheduler e2e test manually scales it.

Some resource-limit fields in `heapster-controller.yaml` are removed, as they will be set by the `addon-resizer` containers. See #34513 for the detailed reasoning.
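For context, those limits are expected to be maintained at runtime by the addon-resizer (nanny) sidecar in the same pod. A rough sketch of such a sidecar is below; the image tag and flag values are illustrative assumptions and are not part of this diff:

```yaml
# Hypothetical addon-resizer sidecar; exact image and flag values are assumptions, not from this PR.
- name: heapster-nanny
  image: gcr.io/google_containers/addon-resizer:1.6
  command:
    - /pod_nanny
    - --cpu={{ base_metrics_cpu }}
    - --extra-cpu={{ metrics_cpu_per_node }}m
    - --memory={{ base_metrics_memory }}
    - --extra-memory={{ metrics_memory_per_node }}Mi
    - --deployment=heapster-v1.2.0
    - --container=heapster
```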

Three e2e tests are modified:
- `rescheduler.go`: Changed to resize the Dashboard UI Deployment instead of the ReplicationController.
- `addon_update.go`: Some namespace-related changes to make it compatible with the new Addon Manager.
- `dns_autoscaling.go`: Changed to examine the kube-dns Deployment instead of the ReplicationController.

The tests above passed on my own cluster. The upgrade path from the old RC-based addons to the new Deployment-based addons was also tested and worked as expected.

The last commit upgrades the Addon Manager to v6.0. It is still a work in progress and is currently waiting on #35220 to be finished. (The Addon Manager image in use comes from an unofficial registry, but it mostly works except for some corner cases.)

@piosz @gmarek could you please review the heapster part and the rescheduler test?

@mikedanese @thockin 

cc @kubernetes/sig-cluster-lifecycle 

---

Notes:
- The kube-dns manifests still use the *-rc.yaml file names for the new Deployment. The stale file names are kept here to allow a faster review; a follow-up PR may reorganize the kube-dns file names.
- The Heapster Deployment keeps its old-style name (with the `-v1.2.0` suffix) to avoid having to describe this upgrade transition explicitly. This way we don't need to attach fake apply labels to the old Deployments.
Kubernetes Submit Queue 2016-11-10 02:36:38 -08:00 committed by GitHub
commit c98fc70195
48 changed files with 173 additions and 244 deletions


@ -1,25 +1,22 @@
apiVersion: v1
kind: ReplicationController
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: l7-default-backend-v1.0
name: l7-default-backend
namespace: kube-system
labels:
k8s-app: glbc
version: v1.0
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "GLBC"
kubernetes.io/cluster-service: "true"
spec:
replicas: 1
selector:
k8s-app: glbc
version: v1.0
matchLabels:
k8s-app: glbc
template:
metadata:
labels:
k8s-app: glbc
version: v1.0
name: glbc
kubernetes.io/cluster-service: "true"
spec:
containers:
- name: default-http-backend


@ -1,9 +1,6 @@
{% set base_metrics_memory = "140Mi" -%}
{% set metrics_memory = base_metrics_memory -%}
{% set base_metrics_cpu = "80m" -%}
{% set metrics_cpu = base_metrics_cpu -%}
{% set base_eventer_memory = "190Mi" -%}
{% set eventer_memory = base_eventer_memory -%}
{% set metrics_memory_per_node = 4 -%}
{% set metrics_cpu_per_node = 0.5 -%}
{% set eventer_memory_per_node = 500 -%}
@ -11,9 +8,6 @@
{% set nanny_memory = "90Mi" -%}
{% set nanny_memory_per_node = 200 -%}
{% if num_nodes >= 0 -%}
{% set metrics_memory = (200 + num_nodes * metrics_memory_per_node)|string + "Mi" -%}
{% set metrics_cpu = (80 + num_nodes * metrics_cpu_per_node)|string + "m" -%}
{% set eventer_memory = (200 * 1024 + num_nodes * eventer_memory_per_node)|string + "Ki" -%}
{% set nanny_memory = (90 * 1024 + num_nodes * nanny_memory_per_node)|string + "Ki" -%}
{% endif -%}
@ -51,14 +45,6 @@ spec:
scheme: HTTP
initialDelaySeconds: 180
timeoutSeconds: 5
resources:
# keep request = limit to keep this container in guaranteed class
limits:
cpu: {{ metrics_cpu }}
memory: {{ metrics_memory }}
requests:
cpu: {{ metrics_cpu }}
memory: {{ metrics_memory }}
command:
- /heapster
- --source=kubernetes.summary_api:''
@ -69,14 +55,6 @@ spec:
readOnly: true
- image: gcr.io/google_containers/heapster:v1.2.0
name: eventer
resources:
# keep request = limit to keep this container in guaranteed class
limits:
cpu: 100m
memory: {{ eventer_memory }}
requests:
cpu: 100m
memory: {{ eventer_memory }}
command:
- /eventer
- --source=kubernetes:''


@ -1,9 +1,6 @@
{% set base_metrics_memory = "140Mi" -%}
{% set metrics_memory = base_metrics_memory -%}
{% set base_metrics_cpu = "80m" -%}
{% set metrics_cpu = base_metrics_cpu -%}
{% set base_eventer_memory = "190Mi" -%}
{% set eventer_memory = base_eventer_memory -%}
{% set metrics_memory_per_node = 4 -%}
{% set metrics_cpu_per_node = 0.5 -%}
{% set eventer_memory_per_node = 500 -%}
@ -11,9 +8,6 @@
{% set nanny_memory = "90Mi" -%}
{% set nanny_memory_per_node = 200 -%}
{% if num_nodes >= 0 -%}
{% set metrics_memory = (200 + num_nodes * metrics_memory_per_node)|string + "Mi" -%}
{% set metrics_cpu = (80 + num_nodes * metrics_cpu_per_node)|string + "m" -%}
{% set eventer_memory = (200 * 1024 + num_nodes * eventer_memory_per_node)|string + "Ki" -%}
{% set nanny_memory = (90 * 1024 + num_nodes * nanny_memory_per_node)|string + "Ki" -%}
{% endif -%}
@ -51,14 +45,6 @@ spec:
scheme: HTTP
initialDelaySeconds: 180
timeoutSeconds: 5
resources:
# keep request = limit to keep this container in guaranteed class
limits:
cpu: {{ metrics_cpu }}
memory: {{ metrics_memory }}
requests:
cpu: {{ metrics_cpu }}
memory: {{ metrics_memory }}
command:
- /heapster
- --source=kubernetes.summary_api:''
@ -70,14 +56,6 @@ spec:
readOnly: true
- image: gcr.io/google_containers/heapster:v1.2.0
name: eventer
resources:
# keep request = limit to keep this container in guaranteed class
limits:
cpu: 100m
memory: {{ eventer_memory }}
requests:
cpu: 100m
memory: {{ eventer_memory }}
command:
- /eventer
- --source=kubernetes:''


@ -1,9 +1,6 @@
{% set base_metrics_memory = "140Mi" -%}
{% set metrics_memory = base_metrics_memory -%}
{% set base_metrics_cpu = "80m" -%}
{% set metrics_cpu = base_metrics_cpu -%}
{% set base_eventer_memory = "190Mi" -%}
{% set eventer_memory = base_eventer_memory -%}
{% set metrics_memory_per_node = 4 -%}
{% set metrics_cpu_per_node = 0.5|float -%}
{% set eventer_memory_per_node = 500 -%}
@ -11,9 +8,6 @@
{% set nanny_memory = "90Mi" -%}
{% set nanny_memory_per_node = 200 -%}
{% if num_nodes >= 0 -%}
{% set metrics_memory = (200 + num_nodes * metrics_memory_per_node)|string + "Mi" -%}
{% set metrics_cpu = (80 + num_nodes * metrics_cpu_per_node)|string + "m" -%}
{% set eventer_memory = (200 * 1024 + num_nodes * eventer_memory_per_node)|string + "Ki" -%}
{% set nanny_memory = (90 * 1024 + num_nodes * nanny_memory_per_node)|string + "Ki" -%}
{% endif -%}
@ -51,28 +45,12 @@ spec:
scheme: HTTP
initialDelaySeconds: 180
timeoutSeconds: 5
resources:
# keep request = limit to keep this container in guaranteed class
limits:
cpu: {{ metrics_cpu }}
memory: {{ metrics_memory }}
requests:
cpu: {{ metrics_cpu }}
memory: {{ metrics_memory }}
command:
- /heapster
- --source=kubernetes.summary_api:''
- --sink=influxdb:http://monitoring-influxdb:8086
- image: gcr.io/google_containers/heapster:v1.2.0
name: eventer
resources:
# keep request = limit to keep this container in guaranteed class
limits:
cpu: 100m
memory: {{ eventer_memory }}
requests:
cpu: 100m
memory: {{ eventer_memory }}
command:
- /eventer
- --source=kubernetes:''


@ -1,16 +1,12 @@
{% set base_metrics_memory = "140Mi" -%}
{% set metrics_memory = base_metrics_memory -%}
{% set metrics_memory_per_node = 4 -%}
{% set base_metrics_cpu = "80m" -%}
{% set metrics_cpu = base_metrics_cpu -%}
{% set metrics_cpu_per_node = 0.5 -%}
{% set num_nodes = pillar.get('num_nodes', -1) -%}
{% set nanny_memory = "90Mi" -%}
{% set nanny_memory_per_node = 200 -%}
{% if num_nodes >= 0 -%}
{% set metrics_memory = (200 + num_nodes * metrics_memory_per_node)|string + "Mi" -%}
{% set nanny_memory = (90 * 1024 + num_nodes * nanny_memory_per_node)|string + "Ki" -%}
{% set metrics_cpu = (80 + num_nodes * metrics_cpu_per_node)|string + "m" -%}
{% endif -%}
apiVersion: extensions/v1beta1
@ -47,14 +43,6 @@ spec:
scheme: HTTP
initialDelaySeconds: 180
timeoutSeconds: 5
resources:
# keep request = limit to keep this container in guaranteed class
limits:
cpu: {{ metrics_cpu }}
memory: {{ metrics_memory }}
requests:
cpu: {{ metrics_cpu }}
memory: {{ metrics_memory }}
command:
- /heapster
- --source=kubernetes.summary_api:''


@ -1,23 +1,20 @@
# This file should be kept in sync with cluster/gce/coreos/kube-manifests/addons/dashboard/dashboard-controller.yaml
apiVersion: v1
kind: ReplicationController
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: kubernetes-dashboard-v1.4.0
name: kubernetes-dashboard
namespace: kube-system
labels:
k8s-app: kubernetes-dashboard
version: v1.4.0
kubernetes.io/cluster-service: "true"
spec:
replicas: 1
selector:
k8s-app: kubernetes-dashboard
matchLabels:
k8s-app: kubernetes-dashboard
template:
metadata:
labels:
k8s-app: kubernetes-dashboard
version: v1.4.0
kubernetes.io/cluster-service: "true"
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'


@ -42,7 +42,7 @@ spec:
- --configmap=kube-dns-autoscaler
- --mode=linear
# Should keep target in sync with cluster/addons/dns/skydns-rc.yaml.base
- --target=ReplicationController/kube-dns-v20
- --target=Deployment/kube-dns
# When cluster is using large nodes(with more cores), "coresPerReplica" should dominate.
# If using small nodes, "nodesPerReplica" should dominate.
- --default-params={"linear":{"coresPerReplica":256,"nodesPerReplica":16,"min":1}}


@ -18,25 +18,26 @@
# __MACHINE_GENERATED_WARNING__
apiVersion: v1
kind: ReplicationController
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: kube-dns-v20
name: kube-dns
namespace: kube-system
labels:
k8s-app: kube-dns
version: v20
kubernetes.io/cluster-service: "true"
spec:
replicas: __PILLAR__DNS__REPLICAS__
# replicas: not specified here:
# 1. In order to make Addon Manager do not reconcile this replicas parameter.
# 2. Default is 1.
# 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
selector:
k8s-app: kube-dns
version: v20
matchLabels:
k8s-app: kube-dns
template:
metadata:
labels:
k8s-app: kube-dns
version: v20
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'


@ -18,25 +18,26 @@
# Warning: This is a file generated from the base underscore template file: skydns-rc.yaml.base
apiVersion: v1
kind: ReplicationController
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: kube-dns-v20
name: kube-dns
namespace: kube-system
labels:
k8s-app: kube-dns
version: v20
kubernetes.io/cluster-service: "true"
spec:
replicas: {{ pillar['dns_replicas'] }}
# replicas: not specified here:
# 1. In order to make Addon Manager do not reconcile this replicas parameter.
# 2. Default is 1.
# 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
selector:
k8s-app: kube-dns
version: v20
matchLabels:
k8s-app: kube-dns
template:
metadata:
labels:
k8s-app: kube-dns
version: v20
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'


@ -18,25 +18,26 @@
# Warning: This is a file generated from the base underscore template file: skydns-rc.yaml.base
apiVersion: v1
kind: ReplicationController
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: kube-dns-v20
name: kube-dns
namespace: kube-system
labels:
k8s-app: kube-dns
version: v20
kubernetes.io/cluster-service: "true"
spec:
replicas: $DNS_REPLICAS
# replicas: not specified here:
# 1. In order to make Addon Manager do not reconcile this replicas parameter.
# 2. Default is 1.
# 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
selector:
k8s-app: kube-dns
version: v20
matchLabels:
k8s-app: kube-dns
template:
metadata:
labels:
k8s-app: kube-dns
version: v20
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'


@ -1,5 +1,4 @@
s/__PILLAR__DNS__SERVER__/{{ pillar['dns_server'] }}/g
s/__PILLAR__DNS__REPLICAS__/{{ pillar['dns_replicas'] }}/g
s/__PILLAR__DNS__DOMAIN__/{{ pillar['dns_domain'] }}/g
s/__PILLAR__FEDERATIONS__DOMAIN__MAP__/{{ pillar['federations_domain_map'] }}/g
s/__MACHINE_GENERATED_WARNING__/Warning: This is a file generated from the base underscore template file: __SOURCE_FILENAME__/g
s/__MACHINE_GENERATED_WARNING__/Warning: This is a file generated from the base underscore template file: __SOURCE_FILENAME__/g


@ -1,5 +1,4 @@
s/__PILLAR__DNS__SERVER__/$DNS_SERVER_IP/g
s/__PILLAR__DNS__REPLICAS__/$DNS_REPLICAS/g
s/__PILLAR__DNS__DOMAIN__/$DNS_DOMAIN/g
/__PILLAR__FEDERATIONS__DOMAIN__MAP__/d
s/__MACHINE_GENERATED_WARNING__/Warning: This is a file generated from the base underscore template file: __SOURCE_FILENAME__/g
s/__MACHINE_GENERATED_WARNING__/Warning: This is a file generated from the base underscore template file: __SOURCE_FILENAME__/g


@ -1,23 +1,20 @@
apiVersion: v1
kind: ReplicationController
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: kibana-logging-v1
name: kibana-logging
namespace: kube-system
labels:
k8s-app: kibana-logging
version: v1
kubernetes.io/cluster-service: "true"
spec:
replicas: 1
selector:
k8s-app: kibana-logging
version: v1
matchLabels:
k8s-app: kibana-logging
template:
metadata:
labels:
k8s-app: kibana-logging
version: v1
kubernetes.io/cluster-service: "true"
spec:
containers:
- name: kibana-logging


@ -120,7 +120,6 @@ fi
ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}"
DNS_SERVER_IP="${DNS_SERVER_IP:-10.0.0.10}"
DNS_DOMAIN="cluster.local"
DNS_REPLICAS=1
# Optional: Enable DNS horizontal autoscaler
ENABLE_DNS_HORIZONTAL_AUTOSCALER="${KUBE_ENABLE_DNS_HORIZONTAL_AUTOSCALER:-false}"


@ -106,7 +106,6 @@ fi
ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}"
DNS_SERVER_IP="${DNS_SERVER_IP:-10.0.0.10}"
DNS_DOMAIN="cluster.local"
DNS_REPLICAS=1
# Optional: Enable DNS horizontal autoscaler
ENABLE_DNS_HORIZONTAL_AUTOSCALER="${KUBE_ENABLE_DNS_HORIZONTAL_AUTOSCALER:-false}"


@ -605,7 +605,6 @@ ENABLE_CLUSTER_DNS: $(yaml-quote ${ENABLE_CLUSTER_DNS:-false})
ENABLE_CLUSTER_REGISTRY: $(yaml-quote ${ENABLE_CLUSTER_REGISTRY:-false})
CLUSTER_REGISTRY_DISK: $(yaml-quote ${CLUSTER_REGISTRY_DISK:-})
CLUSTER_REGISTRY_DISK_SIZE: $(yaml-quote ${CLUSTER_REGISTRY_DISK_SIZE:-})
DNS_REPLICAS: $(yaml-quote ${DNS_REPLICAS:-})
DNS_SERVER_IP: $(yaml-quote ${DNS_SERVER_IP:-})
DNS_DOMAIN: $(yaml-quote ${DNS_DOMAIN:-})
ENABLE_DNS_HORIZONTAL_AUTOSCALER: $(yaml-quote ${ENABLE_DNS_HORIZONTAL_AUTOSCALER:-false})


@ -113,7 +113,6 @@ FEATURE_GATES="${KUBE_FEATURE_GATES:-}"
ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}"
DNS_SERVER_IP="${KUBE_DNS_SERVER_IP:-10.0.0.10}"
DNS_DOMAIN="${KUBE_DNS_DOMAIN:-cluster.local}"
DNS_REPLICAS=1
# Optional: Enable DNS horizontal autoscaler
ENABLE_DNS_HORIZONTAL_AUTOSCALER="${KUBE_ENABLE_DNS_HORIZONTAL_AUTOSCALER:-true}"


@ -140,7 +140,6 @@ fi
ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}"
DNS_SERVER_IP="10.0.0.10"
DNS_DOMAIN="cluster.local"
DNS_REPLICAS=1
# Optional: Enable DNS horizontal autoscaler
ENABLE_DNS_HORIZONTAL_AUTOSCALER="${KUBE_ENABLE_DNS_HORIZONTAL_AUTOSCALER:-true}"


@ -439,7 +439,6 @@ logging_destination: '$(echo "$LOGGING_DESTINATION" | sed -e "s/'/''/g")'
elasticsearch_replicas: '$(echo "$ELASTICSEARCH_LOGGING_REPLICAS" | sed -e "s/'/''/g")'
enable_cluster_dns: '$(echo "$ENABLE_CLUSTER_DNS" | sed -e "s/'/''/g")'
enable_cluster_registry: '$(echo "$ENABLE_CLUSTER_REGISTRY" | sed -e "s/'/''/g")'
dns_replicas: '$(echo "$DNS_REPLICAS" | sed -e "s/'/''/g")'
dns_server: '$(echo "$DNS_SERVER_IP" | sed -e "s/'/''/g")'
dns_domain: '$(echo "$DNS_DOMAIN" | sed -e "s/'/''/g")'
enable_dns_horizontal_autoscaler: '$(echo "$ENABLE_DNS_HORIZONTAL_AUTOSCALER" | sed -e "s/'/''/g")'


@ -1,23 +1,23 @@
apiVersion: v1
kind: ReplicationController
# Keep this file in sync with addons/dashboard/dashboard-controller.yaml
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
# Keep this file in sync with addons/dashboard/dashboard-controller.yaml
name: kubernetes-dashboard-v1.4.0
name: kubernetes-dashboard
namespace: kube-system
labels:
k8s-app: kubernetes-dashboard
version: v1.4.0
kubernetes.io/cluster-service: "true"
spec:
replicas: 1
selector:
k8s-app: kubernetes-dashboard
matchLabels:
k8s-app: kubernetes-dashboard
template:
metadata:
labels:
k8s-app: kubernetes-dashboard
version: v1.4.0
kubernetes.io/cluster-service: "true"
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
spec:
containers:
- name: kubernetes-dashboard


@ -1,22 +1,23 @@
apiVersion: v1
kind: ReplicationController
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: kube-dns-v20
name: kube-dns
namespace: kube-system
labels:
k8s-app: kube-dns
version: v20
kubernetes.io/cluster-service: "true"
spec:
replicas: ${DNS_REPLICAS}
# replicas: not specified here:
# 1. In order to make Addon Manager do not reconcile this replicas parameter.
# 2. Default is 1.
# 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
selector:
k8s-app: kube-dns
version: v20
matchLabels:
k8s-app: kube-dns
template:
metadata:
labels:
k8s-app: kube-dns
version: v20
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'


@ -990,11 +990,8 @@ function start-kube-addons {
setup-addon-manifests "addons" "${file_dir}"
# Replace the salt configurations with variable values.
base_metrics_memory="140Mi"
metrics_memory="${base_metrics_memory}"
base_eventer_memory="190Mi"
base_metrics_cpu="80m"
metrics_cpu="${base_metrics_cpu}"
eventer_memory="${base_eventer_memory}"
nanny_memory="90Mi"
local -r metrics_memory_per_node="4"
local -r metrics_cpu_per_node="0.5"
@ -1002,10 +999,7 @@ function start-kube-addons {
local -r nanny_memory_per_node="200"
if [[ -n "${NUM_NODES:-}" && "${NUM_NODES}" -ge 1 ]]; then
num_kube_nodes="$((${NUM_NODES}+1))"
metrics_memory="$((${num_kube_nodes} * ${metrics_memory_per_node} + 200))Mi"
eventer_memory="$((${num_kube_nodes} * ${eventer_memory_per_node} + 200 * 1024))Ki"
nanny_memory="$((${num_kube_nodes} * ${nanny_memory_per_node} + 90 * 1024))Ki"
metrics_cpu=$(echo - | awk "{print ${num_kube_nodes} * ${metrics_cpu_per_node} + 80}")m
fi
controller_yaml="${dst_dir}/${file_dir}"
if [[ "${ENABLE_CLUSTER_MONITORING:-}" == "googleinfluxdb" ]]; then
@ -1015,11 +1009,8 @@ function start-kube-addons {
fi
remove-salt-config-comments "${controller_yaml}"
sed -i -e "s@{{ *base_metrics_memory *}}@${base_metrics_memory}@g" "${controller_yaml}"
sed -i -e "s@{{ *metrics_memory *}}@${metrics_memory}@g" "${controller_yaml}"
sed -i -e "s@{{ *base_metrics_cpu *}}@${base_metrics_cpu}@g" "${controller_yaml}"
sed -i -e "s@{{ *metrics_cpu *}}@${metrics_cpu}@g" "${controller_yaml}"
sed -i -e "s@{{ *base_eventer_memory *}}@${base_eventer_memory}@g" "${controller_yaml}"
sed -i -e "s@{{ *eventer_memory *}}@${eventer_memory}@g" "${controller_yaml}"
sed -i -e "s@{{ *metrics_memory_per_node *}}@${metrics_memory_per_node}@g" "${controller_yaml}"
sed -i -e "s@{{ *eventer_memory_per_node *}}@${eventer_memory_per_node}@g" "${controller_yaml}"
sed -i -e "s@{{ *nanny_memory *}}@${nanny_memory}@g" "${controller_yaml}"
@ -1032,7 +1023,6 @@ function start-kube-addons {
mv "${dst_dir}/dns/skydns-rc.yaml.in" "${dns_rc_file}"
mv "${dst_dir}/dns/skydns-svc.yaml.in" "${dns_svc_file}"
# Replace the salt configurations with variable values.
sed -i -e "s@{{ *pillar\['dns_replicas'\] *}}@${DNS_REPLICAS}@g" "${dns_rc_file}"
sed -i -e "s@{{ *pillar\['dns_domain'\] *}}@${DNS_DOMAIN}@g" "${dns_rc_file}"
sed -i -e "s@{{ *pillar\['dns_server'\] *}}@${DNS_SERVER_IP}@g" "${dns_svc_file}"


@ -831,11 +831,8 @@ start_kube_addons() {
setup_addon_manifests "addons" "${file_dir}"
# Replace the salt configurations with variable values.
base_metrics_memory="140Mi"
metrics_memory="${base_metrics_memory}"
base_eventer_memory="190Mi"
base_metrics_cpu="80m"
metrics_cpu="${base_metrics_cpu}"
eventer_memory="${base_eventer_memory}"
nanny_memory="90Mi"
readonly metrics_memory_per_node="4"
readonly metrics_cpu_per_node="0.5"
@ -843,10 +840,7 @@ start_kube_addons() {
readonly nanny_memory_per_node="200"
if [ -n "${NUM_NODES:-}" ] && [ "${NUM_NODES}" -ge 1 ]; then
num_kube_nodes="$((${NUM_NODES}+1))"
metrics_memory="$((${num_kube_nodes} * ${metrics_memory_per_node} + 200))Mi"
eventer_memory="$((${num_kube_nodes} * ${eventer_memory_per_node} + 200 * 1024))Ki"
nanny_memory="$((${num_kube_nodes} * ${nanny_memory_per_node} + 90 * 1024))Ki"
metrics_cpu=$(echo - | awk "{print ${num_kube_nodes} * ${metrics_cpu_per_node} + 80}")m
fi
controller_yaml="${addon_dst_dir}/${file_dir}"
if [ "${ENABLE_CLUSTER_MONITORING:-}" = "googleinfluxdb" ]; then
@ -856,11 +850,8 @@ start_kube_addons() {
fi
remove_salt_config_comments "${controller_yaml}"
sed -i -e "s@{{ *base_metrics_memory *}}@${base_metrics_memory}@g" "${controller_yaml}"
sed -i -e "s@{{ *metrics_memory *}}@${metrics_memory}@g" "${controller_yaml}"
sed -i -e "s@{{ *base_metrics_cpu *}}@${base_metrics_cpu}@g" "${controller_yaml}"
sed -i -e "s@{{ *metrics_cpu *}}@${metrics_cpu}@g" "${controller_yaml}"
sed -i -e "s@{{ *base_eventer_memory *}}@${base_eventer_memory}@g" "${controller_yaml}"
sed -i -e "s@{{ *eventer_memory *}}@${eventer_memory}@g" "${controller_yaml}"
sed -i -e "s@{{ *metrics_memory_per_node *}}@${metrics_memory_per_node}@g" "${controller_yaml}"
sed -i -e "s@{{ *eventer_memory_per_node *}}@${eventer_memory_per_node}@g" "${controller_yaml}"
sed -i -e "s@{{ *nanny_memory *}}@${nanny_memory}@g" "${controller_yaml}"
@ -879,7 +870,6 @@ start_kube_addons() {
mv "${addon_dst_dir}/dns/skydns-rc.yaml.in" "${dns_rc_file}"
mv "${addon_dst_dir}/dns/skydns-svc.yaml.in" "${dns_svc_file}"
# Replace the salt configurations with variable values.
sed -i -e "s@{{ *pillar\['dns_replicas'\] *}}@${DNS_REPLICAS}@g" "${dns_rc_file}"
sed -i -e "s@{{ *pillar\['dns_domain'\] *}}@${DNS_DOMAIN}@g" "${dns_rc_file}"
sed -i -e "s@{{ *pillar\['dns_server'\] *}}@${DNS_SERVER_IP}@g" "${dns_svc_file}"


@ -76,7 +76,7 @@ endif
cd ${TEMP_DIR} && sed -i.back "s|BASEIMAGE|${BASEIMAGE}|g" Dockerfile
cd ${TEMP_DIR} && sed -i.back "s|CACHEBUST|$(shell uuidgen)|g" Dockerfile
cd ${TEMP_DIR} && sed -i.back "s|-amd64|-${ARCH}|g" addons/singlenode/*.yaml addons/multinode/*.yaml
cd ${TEMP_DIR} && sed -i.back "s|__PILLAR__DNS__REPLICAS__|1|g;s|__PILLAR__DNS__SERVER__|10.0.0.10|g;" addons/singlenode/skydns*.yaml addons/multinode/skydns*.yaml
cd ${TEMP_DIR} && sed -i.back "s|__PILLAR__DNS__SERVER__|10.0.0.10|g" addons/singlenode/skydns*.yaml addons/multinode/skydns*.yaml
cd ${TEMP_DIR} && sed -i.back "s|__PILLAR__DNS__DOMAIN__|cluster.local|g;s|__PILLAR__FEDERATIONS__DOMAIN__MAP__||g;" addons/singlenode/skydns*.yaml addons/multinode/skydns*.yaml
cd ${TEMP_DIR} && rm -f addons/singlenode/*.back addons/multinode/*.back static-pods/*.back


@ -11,7 +11,7 @@
"containers": [
{
"name": "kube-addon-manager",
"image": "REGISTRY/kube-addon-manager-ARCH:v5.2",
"image": "REGISTRY/kube-addon-manager-ARCH:v6.0-alpha.1",
"resources": {
"requests": {
"cpu": "5m",


@ -11,7 +11,7 @@
"containers": [
{
"name": "kube-addon-manager",
"image": "REGISTRY/kube-addon-manager-ARCH:v5.2",
"image": "REGISTRY/kube-addon-manager-ARCH:v6.0-alpha.1",
"resources": {
"requests": {
"cpu": "5m",


@ -330,8 +330,6 @@ def gather_sdn_data():
else:
# There is no SDN cider fall back to the kubernetes config cidr option.
pillar['dns_server'] = get_dns_ip(hookenv.config().get('cidr'))
# The pillar['dns_server'] value is used the kubedns-svc.yaml file.
pillar['dns_replicas'] = 1
# The pillar['dns_domain'] value is used in the kubedns-rc.yaml
pillar['dns_domain'] = hookenv.config().get('dns_domain')
# Use a 'pillar' dictionary so we can reuse the upstream kubedns templates.


@ -14,25 +14,26 @@
# Warning: This is a file generated from the base underscore template file: skydns-rc.yaml.base
apiVersion: v1
kind: ReplicationController
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: kube-dns-v20
name: kube-dns
namespace: kube-system
labels:
k8s-app: kube-dns
version: v20
kubernetes.io/cluster-service: "true"
spec:
replicas: {{ pillar['dns_replicas'] }}
# replicas: not specified here:
# 1. In order to make Addon Manager do not reconcile this replicas parameter.
# 2. Default is 1.
# 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
selector:
k8s-app: kube-dns
version: v20
matchLabels:
k8s-app: kube-dns
template:
metadata:
labels:
k8s-app: kube-dns
version: v20
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'


@ -57,7 +57,6 @@ LOGGING_DESTINATION=elasticsearch
ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}"
DNS_SERVER_IP="${SERVICE_CLUSTER_IP_RANGE%.*}.254"
DNS_DOMAIN="cluster.local"
DNS_REPLICAS=1
# Optional: Enable DNS horizontal autoscaler
ENABLE_DNS_HORIZONTAL_AUTOSCALER="${KUBE_ENABLE_DNS_HORIZONTAL_AUTOSCALER:-false}"


@ -1,4 +1,3 @@
s/\"/\\"/g
s/DNS_SERVER_IP/{DNS_SERVER_IP}/g
s/DNS_REPLICAS/{DNS_REPLICAS}/g
s/DNS_DOMAIN/{DNS_DOMAIN}/g


@ -32,7 +32,6 @@ DOCKER_OPTS=""
ENABLE_CLUSTER_DNS=true
DNS_SERVER_IP="10.10.10.10"
DNS_DOMAIN="cluster.local"
DNS_REPLICAS=1
# Optional: Enable DNS horizontal autoscaler
ENABLE_DNS_HORIZONTAL_AUTOSCALER="${KUBE_ENABLE_DNS_HORIZONTAL_AUTOSCALER:-false}"


@ -28,7 +28,7 @@ kubectl="${KUBE_ROOT}/cluster/kubectl.sh"
workspace=$(pwd)
# Process salt pillar templates manually
sed -e "s/{{ pillar\['dns_replicas'\] }}/${DNS_REPLICAS}/g;s/{{ pillar\['dns_domain'\] }}/${DNS_DOMAIN}/g" "${KUBE_ROOT}/cluster/addons/dns/skydns-rc.yaml.in" > "${workspace}/skydns-rc.yaml"
sed -e "s/{{ pillar\['dns_domain'\] }}/${DNS_DOMAIN}/g" "${KUBE_ROOT}/cluster/addons/dns/skydns-rc.yaml.in" > "${workspace}/skydns-rc.yaml"
sed -e "s/{{ pillar\['dns_server'\] }}/${DNS_SERVER_IP}/g" "${KUBE_ROOT}/cluster/addons/dns/skydns-svc.yaml.in" > "${workspace}/skydns-svc.yaml"
# Federation specific values.
@ -46,6 +46,6 @@ else
sed -i -e "/{{ pillar\['federations_domain_map'\] }}/d" "${workspace}/skydns-rc.yaml"
fi
# Use kubectl to create skydns rc and service
# Use kubectl to create kube-dns controller and service
"${kubectl}" create -f "${workspace}/skydns-rc.yaml"
"${kubectl}" create -f "${workspace}/skydns-svc.yaml"


@ -48,7 +48,6 @@ write_files:
logging_destination: elasticsearch
elasticsearch_replicas: "1"
enable_cluster_dns: "true"
dns_replicas: "1"
dns_server: 10.246.0.10
dns_domain: cluster.local
enable_dns_horizontal_autoscaler: "false"


@ -77,7 +77,6 @@ ENABLE_CLUSTER_MONITORING="${KUBE_ENABLE_CLUSTER_MONITORING:-influxdb}"
ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}"
DNS_SERVER_IP="10.244.240.240"
DNS_DOMAIN="cluster.local"
DNS_REPLICAS=1
# Optional: Enable DNS horizontal autoscaler
ENABLE_DNS_HORIZONTAL_AUTOSCALER="${KUBE_ENABLE_DNS_HORIZONTAL_AUTOSCALER:-false}"


@ -117,7 +117,6 @@ enable_node_logging: "${ENABLE_NODE_LOGGING:false}"
logging_destination: $LOGGING_DESTINATION
elasticsearch_replicas: $ELASTICSEARCH_LOGGING_REPLICAS
enable_cluster_dns: "${ENABLE_CLUSTER_DNS:-false}"
dns_replicas: ${DNS_REPLICAS:-1}
dns_server: $DNS_SERVER_IP
dns_domain: $DNS_DOMAIN
federations_domain_map: ''


@ -56,7 +56,6 @@ ENABLE_CLUSTER_MONITORING="${KUBE_ENABLE_CLUSTER_MONITORING:-influxdb}"
ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}"
DNS_SERVER_IP="10.0.0.10"
DNS_DOMAIN="cluster.local"
DNS_REPLICAS=1
# Optional: Enable DNS horizontal autoscaler
ENABLE_DNS_HORIZONTAL_AUTOSCALER="${KUBE_ENABLE_DNS_HORIZONTAL_AUTOSCALER:-false}"


@ -5,13 +5,12 @@ metadata:
namespace: kube-system
labels:
component: kube-addon-manager
version: v4
spec:
hostNetwork: true
containers:
- name: kube-addon-manager
# When updating version also bump it in cluster/images/hyperkube/static-pods/addon-manager.json
image: gcr.io/google-containers/kube-addon-manager:v5.2
image: gcr.io/google-containers/kube-addon-manager:v6.0-alpha.1
command:
- /bin/bash
- -c


@ -116,7 +116,6 @@ ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}"
# DNS_SERVER_IP must be a IP in SERVICE_CLUSTER_IP_RANGE
DNS_SERVER_IP=${DNS_SERVER_IP:-"192.168.3.10"}
DNS_DOMAIN=${DNS_DOMAIN:-"cluster.local"}
DNS_REPLICAS=${DNS_REPLICAS:-1}
# Optional: Enable DNS horizontal autoscaler
ENABLE_DNS_HORIZONTAL_AUTOSCALER="${KUBE_ENABLE_DNS_HORIZONTAL_AUTOSCALER:-false}"


@ -41,29 +41,29 @@ function init {
function deploy_dns {
echo "Deploying DNS on Kubernetes"
sed -e "s/\\\$DNS_REPLICAS/${DNS_REPLICAS}/g;s/\\\$DNS_DOMAIN/${DNS_DOMAIN}/g;" "${KUBE_ROOT}/cluster/addons/dns/skydns-rc.yaml.sed" > skydns-rc.yaml
sed -e "s/\\\$DNS_DOMAIN/${DNS_DOMAIN}/g" "${KUBE_ROOT}/cluster/addons/dns/skydns-rc.yaml.sed" > skydns-rc.yaml
sed -e "s/\\\$DNS_SERVER_IP/${DNS_SERVER_IP}/g" "${KUBE_ROOT}/cluster/addons/dns/skydns-svc.yaml.sed" > skydns-svc.yaml
KUBEDNS=`eval "${KUBECTL} get services --namespace=kube-system | grep kube-dns | cat"`
if [ ! "$KUBEDNS" ]; then
# use kubectl to create skydns rc and service
${KUBECTL} --namespace=kube-system create -f skydns-rc.yaml
${KUBECTL} --namespace=kube-system create -f skydns-svc.yaml
echo "Kube-dns rc and service is successfully deployed."
echo "Kube-dns controller and service are successfully deployed."
else
echo "Kube-dns rc and service is already deployed. Skipping."
echo "Kube-dns controller and service are already deployed. Skipping."
fi
echo
}
function deploy_dashboard {
if ${KUBECTL} get rc -l k8s-app=kubernetes-dashboard --namespace=kube-system | grep kubernetes-dashboard-v &> /dev/null; then
echo "Kubernetes Dashboard replicationController already exists"
if ${KUBECTL} get deployment -l k8s-app=kubernetes-dashboard --namespace=kube-system | grep kubernetes-dashboard-v &> /dev/null; then
echo "Kubernetes Dashboard controller already exists"
else
echo "Creating Kubernetes Dashboard replicationController"
echo "Creating Kubernetes Dashboard controller"
${KUBECTL} create -f ${KUBE_ROOT}/cluster/addons/dashboard/dashboard-controller.yaml
fi


@ -86,7 +86,6 @@ ENABLE_CPU_CFS_QUOTA="${KUBE_ENABLE_CPU_CFS_QUOTA:-true}"
ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}"
DNS_SERVER_IP="10.247.0.10"
DNS_DOMAIN="cluster.local"
DNS_REPLICAS=1
# Optional: Enable DNS horizontal autoscaler
ENABLE_DNS_HORIZONTAL_AUTOSCALER="${KUBE_ENABLE_DNS_HORIZONTAL_AUTOSCALER:-false}"


@ -63,7 +63,6 @@ enable_node_logging: '$(echo "$ENABLE_NODE_LOGGING" | sed -e "s/'/''/g")'
logging_destination: '$(echo "$LOGGING_DESTINATION" | sed -e "s/'/''/g")'
elasticsearch_replicas: '$(echo "$ELASTICSEARCH_LOGGING_REPLICAS" | sed -e "s/'/''/g")'
enable_cluster_dns: '$(echo "$ENABLE_CLUSTER_DNS" | sed -e "s/'/''/g")'
dns_replicas: '$(echo "$DNS_REPLICAS" | sed -e "s/'/''/g")'
dns_server: '$(echo "$DNS_SERVER_IP" | sed -e "s/'/''/g")'
dns_domain: '$(echo "$DNS_DOMAIN" | sed -e "s/'/''/g")'
federations_domain_map: ''


@ -175,7 +175,6 @@ function echo-kube-env() {
echo "ENABLE_CLUSTER_DNS='${ENABLE_CLUSTER_DNS:-false}'"
echo "DNS_SERVER_IP='${DNS_SERVER_IP:-}'"
echo "DNS_DOMAIN='${DNS_DOMAIN:-}'"
echo "DNS_REPLICAS='${DNS_REPLICAS:-}'"
echo "RUNTIME_CONFIG='${RUNTIME_CONFIG:-}'"
echo "ADMISSION_CONTROL='${ADMISSION_CONTROL:-}'"
echo "DOCKER_OPTS='${EXTRA_DOCKER_OPTS:-}'"


@ -53,7 +53,6 @@ ENABLE_CLUSTER_MONITORING="${KUBE_ENABLE_CLUSTER_MONITORING:-influxdb}"
ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}"
DNS_SERVER_IP="10.244.240.240"
DNS_DOMAIN="cluster.local"
DNS_REPLICAS=1
# Optional: Enable DNS horizontal autoscaler
ENABLE_DNS_HORIZONTAL_AUTOSCALER="${KUBE_ENABLE_DNS_HORIZONTAL_AUTOSCALER:-false}"


@ -117,7 +117,6 @@ enable_node_logging: "${ENABLE_NODE_LOGGING:false}"
logging_destination: $LOGGING_DESTINATION
elasticsearch_replicas: $ELASTICSEARCH_LOGGING_REPLICAS
enable_cluster_dns: "${ENABLE_CLUSTER_DNS:-false}"
dns_replicas: ${DNS_REPLICAS:-1}
dns_server: $DNS_SERVER_IP
dns_domain: $DNS_DOMAIN
federations_domain_map: ''


@ -134,6 +134,7 @@ spec:
k8s-app: addon-test
`
// Wrong label case
var invalid_addon_controller_v1 = `
apiVersion: v1
kind: ReplicationController
@ -163,6 +164,7 @@ spec:
protocol: TCP
`
// Wrong label case
var invalid_addon_service_v1 = `
apiVersion: v1
kind: Service
@ -181,9 +183,31 @@ spec:
k8s-app: invalid-addon-test
`
var addonTestPollInterval = 3 * time.Second
var addonTestPollTimeout = 5 * time.Minute
var defaultNsName = api.NamespaceDefault
// Wrong namespace case
var invalid_addon_service_v2 = `
apiVersion: v1
kind: Service
metadata:
name: ivalid-addon-test-v2
namespace: %s
labels:
k8s-app: invalid-addon-test-v2
kubernetes.io/cluster-service: "true"
spec:
ports:
- port: 9377
protocol: TCP
targetPort: 9376
selector:
k8s-app: invalid-addon-test
`
const (
addonTestPollInterval = 3 * time.Second
addonTestPollTimeout = 5 * time.Minute
defaultNsName = api.NamespaceDefault
addonNsName = "kube-system"
)
type stringPair struct {
data, fileName string
@ -240,14 +264,16 @@ var _ = framework.KubeDescribe("Addon update", func() {
svcv1 := "addon-service-v1.yaml"
svcv2 := "addon-service-v2.yaml"
svcInvalid := "invalid-addon-service-v1.yaml"
svcInvalidv2 := "invalid-addon-service-v2.yaml"
var remoteFiles []stringPair = []stringPair{
{fmt.Sprintf(addon_controller_v1, defaultNsName), rcv1},
{fmt.Sprintf(addon_controller_v2, f.Namespace.Name), rcv2},
{fmt.Sprintf(addon_service_v1, f.Namespace.Name), svcv1},
{fmt.Sprintf(addon_service_v2, f.Namespace.Name), svcv2},
{fmt.Sprintf(invalid_addon_controller_v1, f.Namespace.Name), rcInvalid},
{fmt.Sprintf(invalid_addon_service_v1, defaultNsName), svcInvalid},
{fmt.Sprintf(addon_controller_v1, addonNsName), rcv1},
{fmt.Sprintf(addon_controller_v2, addonNsName), rcv2},
{fmt.Sprintf(addon_service_v1, addonNsName), svcv1},
{fmt.Sprintf(addon_service_v2, addonNsName), svcv2},
{fmt.Sprintf(invalid_addon_controller_v1, addonNsName), rcInvalid},
{fmt.Sprintf(invalid_addon_service_v1, addonNsName), svcInvalid},
{fmt.Sprintf(invalid_addon_service_v2, defaultNsName), svcInvalidv2},
}
for _, p := range remoteFiles {
@ -275,8 +301,8 @@ var _ = framework.KubeDescribe("Addon update", func() {
sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, rcv1, destinationDir, rcv1))
sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, svcv1, destinationDir, svcv1))
waitForServiceInAddonTest(f.ClientSet, f.Namespace.Name, "addon-test", true)
waitForReplicationControllerInAddonTest(f.ClientSet, defaultNsName, "addon-test-v1", true)
waitForServiceInAddonTest(f.ClientSet, addonNsName, "addon-test", true)
waitForReplicationControllerInAddonTest(f.ClientSet, addonNsName, "addon-test-v1", true)
By("update manifests")
sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, rcv2, destinationDir, rcv2))
@ -289,27 +315,25 @@ var _ = framework.KubeDescribe("Addon update", func() {
* But it is ok - as long as we don't have rolling update, the result will be the same
*/
waitForServiceInAddonTest(f.ClientSet, f.Namespace.Name, "addon-test-updated", true)
waitForReplicationControllerInAddonTest(f.ClientSet, f.Namespace.Name, "addon-test-v2", true)
waitForServiceInAddonTest(f.ClientSet, addonNsName, "addon-test-updated", true)
waitForReplicationControllerInAddonTest(f.ClientSet, addonNsName, "addon-test-v2", true)
waitForServiceInAddonTest(f.ClientSet, f.Namespace.Name, "addon-test", false)
waitForReplicationControllerInAddonTest(f.ClientSet, defaultNsName, "addon-test-v1", false)
waitForServiceInAddonTest(f.ClientSet, addonNsName, "addon-test", false)
waitForReplicationControllerInAddonTest(f.ClientSet, addonNsName, "addon-test-v1", false)
By("remove manifests")
sshExecAndVerify(sshClient, fmt.Sprintf("sudo rm %s/%s", destinationDir, rcv2))
sshExecAndVerify(sshClient, fmt.Sprintf("sudo rm %s/%s", destinationDir, svcv2))
waitForServiceInAddonTest(f.ClientSet, f.Namespace.Name, "addon-test-updated", false)
waitForReplicationControllerInAddonTest(f.ClientSet, f.Namespace.Name, "addon-test-v2", false)
waitForServiceInAddonTest(f.ClientSet, addonNsName, "addon-test-updated", false)
waitForReplicationControllerInAddonTest(f.ClientSet, addonNsName, "addon-test-v2", false)
By("verify invalid API addons weren't created")
_, err = f.ClientSet.Core().ReplicationControllers(f.Namespace.Name).Get("invalid-addon-test-v1")
_, err = f.ClientSet.Core().ReplicationControllers(addonNsName).Get("invalid-addon-test-v1")
Expect(err).To(HaveOccurred())
_, err = f.ClientSet.Core().ReplicationControllers(defaultNsName).Get("invalid-addon-test-v1")
_, err = f.ClientSet.Core().Services(addonNsName).Get("ivalid-addon-test")
Expect(err).To(HaveOccurred())
_, err = f.ClientSet.Core().Services(f.Namespace.Name).Get("ivalid-addon-test")
Expect(err).To(HaveOccurred())
_, err = f.ClientSet.Core().Services(defaultNsName).Get("ivalid-addon-test")
_, err = f.ClientSet.Core().Services(defaultNsName).Get("ivalid-addon-test-v2")
Expect(err).To(HaveOccurred())
// invalid addons will be deleted by the deferred function


@ -180,14 +180,14 @@ func updateDNSScalingConfigMap(c clientset.Interface, configMap *api.ConfigMap)
func getDNSReplicas(c clientset.Interface) (int, error) {
label := labels.SelectorFromSet(labels.Set(map[string]string{ClusterAddonLabelKey: KubeDNSLabelName}))
listOpts := api.ListOptions{LabelSelector: label}
rcs, err := c.Core().ReplicationControllers(DNSNamespace).List(listOpts)
deployments, err := c.Extensions().Deployments(DNSNamespace).List(listOpts)
if err != nil {
return 0, err
}
Expect(len(rcs.Items)).Should(Equal(1))
Expect(len(deployments.Items)).Should(Equal(1))
rc := rcs.Items[0]
return int(rc.Spec.Replicas), nil
deployment := deployments.Items[0]
return int(deployment.Spec.Replicas), nil
}
func deleteDNSAutoscalerPod(c clientset.Interface) error {


@ -2642,6 +2642,36 @@ func WaitForRCPodsRunning(c clientset.Interface, ns, rcName string) error {
return nil
}
func ScaleDeployment(clientset clientset.Interface, ns, name string, size uint, wait bool) error {
By(fmt.Sprintf("Scaling Deployment %s in namespace %s to %d", name, ns, size))
scaler, err := kubectl.ScalerFor(extensions.Kind("Deployment"), clientset)
if err != nil {
return err
}
waitForScale := kubectl.NewRetryParams(5*time.Second, 1*time.Minute)
waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute)
if err = scaler.Scale(ns, name, size, nil, waitForScale, waitForReplicas); err != nil {
return fmt.Errorf("error while scaling Deployment %s to %d replicas: %v", name, size, err)
}
if !wait {
return nil
}
return WaitForDeploymentPodsRunning(clientset, ns, name)
}
func WaitForDeploymentPodsRunning(c clientset.Interface, ns, name string) error {
deployment, err := c.Extensions().Deployments(ns).Get(name)
if err != nil {
return err
}
selector := labels.SelectorFromSet(labels.Set(deployment.Spec.Selector.MatchLabels))
err = testutils.WaitForPodsWithLabelRunning(c, ns, selector)
if err != nil {
return fmt.Errorf("Error while waiting for Deployment %s pods to be running: %v", name, err)
}
return nil
}
// Returns true if all the specified pods are scheduled, else returns false.
func podsWithLabelScheduled(c clientset.Interface, ns string, label labels.Selector) (bool, error) {
PodStore := testutils.NewPodStore(c, ns, label, fields.Everything())


@ -55,16 +55,17 @@ var _ = framework.KubeDescribe("Rescheduler [Serial]", func() {
By("creating a new instance of Dashboard and waiting for Dashboard to be scheduled")
label := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": "kubernetes-dashboard"}))
listOpts := api.ListOptions{LabelSelector: label}
rcs, err := f.ClientSet.Core().ReplicationControllers(api.NamespaceSystem).List(listOpts)
deployments, err := f.ClientSet.Extensions().Deployments(api.NamespaceSystem).List(listOpts)
framework.ExpectNoError(err)
Expect(len(rcs.Items)).Should(Equal(1))
Expect(len(deployments.Items)).Should(Equal(1))
rc := rcs.Items[0]
replicas := uint(rc.Spec.Replicas)
deployment := deployments.Items[0]
replicas := uint(deployment.Spec.Replicas)
err = framework.ScaleRC(f.ClientSet, api.NamespaceSystem, rc.Name, replicas+1, true)
defer framework.ExpectNoError(framework.ScaleRC(f.ClientSet, api.NamespaceSystem, rc.Name, replicas, true))
err = framework.ScaleDeployment(f.ClientSet, api.NamespaceSystem, deployment.Name, replicas+1, true)
defer framework.ExpectNoError(framework.ScaleDeployment(f.ClientSet, api.NamespaceSystem, deployment.Name, replicas, true))
framework.ExpectNoError(err)
})
})