Add RBAC, health checks, and autoscalers; update Calico to v2.5.1 and Typha to v0.4.1

pull/6/head
Gunjan Patel 2017-08-23 18:15:18 -07:00 committed by gunjan5
parent 0d17e9deb7
commit 05661b68eb
21 changed files with 402 additions and 132 deletions

View File

@@ -9,6 +9,6 @@ Calico is an implementation of the Kubernetes network policy API. The provided
### Learn More
Learn more about Calico at http://docs.projectcalico.org
Learn more about Calico at https://docs.projectcalico.org
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/cluster/addons/calico-policy-controller/README.md?pixel)]()

View File

@@ -0,0 +1,67 @@
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: calico
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
rules:
  - apiGroups: [""]
    resources:
      - namespaces
    verbs:
      - get
      - list
      - watch
  - apiGroups: [""]
    resources:
      - endpoints
    verbs:
      - get
  - apiGroups: [""]
    resources:
      - services
    verbs:
      - get
  - apiGroups: [""]
    resources:
      - pods/status
    verbs:
      - update
  - apiGroups: [""]
    resources:
      - pods
    verbs:
      - get
      - list
      - watch
  - apiGroups: [""]
    resources:
      - nodes
    verbs:
      - get
      - list
      - update
      - watch
  - apiGroups: ["extensions"]
    resources:
      - networkpolicies
    verbs:
      - get
      - list
      - watch
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - globalfelixconfigs
      - globalbgpconfigs
      - ippools
      - globalnetworkpolicies
    verbs:
      - create
      - get
      - list
      - update
      - patch
      - delete
      - watch

View File

@@ -0,0 +1,15 @@
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: calico
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico
subjects:
  - kind: ServiceAccount
    name: calico
    namespace: kube-system
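
With the role bound to the calico service account, the grants can be spot-checked through the API server's own authorization machinery. A minimal sketch using kubectl's built-in access review; the resource/verb pairs come straight from the ClusterRole above:

  # Expect "yes" for verbs the role grants to the calico service account...
  kubectl auth can-i watch pods --as=system:serviceaccount:kube-system:calico
  kubectl auth can-i update nodes --as=system:serviceaccount:kube-system:calico
  # ...and "no" for anything the role does not cover.
  kubectl auth can-i delete pods --as=system:serviceaccount:kube-system:calico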

View File

@@ -0,0 +1,14 @@
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: calico-cpva
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
rules:
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["list"]
  - apiGroups: ["apps", "extensions"]
    resources: ["deployments", "daemonsets"]
    verbs: ["patch"]

View File

@@ -0,0 +1,15 @@
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: calico-cpva
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
subjects:
  - kind: ServiceAccount
    name: calico-cpva
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: calico-cpva
  apiGroup: rbac.authorization.k8s.io

View File

@@ -0,0 +1,8 @@
kind: ServiceAccount
apiVersion: v1
metadata:
  name: calico-cpva
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
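
The calico-cpva role exists so the vertical pod autoscaler can list nodes and patch resource requests on the calico-node DaemonSet and calico-typha Deployment. As a hedged illustration only (the exact payload cpvpa sends may differ), the "patch" verb permits an update along these lines:

  # Hypothetical example of the kind of strategic-merge patch the autoscaler is allowed to apply.
  kubectl -n kube-system patch daemonset calico-node --type=strategic \
    -p '{"spec":{"template":{"spec":{"containers":[{"name":"calico-node","resources":{"requests":{"cpu":"120m"}}}]}}}}'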

View File

@@ -23,12 +23,13 @@ spec:
      nodeSelector:
        projectcalico.org/ds-ready: "true"
      hostNetwork: true
      serviceAccountName: calico
      containers:
        # Runs calico/node container on each Kubernetes node. This
        # container programs network policy and routes on each
        # host.
        - name: calico-node
          image: calico/node:v1.3.0
          image: calico/node:v2.5.1
          env:
            - name: CALICO_DISABLE_FILE_LOGGING
              value: "true"
@@ -46,6 +47,8 @@ spec:
              value: "none"
            - name: FELIX_PROMETHEUSMETRICSENABLED
              value: "true"
            - name: FELIX_HEALTHENABLED
              value: "true"
            - name: IP
              value: ""
            - name: NO_DEFAULT_POOLS
@@ -58,9 +61,18 @@ spec:
              value: "true"
          securityContext:
            privileged: true
          resources:
            requests:
              cpu: __CALICO_NODE_CPU__
          livenessProbe:
            httpGet:
              path: /liveness
              port: 9099
            periodSeconds: 10
            initialDelaySeconds: 10
            failureThreshold: 6
          readinessProbe:
            httpGet:
              path: /readiness
              port: 9099
            periodSeconds: 10
          volumeMounts:
            - mountPath: /lib/modules
              name: lib-modules
@@ -71,7 +83,7 @@ spec:
        # This container installs the Calico CNI binaries
        # and CNI network config file on each node.
        - name: install-cni
          image: calico/cni:v1.9.1
          image: calico/cni:v1.10.0
          command: ["/install-cni.sh"]
          env:
            - name: CNI_CONF_NAME
@@ -103,7 +115,7 @@ spec:
                    {
                      "type": "portmap",
                      "capabilities": {"portMappings": true},
                      "snat": false
                      "snat": true
                    }
                  ]
                }
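
The new probes point the kubelet at Felix's health endpoints on port 9099, enabled by FELIX_HEALTHENABLED above, and the hard-coded __CALICO_NODE_CPU__ request is dropped because the vertical autoscaler added later in this change now manages it. A quick, hedged way to confirm the probes are wired up (this assumes the DaemonSet's pods carry a k8s-app=calico-node label; adjust the selector if the manifest labels them differently):

  # READY reflects the readiness probe; probe failures surface as pod events.
  kubectl -n kube-system get pods -l k8s-app=calico-node -o wide
  kubectl -n kube-system describe pods -l k8s-app=calico-node | grep -i -A2 'liveness\|readiness'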

View File

@@ -0,0 +1,22 @@
kind: ConfigMap
apiVersion: v1
metadata:
  name: calico-node-vertical-autoscaler
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
data:
  node-autoscaler: |-
    {
      "calico-node": {
        "requests": {
          "cpu": {
            "base": "80m",
            "step": "20m",
            "nodesPerStep": 10,
            "max": "500m"
          }
        }
      }
    }
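
This ConfigMap drives cpvpa's scaling of the calico-node CPU request with cluster size. A rough bash sketch of the arithmetic it encodes, assuming one step is added per nodesPerStep nodes and the result is capped at max (the autoscaler's exact rounding may differ):

  # Values from the ConfigMap above, in millicores; rounding behaviour is an assumption.
  nodes=150
  base=80; step=20; nodes_per_step=10; max=500
  cpu=$(( base + step * (nodes / nodes_per_step) ))
  (( cpu > max )) && cpu=${max}
  echo "calico-node CPU request for ${nodes} nodes: ${cpu}m"   # -> 380m (cap is 500m)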

View File

@@ -0,0 +1,37 @@
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
  name: calico-node-vertical-autoscaler
  namespace: kube-system
  labels:
    k8s-app: calico-node-autoscaler
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  replicas: 1
  template:
    metadata:
      labels:
        k8s-app: calico-node-autoscaler
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      containers:
        - image: gcr.io/google_containers/cpvpa-amd64:v0.6.0
          name: autoscaler
          command:
            - /cpvpa
            - --target=daemonset/calico-node
            - --namespace=kube-system
            - --logtostderr=true
            - --poll-period-seconds=30
            - --v=2
            - --config-file=/etc/config/node-autoscaler
          volumeMounts:
            - name: config
              mountPath: /etc/config
      volumes:
        - name: config
          configMap:
            name: calico-node-vertical-autoscaler
      serviceAccountName: calico-cpva

View File

@@ -0,0 +1,8 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile

View File

@@ -0,0 +1,16 @@
apiVersion: apiextensions.k8s.io/v1beta1
description: Calico Global BGP Configuration
kind: CustomResourceDefinition
metadata:
  name: globalbgpconfigs.crd.projectcalico.org
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: GlobalBGPConfig
    plural: globalbgpconfigs
    singular: globalbgpconfig

View File

@@ -0,0 +1,16 @@
apiVersion: apiextensions.k8s.io/v1beta1
description: Calico Global Felix Configuration
kind: CustomResourceDefinition
metadata:
  name: globalfelixconfigs.crd.projectcalico.org
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: GlobalFelixConfig
    plural: globalfelixconfigs
    singular: globalfelixconfig

View File

@@ -0,0 +1,16 @@
apiVersion: apiextensions.k8s.io/v1beta1
description: Calico Global Network Policies
kind: CustomResourceDefinition
metadata:
  name: globalnetworkpolicies.crd.projectcalico.org
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: GlobalNetworkPolicy
    plural: globalnetworkpolicies
    singular: globalnetworkpolicy

View File

@@ -0,0 +1,16 @@
apiVersion: apiextensions.k8s.io/v1beta1
description: Calico IP Pools
kind: CustomResourceDefinition
metadata:
  name: ippools.crd.projectcalico.org
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: IPPool
    plural: ippools
    singular: ippool
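
Once the addon manager applies these four CustomResourceDefinitions, the Calico data they back lives in the Kubernetes API and can be queried with the plural names registered above, for example:

  # The registered CRDs themselves...
  kubectl get crd | grep crd.projectcalico.org
  # ...and the cluster-scoped Calico resources (may be empty until calico-node writes them).
  kubectl get globalfelixconfigs.crd.projectcalico.org
  kubectl get ippools.crd.projectcalico.org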

View File

@@ -8,7 +8,6 @@ metadata:
    addonmanager.kubernetes.io/mode: Reconcile
    k8s-app: calico-typha
spec:
  replicas: __CALICO_TYPHA_REPLICAS__
  revisionHistoryLimit: 2
  template:
    metadata:
@@ -21,8 +20,9 @@ spec:
        - key: CriticalAddonsOnly
          operator: Exists
      hostNetwork: true
      serviceAccountName: calico
      containers:
        - image: calico/typha:v0.2.3
        - image: calico/typha:v0.4.1
          name: calico-typha
          ports:
            - containerPort: 5473
@@ -45,13 +45,23 @@ spec:
              value: "kubernetes"
            - name: TYPHA_MAXCONNECTIONSLOWERLIMIT
              value: "1"
            - name: TYPHA_HEALTHENABLED
              value: "true"
          volumeMounts:
            - mountPath: /etc/calico
              name: etc-calico
              readOnly: true
          resources:
            requests:
              cpu: __CALICO_TYPHA_CPU__
          livenessProbe:
            httpGet:
              path: /liveness
              port: 9098
            periodSeconds: 30
            initialDelaySeconds: 30
          readinessProbe:
            httpGet:
              path: /readiness
              port: 9098
            periodSeconds: 10
      volumes:
        - name: etc-calico
          hostPath:

View File

@@ -0,0 +1,24 @@
kind: ConfigMap
apiVersion: v1
metadata:
  name: calico-typha-horizontal-autoscaler
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
data:
  ladder: |-
    {
      "coresToReplicas": [],
      "nodesToReplicas":
      [
        [1, 1],
        [10, 2],
        [100, 3],
        [250, 4],
        [500, 5],
        [1000, 6],
        [1500, 7],
        [2000, 8]
      ]
    }
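
The ladder maps cluster size to Typha replica count. A small bash sketch of the lookup, assuming the ladder mode selects the entry with the largest node threshold that does not exceed the current node count:

  # Look up replicas for a given node count from the nodesToReplicas ladder above.
  nodes=150
  replicas=1
  for entry in "1 1" "10 2" "100 3" "250 4" "500 5" "1000 6" "1500 7" "2000 8"; do
    set -- ${entry}
    (( nodes >= $1 )) && replicas=$2
  done
  echo "calico-typha replicas for ${nodes} nodes: ${replicas}"   # -> 3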

View File

@@ -0,0 +1,33 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: calico-typha-horizontal-autoscaler
  namespace: kube-system
  labels:
    k8s-app: calico-typha-autoscaler
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  replicas: 1
  template:
    metadata:
      labels:
        k8s-app: calico-typha-autoscaler
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      containers:
        - image: gcr.io/google_containers/cluster-proportional-autoscaler-amd64:1.1.2
          name: autoscaler
          command:
            - /cluster-proportional-autoscaler
            - --namespace=kube-system
            - --configmap=calico-typha-horizontal-autoscaler
            - --target=deployment/calico-typha
            - --logtostderr=true
            - --v=2
          resources:
            requests:
              cpu: 10m
            limits:
              cpu: 10m
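
With this Deployment in place the calico-typha replica count is owned by the horizontal autoscaler, which is why the __CALICO_TYPHA_REPLICAS__ substitution disappears from the Typha manifest and the configure scripts in this change. To watch it act, using the names defined in these manifests:

  # Replica count changes as nodes join or leave the cluster.
  kubectl -n kube-system get deployment calico-typha -w
  kubectl -n kube-system logs deployment/calico-typha-horizontal-autoscaler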

View File

@@ -0,0 +1,22 @@
kind: ConfigMap
apiVersion: v1
metadata:
  name: calico-typha-vertical-autoscaler
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
data:
  typha-autoscaler: |-
    {
      "calico-typha": {
        "requests": {
          "cpu": {
            "base": "120m",
            "step": "80m",
            "nodesPerStep": 10,
            "max": "1000m"
          }
        }
      }
    }

View File

@@ -0,0 +1,37 @@
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
  name: calico-typha-vertical-autoscaler
  namespace: kube-system
  labels:
    k8s-app: calico-typha-autoscaler
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  replicas: 1
  template:
    metadata:
      labels:
        k8s-app: calico-typha-autoscaler
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      containers:
        - image: gcr.io/google_containers/cpvpa-amd64:v0.6.0
          name: autoscaler
          command:
            - /cpvpa
            - --target=deployment/calico-typha
            - --namespace=kube-system
            - --logtostderr=true
            - --poll-period-seconds=30
            - --v=2
            - --config-file=/etc/config/typha-autoscaler
          volumeMounts:
            - name: config
              mountPath: /etc/config
      volumes:
        - name: config
          configMap:
            name: calico-typha-vertical-autoscaler
      serviceAccountName: calico-cpva

View File

@@ -34,54 +34,6 @@ function create-dirs {
  fi
}
# Vars assumed:
# NUM_NODES
function get-calico-node-cpu {
  local suggested_calico_cpus=100m
  if [[ "${NUM_NODES}" -gt "10" ]]; then
    suggested_calico_cpus=250m
  fi
  if [[ "${NUM_NODES}" -gt "100" ]]; then
    suggested_calico_cpus=500m
  fi
  if [[ "${NUM_NODES}" -gt "500" ]]; then
    suggested_calico_cpus=1000m
  fi
  echo "${suggested_calico_cpus}"
}
# Vars assumed:
# NUM_NODES
function get-calico-typha-replicas {
  local typha_count=1
  if [[ "${NUM_NODES}" -gt "10" ]]; then
    typha_count=2
  fi
  if [[ "${NUM_NODES}" -gt "100" ]]; then
    typha_count=3
  fi
  if [[ "${NUM_NODES}" -gt "250" ]]; then
    typha_count=4
  fi
  if [[ "${NUM_NODES}" -gt "500" ]]; then
    typha_count=5
  fi
  echo "${typha_count}"
}
# Vars assumed:
# NUM_NODES
function get-calico-typha-cpu {
  local typha_cpu=200m
  if [[ "${NUM_NODES}" -gt "10" ]]; then
    typha_cpu=500m
  fi
  if [[ "${NUM_NODES}" -gt "100" ]]; then
    typha_cpu=1000m
  fi
  echo "${typha_cpu}"
}
# Create directories referenced in the kube-controller-manager manifest for
# bindmounts. This is used under the rkt runtime to work around
# https://github.com/kubernetes/kubernetes/issues/26816
@@ -1363,20 +1315,9 @@ function start-kube-addons {
  if [[ "${NETWORK_POLICY_PROVIDER:-}" == "calico" ]]; then
    setup-addon-manifests "addons" "calico-policy-controller"
    # Configure Calico based on cluster size and image type.
    # Configure Calico CNI directory.
    local -r ds_file="${dst_dir}/calico-policy-controller/calico-node-daemonset.yaml"
    local -r typha_dep_file="${dst_dir}/calico-policy-controller/typha-deployment.yaml"
    sed -i -e "s@__CALICO_CNI_DIR__@/opt/cni/bin@g" "${ds_file}"
    sed -i -e "s@__CALICO_NODE_CPU__@$(get-calico-node-cpu)@g" "${ds_file}"
    sed -i -e "s@__CALICO_TYPHA_CPU__@$(get-calico-typha-cpu)@g" "${typha_dep_file}"
    sed -i -e "s@__CALICO_TYPHA_REPLICAS__@$(get-calico-typha-replicas)@g" "${typha_dep_file}"
  else
    # If not configured to use Calico, the set the typha replica count to 0, but only if the
    # addon is present.
    local -r typha_dep_file="${dst_dir}/calico-policy-controller/typha-deployment.yaml"
    if [[ -e $typha_dep_file ]]; then
      sed -i -e "s@__CALICO_TYPHA_REPLICAS__@0@g" "${typha_dep_file}"
    fi
  fi
  if [[ "${ENABLE_DEFAULT_STORAGE_CLASS:-}" == "true" ]]; then
    setup-addon-manifests "addons" "storage-class/gce"

View File

@@ -32,54 +32,6 @@ function setup-os-params {
  echo "core.%e.%p.%t" > /proc/sys/kernel/core_pattern
}
# Vars assumed:
# NUM_NODES
function get-calico-node-cpu {
  local suggested_calico_cpus=100m
  if [[ "${NUM_NODES}" -gt "10" ]]; then
    suggested_calico_cpus=250m
  fi
  if [[ "${NUM_NODES}" -gt "100" ]]; then
    suggested_calico_cpus=500m
  fi
  if [[ "${NUM_NODES}" -gt "500" ]]; then
    suggested_calico_cpus=1000m
  fi
  echo "${suggested_calico_cpus}"
}
# Vars assumed:
# NUM_NODES
function get-calico-typha-replicas {
  local typha_count=1
  if [[ "${NUM_NODES}" -gt "10" ]]; then
    typha_count=2
  fi
  if [[ "${NUM_NODES}" -gt "100" ]]; then
    typha_count=3
  fi
  if [[ "${NUM_NODES}" -gt "250" ]]; then
    typha_count=4
  fi
  if [[ "${NUM_NODES}" -gt "500" ]]; then
    typha_count=5
  fi
  echo "${typha_count}"
}
# Vars assumed:
# NUM_NODES
function get-calico-typha-cpu {
  local typha_cpu=200m
  if [[ "${NUM_NODES}" -gt "10" ]]; then
    typha_cpu=500m
  fi
  if [[ "${NUM_NODES}" -gt "100" ]]; then
    typha_cpu=1000m
  fi
  echo "${typha_cpu}"
}
function config-ip-firewall {
  echo "Configuring IP firewall rules"
  # The GCI image has host firewall which drop most inbound/forwarded packets.
@@ -1796,20 +1748,9 @@ function start-kube-addons {
  if [[ "${NETWORK_POLICY_PROVIDER:-}" == "calico" ]]; then
    setup-addon-manifests "addons" "calico-policy-controller"
    # Configure Calico based on cluster size and image type.
    # Configure Calico CNI directory.
    local -r ds_file="${dst_dir}/calico-policy-controller/calico-node-daemonset.yaml"
    local -r typha_dep_file="${dst_dir}/calico-policy-controller/typha-deployment.yaml"
    sed -i -e "s@__CALICO_CNI_DIR__@/home/kubernetes/bin@g" "${ds_file}"
    sed -i -e "s@__CALICO_NODE_CPU__@$(get-calico-node-cpu)@g" "${ds_file}"
    sed -i -e "s@__CALICO_TYPHA_CPU__@$(get-calico-typha-cpu)@g" "${typha_dep_file}"
    sed -i -e "s@__CALICO_TYPHA_REPLICAS__@$(get-calico-typha-replicas)@g" "${typha_dep_file}"
  else
    # If not configured to use Calico, the set the typha replica count to 0, but only if the
    # addon is present.
    local -r typha_dep_file="${dst_dir}/calico-policy-controller/typha-deployment.yaml"
    if [[ -e $typha_dep_file ]]; then
      sed -i -e "s@__CALICO_TYPHA_REPLICAS__@0@g" "${typha_dep_file}"
    fi
  fi
  if [[ "${ENABLE_DEFAULT_STORAGE_CLASS:-}" == "true" ]]; then
    setup-addon-manifests "addons" "storage-class/gce"