Merge pull request #33862 from luxas/remove_experimental_flannel

Automatic merge from submit-queue

Remove the flannel experimental overlay


**What this PR does / why we need it**:

This PR removes the deprecated flannel overlay integration from the kubelet.
It is no longer needed: when flannel runs as a DaemonSet, CNI handles pod networking on its own.
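For reference, with flannel deployed as a DaemonSet the kubelet only needs a CNI network config that delegates to the flannel CNI plugin (together with `--network-plugin=cni` and a `--cni-conf-dir`); a minimal sketch of such a config is below — the file name and delegate options are illustrative, not part of this PR:

```json
{
  "name": "cbr0",
  "type": "flannel",
  "delegate": {
    "isDefaultGateway": true
  }
}
```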

**Which issue this PR fixes** *(optional, in `fixes #<issue number>(, #<issue_number>, ...)` format, will close that issue when PR gets merged)*: fixes #17795, #30589

cc @kubernetes/sig-network @thockin @freehan @bprashanth @yujuhong @dchen1107
Kubernetes Submit Queue 2016-10-04 21:39:20 -07:00 committed by GitHub
commit 457d3785de
25 changed files with 2594 additions and 3231 deletions

View File

@ -148,7 +148,7 @@ KUBE_UP_AUTOMATIC_CLEANUP=${KUBE_UP_AUTOMATIC_CLEANUP:-false}
STORAGE_BACKEND=${STORAGE_BACKEND:-etcd2}
# Networking plugin specific settings.
NETWORK_PROVIDER="${NETWORK_PROVIDER:-kubenet}" # none, opencontrail, flannel, kubenet
NETWORK_PROVIDER="${NETWORK_PROVIDER:-kubenet}" # none, opencontrail, kubenet
OPENCONTRAIL_TAG="${OPENCONTRAIL_TAG:-R2.20}"
OPENCONTRAIL_KUBERNETES_TAG="${OPENCONTRAIL_KUBERNETES_TAG:-master}"
OPENCONTRAIL_PUBLIC_SUBNET="${OPENCONTRAIL_PUBLIC_SUBNET:-10.1.0.0/16}"

View File

@ -178,7 +178,7 @@ TEST_CLUSTER="${TEST_CLUSTER:-true}"
STORAGE_BACKEND=${STORAGE_BACKEND:-etcd2}
# OpenContrail networking plugin specific settings
NETWORK_PROVIDER="${NETWORK_PROVIDER:-kubenet}" # none, opencontrail, flannel, kubenet
NETWORK_PROVIDER="${NETWORK_PROVIDER:-kubenet}" # none, opencontrail, kubenet
OPENCONTRAIL_TAG="${OPENCONTRAIL_TAG:-R2.20}"
OPENCONTRAIL_KUBERNETES_TAG="${OPENCONTRAIL_KUBERNETES_TAG:-master}"
OPENCONTRAIL_PUBLIC_SUBNET="${OPENCONTRAIL_PUBLIC_SUBNET:-10.1.0.0/16}"

View File

@ -1,112 +0,0 @@
{
"kind": "Pod",
"apiVersion": "v1",
"metadata": {
"name": "flannel-server",
"namespace": "kube-system",
"labels": {
"app": "flannel-server",
"version": "v0.1"
}
},
"spec": {
"volumes": [
{
"name": "varlog",
"hostPath": {
"path": "/var/log"
}
},
{
"name": "etcdstorage",
"hostPath": {
"path": "/var/etcd-flannel"
}
},
{
"name": "networkconfig",
"hostPath": {
"path": "/etc/kubernetes/network.json"
}
}
],
"containers": [
{
"name": "flannel-server-helper",
"image": "gcr.io/google_containers/flannel-server-helper:0.1",
"args": [
"--network-config=/etc/kubernetes/network.json",
"--etcd-prefix=/kubernetes.io/network",
"--etcd-server=http://127.0.0.1:{{ etcd_port }}"
],
"volumeMounts": [
{
"name": "networkconfig",
"mountPath": "/etc/kubernetes/network.json"
}
],
"imagePullPolicy": "Always"
},
{
"name": "flannel-container",
"image": "quay.io/coreos/flannel:0.5.5",
"command": [
"/bin/sh",
"-c",
"/opt/bin/flanneld -listen 0.0.0.0:10253 -etcd-endpoints http://127.0.0.1:{{ etcd_port }} -etcd-prefix /kubernetes.io/network 1>>/var/log/flannel_server.log 2>&1"
],
"ports": [
{
"hostPort": 10253,
"containerPort": 10253
}
],
"resources": {
"requests": {
"cpu": {{ cpulimit }}
}
},
"volumeMounts": [
{
"name": "varlog",
"mountPath": "/var/log"
}
]
},
{
"name": "etcd-container",
"image": "gcr.io/google_containers/etcd:2.2.1",
"command": [
"/bin/sh",
"-c",
"/usr/local/bin/etcd --listen-peer-urls http://127.0.0.1:{{ etcd_peer_port }} --advertise-client-urls http://127.0.0.1:{{ etcd_port }} --listen-client-urls http://127.0.0.1:{{ etcd_port }} --data-dir /var/etcd-flannel/data 1>>/var/log/etcd_flannel.log 2>&1"
],
"livenessProbe": {
"httpGet": {
"host": "127.0.0.1",
"port": {{ etcd_port }},
"path": "/health"
},
"initialDelaySeconds": 15,
"timeoutSeconds": 15
},
"resources": {
"requests": {
"cpu": {{ cpulimit }}
}
},
"volumeMounts": [
{
"name": "varlog",
"mountPath": "/var/log"
},
{
"name": "etcdstorage",
"mountPath": "/var/etcd-flannel"
}
]
}
],
"hostNetwork": true
}
}

View File

@ -1,39 +0,0 @@
touch /var/log/flannel.log:
cmd.run:
- creates: /var/log/flannel.log
touch /var/log/etcd_flannel.log:
cmd.run:
- creates: /var/log/etcd_flannel.log
/var/etcd-flannel:
file.directory:
- user: root
- group: root
- dir_mode: 700
- recurse:
- user
- group
- mode
/etc/kubernetes/network.json:
file.managed:
- source: salt://flannel-server/network.json
- makedirs: True
- user: root
- group: root
- mode: 755
/etc/kubernetes/manifests/flannel-server.manifest:
file.managed:
- source: salt://flannel-server/flannel-server.manifest
- user: root
- group: root
- mode: 644
- makedirs: true
- dir_mode: 755
- template: jinja
- context:
etcd_port: 4003
etcd_peer_port: 2382
cpulimit: '"100m"'

View File

@ -1,8 +0,0 @@
{
"Network": "172.16.0.0/12",
"SubnetLen": 24,
"Backend": {
"Type": "vxlan",
"VNI": 1
}
}

View File

@ -1,6 +0,0 @@
{% if grains.api_servers is defined -%}
{% set daemon_args = "-remote " + grains.api_servers + ":10253" -%}
{% else -%}
{% set daemon_args = "-remote 127.0.0.1:10253" -%}
{% endif -%}
DAEMON_ARGS="{{daemon_args}}"

View File

@ -1,44 +0,0 @@
# TODO: Run flannel daemon in a static pod once we've moved the overlay network
# setup into a network plugin.
flannel-tar:
archive:
- extracted
- user: root
- name: /usr/local/src
- makedirs: True
- source: https://storage.googleapis.com/kubernetes-release/flannel/flannel-0.5.5-linux-amd64.tar.gz
- tar_options: v
- source_hash: md5=972c717254775bef528f040af804f2cc
- archive_format: tar
- if_missing: /usr/local/src/flannel/flannel-0.5.5/
flannel-symlink:
file.symlink:
- name: /usr/local/bin/flanneld
- target: /usr/local/src/flannel-0.5.5/flanneld
- force: true
- watch:
- archive: flannel-tar
/etc/default/flannel:
file.managed:
- source: salt://flannel/default
- template: jinja
- user: root
- group: root
- mode: 644
/etc/init.d/flannel:
file.managed:
- source: salt://flannel/initd
- user: root
- group: root
- mode: 755
flannel:
service.running:
- enable: True
- watch:
- file: /usr/local/bin/flanneld
- file: /etc/init.d/flannel
- file: /etc/default/flannel

View File

@ -1,126 +0,0 @@
#!/bin/bash
#
### BEGIN INIT INFO
# Provides: flanneld
# Required-Start: $local_fs $network $syslog
# Required-Stop:
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Flannel daemon
# Description:
# Flannel daemon.
### END INIT INFO
# PATH should only include /usr/* if it runs after the mountnfs.sh script
PATH=/sbin:/usr/sbin:/bin:/usr/bin
DESC="Flannel overlay network daemon"
NAME=flannel
DAEMON=/usr/local/bin/flanneld
DAEMON_ARGS="--ip-masq"
DAEMON_LOG_FILE=/var/log/$NAME.log
PIDFILE=/var/run/$NAME.pid
SCRIPTNAME=/etc/init.d/$NAME
DAEMON_USER=root
# Exit if the package is not installed
[ -x "$DAEMON" ] || exit 0
# Read configuration variable file if it is present
[ -r /etc/default/$NAME ] && . /etc/default/$NAME
# Define LSB log_* functions.
# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
# and status_of_proc is working.
. /lib/lsb/init-functions
#
# Function that starts the daemon/service
#
do_start()
{
# Avoid a potential race at boot time when both monit and init.d start
# the same service
PIDS=$(pidof $DAEMON)
for PID in ${PIDS}; do
kill -9 $PID
done
# Return
# 0 if daemon has been started
# 1 if daemon was already running
# 2 if daemon could not be started
start-stop-daemon --start --quiet --background --no-close \
--make-pidfile --pidfile $PIDFILE \
--exec $DAEMON -c $DAEMON_USER --test > /dev/null \
|| return 1
start-stop-daemon --start --quiet --background --no-close \
--make-pidfile --pidfile $PIDFILE \
--exec $DAEMON -c $DAEMON_USER -- \
$DAEMON_ARGS >> $DAEMON_LOG_FILE 2>&1 \
|| return 2
}
#
# Function that stops the daemon/service
#
do_stop()
{
# Return
# 0 if daemon has been stopped
# 1 if daemon was already stopped
# 2 if daemon could not be stopped
# other if a failure occurred
start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE --name $NAME
RETVAL="$?"
[ "$RETVAL" = 2 ] && return 2
# Many daemons don't delete their pidfiles when they exit.
rm -f $PIDFILE
return "$RETVAL"
}
case "$1" in
start)
log_daemon_msg "Starting $DESC" "$NAME"
do_start
case "$?" in
0|1) log_end_msg 0 || exit 0 ;;
2) log_end_msg 1 || exit 1 ;;
esac
;;
stop)
log_daemon_msg "Stopping $DESC" "$NAME"
do_stop
case "$?" in
0|1) log_end_msg 0 ;;
2) exit 1 ;;
esac
;;
status)
status_of_proc -p $PIDFILE "$DAEMON" "$NAME" && exit 0 || exit $?
;;
restart|force-reload)
log_daemon_msg "Restarting $DESC" "$NAME"
do_stop
case "$?" in
0|1)
do_start
case "$?" in
0) log_end_msg 0 ;;
1) log_end_msg 1 ;; # Old process is still running
*) log_end_msg 1 ;; # Failed to start
esac
;;
*)
# Failed to stop
log_end_msg 1
;;
esac
;;
*)
echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
exit 3
;;
esac

View File

@ -14,11 +14,7 @@
{% if pillar['service_cluster_ip_range'] is defined and pillar['service_cluster_ip_range'] != "" -%}
{% set service_cluster_ip_range = "--service_cluster_ip_range=" + pillar['service_cluster_ip_range'] -%}
{% endif -%}
# When we're using flannel it is responsible for cidr allocation.
# This is expected to be a short-term compromise.
{% if pillar.get('network_provider', '').lower() == 'flannel' %}
{% set allocate_node_cidrs = "--allocate-node-cidrs=false" -%}
{% elif pillar.get('network_provider', '').lower() == 'kubenet' %}
{% if pillar.get('network_provider', '').lower() == 'kubenet' %}
{% set allocate_node_cidrs = "--allocate-node-cidrs=true" -%}
{% elif pillar['allocate_node_cidrs'] is defined -%}
{% set allocate_node_cidrs = "--allocate-node-cidrs=" + pillar['allocate_node_cidrs'] -%}

View File

@ -97,16 +97,6 @@
{% set non_masquerade_cidr = "--non-masquerade-cidr=" + pillar.non_masquerade_cidr -%}
{% endif -%}
# The master kubelet cannot wait for the flannel daemon because it is responsible
# for starting up the flannel server in a static pod. So even though the flannel
# daemon runs on the master, it doesn't hold up cluster bootstrap. All the pods
# on the master run with host networking, so the master flannel doesn't care
# even if the network changes. We only need it for the master proxy.
{% set experimental_flannel_overlay = "" -%}
{% if pillar.get('network_provider', '').lower() == 'flannel' and grains['roles'][0] != 'kubernetes-master' %}
{% set experimental_flannel_overlay = "--experimental-flannel-overlay=true" %}
{% endif -%}
# Setup cgroups hierarchies.
{% set cgroup_root = "" -%}
{% set system_container = "" -%}
@ -209,4 +199,4 @@
{% endif -%}
# test_args has to be kept at the end, so they'll overwrite any prior configuration
DAEMON_ARGS="{{daemon_args}} {{api_servers_with_port}} {{debugging_handlers}} {{hostname_override}} {{cloud_provider}} {{cloud_config}} {{config}} {{manifest_url}} --allow-privileged={{pillar['allow_privileged']}} {{log_level}} {{cluster_dns}} {{cluster_domain}} {{docker_root}} {{kubelet_root}} {{configure_cbr0}} {{non_masquerade_cidr}} {{cgroup_root}} {{system_container}} {{pod_cidr}} {{ master_kubelet_args }} {{cpu_cfs_quota}} {{network_plugin}} {{kubelet_port}} {{experimental_flannel_overlay}} {{ reconcile_cidr_args }} {{ hairpin_mode }} {{enable_custom_metrics}} {{runtime_container}} {{kubelet_container}} {{node_labels}} {{babysit_daemons}} {{eviction_hard}} {{feature_gates}} {{test_args}}"
DAEMON_ARGS="{{daemon_args}} {{api_servers_with_port}} {{debugging_handlers}} {{hostname_override}} {{cloud_provider}} {{cloud_config}} {{config}} {{manifest_url}} --allow-privileged={{pillar['allow_privileged']}} {{log_level}} {{cluster_dns}} {{cluster_domain}} {{docker_root}} {{kubelet_root}} {{configure_cbr0}} {{non_masquerade_cidr}} {{cgroup_root}} {{system_container}} {{pod_cidr}} {{ master_kubelet_args }} {{cpu_cfs_quota}} {{network_plugin}} {{kubelet_port}} {{ reconcile_cidr_args }} {{ hairpin_mode }} {{enable_custom_metrics}} {{runtime_container}} {{kubelet_container}} {{node_labels}} {{babysit_daemons}} {{eviction_hard}} {{feature_gates}} {{test_args}}"

View File

@ -13,9 +13,6 @@ base:
'roles:kubernetes-pool':
- match: grain
- docker
{% if pillar.get('network_provider', '').lower() == 'flannel' %}
- flannel
{% endif %}
{% if pillar.get('network_policy_provider', '').lower() == 'calico' %}
- cni
{% elif pillar.get('network_provider', '').lower() == 'kubenet' %}
@ -58,10 +55,7 @@ base:
- match: grain
- generate-cert
- etcd
{% if pillar.get('network_provider', '').lower() == 'flannel' %}
- flannel-server
- flannel
{% elif pillar.get('network_provider', '').lower() == 'kubenet' %}
{% if pillar.get('network_provider', '').lower() == 'kubenet' %}
- cni
{% elif pillar.get('network_provider', '').lower() == 'cni' %}
- cni

View File

@ -207,8 +207,6 @@ func (s *KubeletServer) AddFlags(fs *pflag.FlagSet) {
fs.Int32Var(&s.KubeAPIQPS, "kube-api-qps", s.KubeAPIQPS, "QPS to use while talking with kubernetes apiserver")
fs.Int32Var(&s.KubeAPIBurst, "kube-api-burst", s.KubeAPIBurst, "Burst to use while talking with kubernetes apiserver")
fs.BoolVar(&s.SerializeImagePulls, "serialize-image-pulls", s.SerializeImagePulls, "Pull images one at a time. We recommend *not* changing the default value on nodes that run docker daemon with version < 1.9 or an Aufs storage backend. Issue #10959 has more details. [default=true]")
fs.BoolVar(&s.ExperimentalFlannelOverlay, "experimental-flannel-overlay", s.ExperimentalFlannelOverlay, "Experimental support for starting the kubelet with the default overlay network (flannel). Assumes flanneld is already running in client mode. [default=false]")
fs.MarkDeprecated("experimental-flannel-overlay", "Will be removed in a future version.")
fs.DurationVar(&s.OutOfDiskTransitionFrequency.Duration, "outofdisk-transition-frequency", s.OutOfDiskTransitionFrequency.Duration, "Duration for which the kubelet has to wait before transitioning out of out-of-disk node condition status. Default: 5m0s")
fs.StringVar(&s.NodeIP, "node-ip", s.NodeIP, "IP address of the node. If set, kubelet will use this IP address for the node")
fs.BoolVar(&s.EnableCustomMetrics, "enable-custom-metrics", s.EnableCustomMetrics, "Support for gathering custom metrics.")

View File

@ -180,7 +180,6 @@ executor-suicide-timeout
exit-on-lock-contention
experimental-allowed-unsafe-sysctls
experimental-bootstrap-kubeconfig
experimental-flannel-overlay
experimental-keystone-url
experimental-nvidia-gpus
experimental-prefix

File diff suppressed because it is too large.

View File

@ -369,10 +369,6 @@ type KubeletConfiguration struct {
// run docker daemon with version < 1.9 or an Aufs storage backend.
// Issue #10959 has more details.
SerializeImagePulls bool `json:"serializeImagePulls"`
// experimentalFlannelOverlay enables experimental support for starting the
// kubelet with the default overlay network (flannel). Assumes flanneld
// is already running in client mode.
ExperimentalFlannelOverlay bool `json:"experimentalFlannelOverlay"`
// outOfDiskTransitionFrequency is duration for which the kubelet has to
// wait before transitioning out of out-of-disk node condition status.
OutOfDiskTransitionFrequency unversioned.Duration `json:"outOfDiskTransitionFrequency,omitempty"`

View File

@ -424,10 +424,6 @@ type KubeletConfiguration struct {
// run docker daemon with version < 1.9 or an Aufs storage backend.
// Issue #10959 has more details.
SerializeImagePulls *bool `json:"serializeImagePulls"`
// experimentalFlannelOverlay enables experimental support for starting the
// kubelet with the default overlay network (flannel). Assumes flanneld
// is already running in client mode.
ExperimentalFlannelOverlay bool `json:"experimentalFlannelOverlay"`
// outOfDiskTransitionFrequency is duration for which the kubelet has to
// wait before transitioning out of out-of-disk node condition status.
OutOfDiskTransitionFrequency unversioned.Duration `json:"outOfDiskTransitionFrequency"`

View File

@ -290,7 +290,6 @@ func autoConvert_v1alpha1_KubeletConfiguration_To_componentconfig_KubeletConfigu
if err := api.Convert_Pointer_bool_To_bool(&in.SerializeImagePulls, &out.SerializeImagePulls, s); err != nil {
return err
}
out.ExperimentalFlannelOverlay = in.ExperimentalFlannelOverlay
out.OutOfDiskTransitionFrequency = in.OutOfDiskTransitionFrequency
out.NodeIP = in.NodeIP
out.NodeLabels = in.NodeLabels
@ -471,7 +470,6 @@ func autoConvert_componentconfig_KubeletConfiguration_To_v1alpha1_KubeletConfigu
if err := api.Convert_bool_To_Pointer_bool(&in.SerializeImagePulls, &out.SerializeImagePulls, s); err != nil {
return err
}
out.ExperimentalFlannelOverlay = in.ExperimentalFlannelOverlay
out.OutOfDiskTransitionFrequency = in.OutOfDiskTransitionFrequency
out.NodeIP = in.NodeIP
out.NodeLabels = in.NodeLabels

View File

@ -331,7 +331,6 @@ func DeepCopy_v1alpha1_KubeletConfiguration(in interface{}, out interface{}, c *
} else {
out.SerializeImagePulls = nil
}
out.ExperimentalFlannelOverlay = in.ExperimentalFlannelOverlay
out.OutOfDiskTransitionFrequency = in.OutOfDiskTransitionFrequency
out.NodeIP = in.NodeIP
if in.NodeLabels != nil {

View File

@ -297,7 +297,6 @@ func DeepCopy_componentconfig_KubeletConfiguration(in interface{}, out interface
out.KubeAPIQPS = in.KubeAPIQPS
out.KubeAPIBurst = in.KubeAPIBurst
out.SerializeImagePulls = in.SerializeImagePulls
out.ExperimentalFlannelOverlay = in.ExperimentalFlannelOverlay
out.OutOfDiskTransitionFrequency = in.OutOfDiskTransitionFrequency
out.NodeIP = in.NodeIP
if in.NodeLabels != nil {

View File

@ -2708,13 +2708,6 @@ var OpenAPIDefinitions *common.OpenAPIDefinitions = &common.OpenAPIDefinitions{
Format: "",
},
},
"experimentalFlannelOverlay": {
SchemaProps: spec.SchemaProps{
Description: "experimentalFlannelOverlay enables experimental support for starting the kubelet with the default overlay network (flannel). Assumes flanneld is already running in client mode.",
Type: []string{"boolean"},
Format: "",
},
},
"outOfDiskTransitionFrequency": {
SchemaProps: spec.SchemaProps{
Description: "outOfDiskTransitionFrequency is duration for which the kubelet has to wait before transitioning out of out-of-disk node condition status.",
@ -2889,7 +2882,7 @@ var OpenAPIDefinitions *common.OpenAPIDefinitions = &common.OpenAPIDefinitions{
},
},
},
Required: []string{"TypeMeta", "podManifestPath", "syncFrequency", "fileCheckFrequency", "httpCheckFrequency", "manifestURL", "manifestURLHeader", "enableServer", "address", "port", "readOnlyPort", "tlsCertFile", "tlsPrivateKeyFile", "certDirectory", "hostnameOverride", "podInfraContainerImage", "dockerEndpoint", "rootDirectory", "seccompProfileRoot", "allowPrivileged", "hostNetworkSources", "hostPIDSources", "hostIPCSources", "registryPullQPS", "registryBurst", "eventRecordQPS", "eventBurst", "enableDebuggingHandlers", "minimumGCAge", "maxPerPodContainerCount", "maxContainerCount", "cAdvisorPort", "healthzPort", "healthzBindAddress", "oomScoreAdj", "registerNode", "clusterDomain", "masterServiceNamespace", "clusterDNS", "streamingConnectionIdleTimeout", "nodeStatusUpdateFrequency", "imageMinimumGCAge", "imageGCHighThresholdPercent", "imageGCLowThresholdPercent", "lowDiskSpaceThresholdMB", "volumeStatsAggPeriod", "networkPluginName", "networkPluginMTU", "networkPluginDir", "cniConfDir", "cniBinDir", "volumePluginDir", "containerRuntime", "remoteRuntimeEndpoint", "remoteImageEndpoint", "lockFilePath", "exitOnLockContention", "configureCbr0", "hairpinMode", "babysitDaemons", "maxPods", "nvidiaGPUs", "dockerExecHandlerName", "podCIDR", "resolvConf", "cpuCFSQuota", "containerized", "maxOpenFiles", "reconcileCIDR", "registerSchedulable", "contentType", "kubeAPIQPS", "kubeAPIBurst", "serializeImagePulls", "experimentalFlannelOverlay", "nodeLabels", "nonMasqueradeCIDR", "enableCustomMetrics", "podsPerCore", "enableControllerAttachDetach", "systemReserved", "kubeReserved", "protectKernelDefaults", "makeIPTablesUtilChains", "iptablesMasqueradeBit", "iptablesDropBit"},
Required: []string{"TypeMeta", "podManifestPath", "syncFrequency", "fileCheckFrequency", "httpCheckFrequency", "manifestURL", "manifestURLHeader", "enableServer", "address", "port", "readOnlyPort", "tlsCertFile", "tlsPrivateKeyFile", "certDirectory", "hostnameOverride", "podInfraContainerImage", "dockerEndpoint", "rootDirectory", "seccompProfileRoot", "allowPrivileged", "hostNetworkSources", "hostPIDSources", "hostIPCSources", "registryPullQPS", "registryBurst", "eventRecordQPS", "eventBurst", "enableDebuggingHandlers", "minimumGCAge", "maxPerPodContainerCount", "maxContainerCount", "cAdvisorPort", "healthzPort", "healthzBindAddress", "oomScoreAdj", "registerNode", "clusterDomain", "masterServiceNamespace", "clusterDNS", "streamingConnectionIdleTimeout", "nodeStatusUpdateFrequency", "imageMinimumGCAge", "imageGCHighThresholdPercent", "imageGCLowThresholdPercent", "lowDiskSpaceThresholdMB", "volumeStatsAggPeriod", "networkPluginName", "networkPluginMTU", "networkPluginDir", "cniConfDir", "cniBinDir", "volumePluginDir", "containerRuntime", "remoteRuntimeEndpoint", "remoteImageEndpoint", "lockFilePath", "exitOnLockContention", "configureCbr0", "hairpinMode", "babysitDaemons", "maxPods", "nvidiaGPUs", "dockerExecHandlerName", "podCIDR", "resolvConf", "cpuCFSQuota", "containerized", "maxOpenFiles", "reconcileCIDR", "registerSchedulable", "contentType", "kubeAPIQPS", "kubeAPIBurst", "serializeImagePulls", "nodeLabels", "nonMasqueradeCIDR", "enableCustomMetrics", "podsPerCore", "enableControllerAttachDetach", "systemReserved", "kubeReserved", "protectKernelDefaults", "makeIPTablesUtilChains", "iptablesMasqueradeBit", "iptablesDropBit"},
},
},
Dependencies: []string{
@ -14158,13 +14151,6 @@ var OpenAPIDefinitions *common.OpenAPIDefinitions = &common.OpenAPIDefinitions{
Format: "",
},
},
"experimentalFlannelOverlay": {
SchemaProps: spec.SchemaProps{
Description: "experimentalFlannelOverlay enables experimental support for starting the kubelet with the default overlay network (flannel). Assumes flanneld is already running in client mode.",
Type: []string{"boolean"},
Format: "",
},
},
"outOfDiskTransitionFrequency": {
SchemaProps: spec.SchemaProps{
Description: "outOfDiskTransitionFrequency is duration for which the kubelet has to wait before transitioning out of out-of-disk node condition status.",
@ -14339,7 +14325,7 @@ var OpenAPIDefinitions *common.OpenAPIDefinitions = &common.OpenAPIDefinitions{
},
},
},
Required: []string{"TypeMeta", "podManifestPath", "syncFrequency", "fileCheckFrequency", "httpCheckFrequency", "manifestURL", "manifestURLHeader", "enableServer", "address", "port", "readOnlyPort", "tlsCertFile", "tlsPrivateKeyFile", "certDirectory", "hostnameOverride", "podInfraContainerImage", "dockerEndpoint", "rootDirectory", "seccompProfileRoot", "allowPrivileged", "hostNetworkSources", "hostPIDSources", "hostIPCSources", "registryPullQPS", "registryBurst", "eventRecordQPS", "eventBurst", "enableDebuggingHandlers", "minimumGCAge", "maxPerPodContainerCount", "maxContainerCount", "cAdvisorPort", "healthzPort", "healthzBindAddress", "oomScoreAdj", "registerNode", "clusterDomain", "masterServiceNamespace", "clusterDNS", "streamingConnectionIdleTimeout", "nodeStatusUpdateFrequency", "imageMinimumGCAge", "imageGCHighThresholdPercent", "imageGCLowThresholdPercent", "lowDiskSpaceThresholdMB", "volumeStatsAggPeriod", "networkPluginName", "networkPluginDir", "cniConfDir", "cniBinDir", "networkPluginMTU", "volumePluginDir", "cloudProvider", "cloudConfigFile", "kubeletCgroups", "runtimeCgroups", "systemCgroups", "cgroupRoot", "containerRuntime", "remoteRuntimeEndpoint", "remoteImageEndpoint", "runtimeRequestTimeout", "rktPath", "rktAPIEndpoint", "rktStage1Image", "lockFilePath", "exitOnLockContention", "configureCbr0", "hairpinMode", "babysitDaemons", "maxPods", "nvidiaGPUs", "dockerExecHandlerName", "podCIDR", "resolvConf", "cpuCFSQuota", "containerized", "maxOpenFiles", "reconcileCIDR", "registerSchedulable", "contentType", "kubeAPIQPS", "kubeAPIBurst", "serializeImagePulls", "experimentalFlannelOverlay", "outOfDiskTransitionFrequency", "nodeIP", "nodeLabels", "nonMasqueradeCIDR", "enableCustomMetrics", "evictionHard", "evictionSoft", "evictionSoftGracePeriod", "evictionPressureTransitionPeriod", "evictionMaxPodGracePeriod", "evictionMinimumReclaim", "podsPerCore", "enableControllerAttachDetach", "systemReserved", "kubeReserved", "protectKernelDefaults", "makeIPTablesUtilChains", "iptablesMasqueradeBit", "iptablesDropBit"},
Required: []string{"TypeMeta", "podManifestPath", "syncFrequency", "fileCheckFrequency", "httpCheckFrequency", "manifestURL", "manifestURLHeader", "enableServer", "address", "port", "readOnlyPort", "tlsCertFile", "tlsPrivateKeyFile", "certDirectory", "hostnameOverride", "podInfraContainerImage", "dockerEndpoint", "rootDirectory", "seccompProfileRoot", "allowPrivileged", "hostNetworkSources", "hostPIDSources", "hostIPCSources", "registryPullQPS", "registryBurst", "eventRecordQPS", "eventBurst", "enableDebuggingHandlers", "minimumGCAge", "maxPerPodContainerCount", "maxContainerCount", "cAdvisorPort", "healthzPort", "healthzBindAddress", "oomScoreAdj", "registerNode", "clusterDomain", "masterServiceNamespace", "clusterDNS", "streamingConnectionIdleTimeout", "nodeStatusUpdateFrequency", "imageMinimumGCAge", "imageGCHighThresholdPercent", "imageGCLowThresholdPercent", "lowDiskSpaceThresholdMB", "volumeStatsAggPeriod", "networkPluginName", "networkPluginDir", "cniConfDir", "cniBinDir", "networkPluginMTU", "volumePluginDir", "cloudProvider", "cloudConfigFile", "kubeletCgroups", "runtimeCgroups", "systemCgroups", "cgroupRoot", "containerRuntime", "remoteRuntimeEndpoint", "remoteImageEndpoint", "runtimeRequestTimeout", "rktPath", "rktAPIEndpoint", "rktStage1Image", "lockFilePath", "exitOnLockContention", "configureCbr0", "hairpinMode", "babysitDaemons", "maxPods", "nvidiaGPUs", "dockerExecHandlerName", "podCIDR", "resolvConf", "cpuCFSQuota", "containerized", "maxOpenFiles", "reconcileCIDR", "registerSchedulable", "contentType", "kubeAPIQPS", "kubeAPIBurst", "serializeImagePulls", "outOfDiskTransitionFrequency", "nodeIP", "nodeLabels", "nonMasqueradeCIDR", "enableCustomMetrics", "evictionHard", "evictionSoft", "evictionSoftGracePeriod", "evictionPressureTransitionPeriod", "evictionMaxPodGracePeriod", "evictionMinimumReclaim", "podsPerCore", "enableControllerAttachDetach", "systemReserved", "kubeReserved", "protectKernelDefaults", "makeIPTablesUtilChains", "iptablesMasqueradeBit", "iptablesDropBit"},
},
},
Dependencies: []string{

View File

@ -1,168 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubelet
import (
"fmt"
"io/ioutil"
"os"
"strconv"
"strings"
utildbus "k8s.io/kubernetes/pkg/util/dbus"
utilexec "k8s.io/kubernetes/pkg/util/exec"
utiliptables "k8s.io/kubernetes/pkg/util/iptables"
"github.com/golang/glog"
)
// TODO: Move all this to a network plugin.
const (
// TODO: The location of default docker options is distro specific, so this
// probably won't work on anything other than debian/ubuntu. This is a
// short-term compromise till we've moved overlay setup into a plugin.
dockerOptsFile = "/etc/default/docker"
flannelSubnetKey = "FLANNEL_SUBNET"
flannelNetworkKey = "FLANNEL_NETWORK"
flannelMtuKey = "FLANNEL_MTU"
dockerOptsKey = "DOCKER_OPTS"
flannelSubnetFile = "/var/run/flannel/subnet.env"
)
// A Kubelet to flannel bridging helper.
type FlannelHelper struct {
subnetFile string
iptablesHelper utiliptables.Interface
}
// NewFlannelHelper creates a new flannel helper.
func NewFlannelHelper() *FlannelHelper {
return &FlannelHelper{
subnetFile: flannelSubnetFile,
iptablesHelper: utiliptables.New(utilexec.New(), utildbus.New(), utiliptables.ProtocolIpv4),
}
}
// Ensure the required MASQUERADE rules exist for the given network/cidr.
func (f *FlannelHelper) ensureFlannelMasqRule(kubeNetwork, podCIDR string) error {
// TODO: Investigate delegation to flannel via -ip-masq=true once flannel
// issue #374 is resolved.
comment := "Flannel masquerade facilitates pod<->node traffic."
args := []string{
"-m", "comment", "--comment", comment,
"!", "-d", kubeNetwork, "-s", podCIDR, "-j", "MASQUERADE",
}
_, err := f.iptablesHelper.EnsureRule(
utiliptables.Append,
utiliptables.TableNAT,
utiliptables.ChainPostrouting,
args...)
return err
}
// Handshake waits for the flannel subnet file and installs a few IPTables
// rules, returning the pod CIDR allocated for this node.
func (f *FlannelHelper) Handshake() (podCIDR string, err error) {
// TODO: Using a file to communicate is brittle
if _, err = os.Stat(f.subnetFile); err != nil {
return "", fmt.Errorf("Waiting for subnet file %v", f.subnetFile)
}
glog.Infof("Found flannel subnet file %v", f.subnetFile)
config, err := parseKVConfig(f.subnetFile)
if err != nil {
return "", err
}
if err = writeDockerOptsFromFlannelConfig(config); err != nil {
return "", err
}
podCIDR, ok := config[flannelSubnetKey]
if !ok {
return "", fmt.Errorf("No flannel subnet, config %+v", config)
}
kubeNetwork, ok := config[flannelNetworkKey]
if !ok {
return "", fmt.Errorf("No flannel network, config %+v", config)
}
if f.ensureFlannelMasqRule(kubeNetwork, podCIDR); err != nil {
return "", fmt.Errorf("Unable to install flannel masquerade %v", err)
}
return podCIDR, nil
}
// Take env variables from flannel subnet env and write to /etc/docker/defaults.
func writeDockerOptsFromFlannelConfig(flannelConfig map[string]string) error {
// TODO: Write dockeropts to unit file on systemd machines
// https://github.com/docker/docker/issues/9889
mtu, ok := flannelConfig[flannelMtuKey]
if !ok {
return fmt.Errorf("No flannel mtu, flannel config %+v", flannelConfig)
}
dockerOpts, err := parseKVConfig(dockerOptsFile)
if err != nil {
return err
}
opts, ok := dockerOpts[dockerOptsKey]
if !ok {
glog.Errorf("Did not find docker opts, writing them")
opts = fmt.Sprintf(
" --bridge=cbr0 --iptables=false --ip-masq=false")
} else {
opts, _ = strconv.Unquote(opts)
}
dockerOpts[dockerOptsKey] = fmt.Sprintf("\"%v --mtu=%v\"", opts, mtu)
if err = writeKVConfig(dockerOptsFile, dockerOpts); err != nil {
return err
}
return nil
}
// parseKVConfig takes a file with key-value env variables and returns a dictionary mapping the same.
func parseKVConfig(filename string) (map[string]string, error) {
config := map[string]string{}
if _, err := os.Stat(filename); err != nil {
return config, err
}
buff, err := ioutil.ReadFile(filename)
if err != nil {
return config, err
}
str := string(buff)
glog.Infof("Read kv options %+v from %v", str, filename)
for _, line := range strings.Split(str, "\n") {
kv := strings.Split(line, "=")
if len(kv) != 2 {
glog.Warningf("Ignoring non key-value pair %v", kv)
continue
}
config[string(kv[0])] = string(kv[1])
}
return config, nil
}
// writeKVConfig writes a kv map as env variables into the given file.
func writeKVConfig(filename string, kv map[string]string) error {
if _, err := os.Stat(filename); err != nil {
return err
}
content := ""
for k, v := range kv {
content += fmt.Sprintf("%v=%v\n", k, v)
}
glog.Warningf("Writing kv options %+v to %v", content, filename)
return ioutil.WriteFile(filename, []byte(content), 0644)
}

View File

@ -392,10 +392,8 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
// TODO(mtaufen): remove when internal cbr0 implementation gets removed in favor
// of the kubenet network plugin
var myConfigureCBR0 bool = kubeCfg.ConfigureCBR0
var myFlannelExperimentalOverlay bool = kubeCfg.ExperimentalFlannelOverlay
if kubeCfg.NetworkPluginName == "kubenet" {
myConfigureCBR0 = false
myFlannelExperimentalOverlay = false
}
klet := &Kubelet{
@ -426,27 +424,25 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
nodeRef: nodeRef,
nodeLabels: kubeCfg.NodeLabels,
nodeStatusUpdateFrequency: kubeCfg.NodeStatusUpdateFrequency.Duration,
os: kubeDeps.OSInterface,
oomWatcher: oomWatcher,
cgroupsPerQOS: kubeCfg.CgroupsPerQOS,
cgroupRoot: kubeCfg.CgroupRoot,
mounter: kubeDeps.Mounter,
writer: kubeDeps.Writer,
configureCBR0: myConfigureCBR0,
nonMasqueradeCIDR: kubeCfg.NonMasqueradeCIDR,
reconcileCIDR: kubeCfg.ReconcileCIDR,
maxPods: int(kubeCfg.MaxPods),
podsPerCore: int(kubeCfg.PodsPerCore),
nvidiaGPUs: int(kubeCfg.NvidiaGPUs),
syncLoopMonitor: atomic.Value{},
resolverConfig: kubeCfg.ResolverConfig,
cpuCFSQuota: kubeCfg.CPUCFSQuota,
daemonEndpoints: daemonEndpoints,
containerManager: kubeDeps.ContainerManager,
flannelExperimentalOverlay: myFlannelExperimentalOverlay,
flannelHelper: nil,
nodeIP: net.ParseIP(kubeCfg.NodeIP),
clock: clock.RealClock{},
os: kubeDeps.OSInterface,
oomWatcher: oomWatcher,
cgroupsPerQOS: kubeCfg.CgroupsPerQOS,
cgroupRoot: kubeCfg.CgroupRoot,
mounter: kubeDeps.Mounter,
writer: kubeDeps.Writer,
configureCBR0: myConfigureCBR0,
nonMasqueradeCIDR: kubeCfg.NonMasqueradeCIDR,
reconcileCIDR: kubeCfg.ReconcileCIDR,
maxPods: int(kubeCfg.MaxPods),
podsPerCore: int(kubeCfg.PodsPerCore),
nvidiaGPUs: int(kubeCfg.NvidiaGPUs),
syncLoopMonitor: atomic.Value{},
resolverConfig: kubeCfg.ResolverConfig,
cpuCFSQuota: kubeCfg.CPUCFSQuota,
daemonEndpoints: daemonEndpoints,
containerManager: kubeDeps.ContainerManager,
nodeIP: net.ParseIP(kubeCfg.NodeIP),
clock: clock.RealClock{},
outOfDiskTransitionFrequency: kubeCfg.OutOfDiskTransitionFrequency.Duration,
reservation: *reservation,
enableCustomMetrics: kubeCfg.EnableCustomMetrics,
@ -458,11 +454,6 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
iptablesDropBit: int(kubeCfg.IPTablesDropBit),
}
if klet.flannelExperimentalOverlay {
klet.flannelHelper = NewFlannelHelper()
glog.Infof("Flannel is in charge of podCIDR and overlay networking.")
}
if mode, err := effectiveHairpinMode(componentconfig.HairpinMode(kubeCfg.HairpinMode), kubeCfg.ContainerRuntime, kubeCfg.ConfigureCBR0, kubeCfg.NetworkPluginName); err != nil {
// This is a non-recoverable error. Returning it up the callstack will just
// lead to retries of the same failure, so just fail hard.
@ -965,15 +956,6 @@ type Kubelet struct {
// oneTimeInitializer is used to initialize modules that are dependent on the runtime to be up.
oneTimeInitializer sync.Once
// flannelExperimentalOverlay determines whether the experimental flannel
// network overlay is active.
flannelExperimentalOverlay bool
// TODO: Flannelhelper doesn't store any state, we can instantiate it
// on the fly if we're confident the dbus connetions it opens doesn't
// put the system under duress.
flannelHelper *FlannelHelper
// If non-nil, use this IP address for the node
nodeIP net.IP

View File

@ -88,7 +88,7 @@ func (kl *Kubelet) providerRequiresNetworkingConfiguration() bool {
// is used or whether we are using overlay networking. We should return
// true for cloud providers if they implement Routes() interface and
// we are not using overlay networking.
if kl.cloud == nil || kl.cloud.ProviderName() != "gce" || kl.flannelExperimentalOverlay {
if kl.cloud == nil || kl.cloud.ProviderName() != "gce" {
return false
}
_, supported := kl.cloud.Routes()
@ -224,22 +224,13 @@ func (kl *Kubelet) reconcileCBR0(podCIDR string) error {
// syncNetworkStatus updates the network state, ensuring that the network is
// configured correctly if the kubelet is set to configure cbr0:
// * handshake flannel helper if the flannel experimental overlay is being used.
// * ensure that iptables masq rules are setup
// * reconcile cbr0 with the pod CIDR
func (kl *Kubelet) syncNetworkStatus() {
var err error
if kl.configureCBR0 {
if kl.flannelExperimentalOverlay {
podCIDR, err := kl.flannelHelper.Handshake()
if err != nil {
glog.Infof("Flannel server handshake failed %v", err)
return
}
kl.updatePodCIDR(podCIDR)
}
if err := ensureIPTablesMasqRule(kl.iptClient, kl.nonMasqueradeCIDR); err != nil {
err = fmt.Errorf("Error on adding ip table rules: %v", err)
err = fmt.Errorf("Error on adding iptables rules: %v", err)
glog.Error(err)
kl.runtimeState.setNetworkState(err)
return

View File

@ -317,22 +317,7 @@ func (kl *Kubelet) tryUpdateNodeStatus() error {
return fmt.Errorf("no node instance returned for %q", kl.nodeName)
}
// Flannel is the authoritative source of pod CIDR, if it's running.
// This is a short term compromise till we get flannel working in
// reservation mode.
if kl.flannelExperimentalOverlay {
flannelPodCIDR := kl.runtimeState.podCIDR()
if node.Spec.PodCIDR != flannelPodCIDR {
node.Spec.PodCIDR = flannelPodCIDR
glog.Infof("Updating podcidr to %v", node.Spec.PodCIDR)
if updatedNode, err := kl.kubeClient.Core().Nodes().Update(node); err != nil {
glog.Warningf("Failed to update podCIDR: %v", err)
} else {
// Update the node resourceVersion so the status update doesn't fail.
node = updatedNode
}
}
} else if kl.reconcileCIDR {
if kl.reconcileCIDR {
kl.updatePodCIDR(node.Spec.PodCIDR)
}

View File

@ -29,8 +29,6 @@ const (
// ControllerManagerPort is the default port for the controller manager status server.
// May be overridden by a flag at startup.
ControllerManagerPort = 10252
// Port for flannel daemon.
FlannelDaemonPort = 10253
// KubeletReadOnlyPort exposes basic read-only services from the kubelet.
// May be overridden by a flag at startup.
// This is necessary for heapster to collect monitoring stats from the kubelet