Add a network plugin that duplicates "configureCBR0" functionality

pull/6/head
Dan Williams 2015-12-16 17:31:10 -06:00
parent 67414afd11
commit fabb65c13f
18 changed files with 502 additions and 11 deletions

View File

@ -112,8 +112,8 @@ ADMISSION_CONTROL=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,Pe
# Optional: if set to true kube-up will automatically check for existing resources and clean them up.
KUBE_UP_AUTOMATIC_CLEANUP=${KUBE_UP_AUTOMATIC_CLEANUP:-false}
# OpenContrail networking plugin specific settings
NETWORK_PROVIDER="${NETWORK_PROVIDER:-none}" # opencontrail, flannel
# Networking plugin specific settings.
NETWORK_PROVIDER="${NETWORK_PROVIDER:-none}" # opencontrail, flannel, kubenet
OPENCONTRAIL_TAG="${OPENCONTRAIL_TAG:-R2.20}"
OPENCONTRAIL_KUBERNETES_TAG="${OPENCONTRAIL_KUBERNETES_TAG:-master}"
OPENCONTRAIL_PUBLIC_SUBNET="${OPENCONTRAIL_PUBLIC_SUBNET:-10.1.0.0/16}"

View File

@ -0,0 +1,20 @@
/opt/cni:
file.directory:
- user: root
- group: root
- mode: 755
- makedirs: True
# These are all available CNI network plugins.
cni-tar:
archive:
- extracted
- user: root
- name: /opt/cni
- makedirs: True
- source: https://storage.googleapis.com/kubernetes-release/network-plugins/cni-09214926.tar.gz
- tar_options: v
- source_hash: md5=58f8631f912dd88be6a0920775db7274
- archive_format: tar
- if_missing: /opt/cni/bin

View File

@ -3,5 +3,9 @@
{% if pillar.get('e2e_storage_test_environment', '').lower() == 'true' -%}
{% set e2e_opts = '-s devicemapper' -%}
{% endif -%}
DOCKER_OPTS="{{grains_opts}} {{e2e_opts}} --bridge=cbr0 --iptables=false --ip-masq=false --log-level=warn"
{% set bridge_opts = "--bridge=cbr0" %}
{% if pillar.get('network_provider', '').lower() == 'kubenet' %}
{% set bridge_opts = "" %}
{% endif -%}
DOCKER_OPTS="{{grains_opts}} {{e2e_opts}} {{bridge_opts}} --iptables=false --ip-masq=false --log-level=warn"
DOCKER_NOFILE=1000000

View File

@ -14,6 +14,8 @@
# This is expected to be a short-term compromise.
{% if pillar.get('network_provider', '').lower() == 'flannel' %}
{% set allocate_node_cidrs = "--allocate-node-cidrs=false" -%}
{% elif pillar.get('network_provider', '').lower() == 'kubenet' %}
{% set allocate_node_cidrs = "--allocate-node-cidrs=true" -%}
{% elif pillar['allocate_node_cidrs'] is defined -%}
{% set allocate_node_cidrs = "--allocate-node-cidrs=" + pillar['allocate_node_cidrs'] -%}
{% endif -%}

View File

@ -26,6 +26,7 @@
{% set debugging_handlers = "--enable-debugging-handlers=true" -%}
{% set reconcile_cidr_args = "" -%}
{% if grains['roles'][0] == 'kubernetes-master' -%}
{% if grains.cloud in ['aws', 'gce', 'vagrant', 'vsphere'] -%}
@ -33,7 +34,8 @@
# running on the master.
{% if grains.kubelet_api_servers is defined -%}
{% set api_servers_with_port = "--api-servers=https://" + grains.kubelet_api_servers -%}
{% set master_kubelet_args = master_kubelet_args + "--register-schedulable=false --reconcile-cidr=false" -%}
{% set master_kubelet_args = master_kubelet_args + "--register-schedulable=false" -%}
{% set reconcile_cidr_args = "--reconcile-cidr=false" -%}
{% else -%}
{% set api_servers_with_port = "" -%}
{% endif -%}
@ -116,8 +118,13 @@
{% endif -%}
{% set pod_cidr = "" %}
{% if grains['roles'][0] == 'kubernetes-master' and grains.get('cbr-cidr') %}
{% set pod_cidr = "--pod-cidr=" + grains['cbr-cidr'] %}
{% if grains['roles'][0] == 'kubernetes-master' %}
{% if grains.get('cbr-cidr') %}
{% set pod_cidr = "--pod-cidr=" + grains['cbr-cidr'] %}
{% elif api_servers_with_port == '' and pillar.get('network_provider', '').lower() == 'kubenet' %}
# Kubelet standalone mode needs a PodCIDR since there is no controller-manager
{% set pod_cidr = "--pod-cidr=10.76.0.0/16" %}
{% endif -%}
{% endif %}
{% set cpu_cfs_quota = "" %}
@ -133,6 +140,11 @@
{% set network_plugin = "" -%}
{% if pillar.get('network_provider', '').lower() == 'opencontrail' %}
{% set network_plugin = "--network-plugin=opencontrail" %}
{% elif pillar.get('network_provider', '').lower() == 'kubenet' %}
{% set network_plugin = "--network-plugin=kubenet" -%}
{% if reconcile_cidr_args == '' -%}
{% set reconcile_cidr_args = "--reconcile-cidr=true" -%}
{% endif -%}
{% endif -%}
{% set kubelet_port = "" -%}
@ -146,4 +158,4 @@
{% endif -%}
# test_args has to be kept at the end, so they'll overwrite any prior configuration
DAEMON_ARGS="{{daemon_args}} {{api_servers_with_port}} {{debugging_handlers}} {{hostname_override}} {{cloud_provider}} {{config}} {{manifest_url}} --allow-privileged={{pillar['allow_privileged']}} {{log_level}} {{cluster_dns}} {{cluster_domain}} {{docker_root}} {{kubelet_root}} {{configure_cbr0}} {{non_masquerade_cidr}} {{cgroup_root}} {{system_container}} {{pod_cidr}} {{ master_kubelet_args }} {{cpu_cfs_quota}} {{network_plugin}} {{kubelet_port}} {{experimental_flannel_overlay}} {{test_args}}"
DAEMON_ARGS="{{daemon_args}} {{api_servers_with_port}} {{debugging_handlers}} {{hostname_override}} {{cloud_provider}} {{config}} {{manifest_url}} --allow-privileged={{pillar['allow_privileged']}} {{log_level}} {{cluster_dns}} {{cluster_domain}} {{docker_root}} {{kubelet_root}} {{configure_cbr0}} {{non_masquerade_cidr}} {{cgroup_root}} {{system_container}} {{pod_cidr}} {{ master_kubelet_args }} {{cpu_cfs_quota}} {{network_plugin}} {{kubelet_port}} {{experimental_flannel_overlay}} {{ reconcile_cidr_args }} {{test_args}}"

View File

@ -15,6 +15,8 @@ base:
- docker
{% if pillar.get('network_provider', '').lower() == 'flannel' %}
- flannel
{% elif pillar.get('network_provider', '').lower() == 'kubenet' %}
- cni
{% endif %}
- helpers
- cadvisor
@ -46,6 +48,8 @@ base:
{% if pillar.get('network_provider', '').lower() == 'flannel' %}
- flannel-server
- flannel
{% elif pillar.get('network_provider', '').lower() == 'kubenet' %}
- cni
{% endif %}
- kube-apiserver
- kube-controller-manager

View File

@ -97,8 +97,12 @@ octets=($(echo "$SERVICE_CLUSTER_IP_RANGE" | sed -e 's|/.*||' -e 's/\./ /g'))
service_ip=$(echo "${octets[*]}" | sed 's/ /./g')
MASTER_EXTRA_SANS="IP:${service_ip},DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.${DNS_DOMAIN},DNS:${MASTER_NAME}"
NETWORK_PROVIDER="${NETWORK_PROVIDER:-none}" # opencontrail, kubenet, etc
if [ "${NETWORK_PROVIDER}" == "kubenet" ]; then
CLUSTER_IP_RANGE="${CONTAINER_SUBNET}"
fi
# OpenContrail networking plugin specific settings
NETWORK_PROVIDER="${NETWORK_PROVIDER:-none}" # opencontrail
OPENCONTRAIL_TAG="${OPENCONTRAIL_TAG:-R2.20}"
OPENCONTRAIL_KUBERNETES_TAG="${OPENCONTRAIL_KUBERNETES_TAG:-master}"
OPENCONTRAIL_PUBLIC_SUBNET="${OPENCONTRAIL_PUBLIC_SUBNET:-10.1.0.0/16}"

View File

@ -57,7 +57,9 @@ echo "127.0.0.1 localhost" >> /etc/hosts # enables cmds like 'kubectl get pods'
echo "$MASTER_IP $MASTER_NAME" >> /etc/hosts
# Configure the master network
provision-network-master
if [ "${NETWORK_PROVIDER}" != "kubenet" ]; then
provision-network-master
fi
write-salt-config kubernetes-master

View File

@ -53,7 +53,9 @@ for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
done
# Configure network
provision-network-node
if [ "${NETWORK_PROVIDER}" != "kubenet" ]; then
provision-network-node
fi
write-salt-config kubernetes-pool

View File

@ -38,6 +38,7 @@ instance_prefix: '$(echo "$INSTANCE_PREFIX" | sed -e "s/'/''/g")'
admission_control: '$(echo "$ADMISSION_CONTROL" | sed -e "s/'/''/g")'
enable_cpu_cfs_quota: '$(echo "$ENABLE_CPU_CFS_QUOTA" | sed -e "s/'/''/g")'
network_provider: '$(echo "$NETWORK_PROVIDER" | sed -e "s/'/''/g")'
cluster_cidr: '$(echo "$CLUSTER_IP_RANGE" | sed -e "s/'/''/g")'
opencontrail_tag: '$(echo "$OPENCONTRAIL_TAG" | sed -e "s/'/''/g")'
opencontrail_kubernetes_tag: '$(echo "$OPENCONTRAIL_KUBERNETES_TAG" | sed -e "s/'/''/g")'
opencontrail_public_subnet: '$(echo "$OPENCONTRAIL_PUBLIC_SUBNET" | sed -e "s/'/''/g")'

View File

@ -153,6 +153,7 @@ function echo-kube-env() {
echo "NODE_NAMES=(${NODE_NAMES[@]})"
echo "NODE_IPS=(${NODE_IPS[@]})"
echo "CONTAINER_SUBNET='${CONTAINER_SUBNET}'"
echo "CLUSTER_IP_RANGE='${CLUSTER_IP_RANGE}'"
echo "MASTER_CONTAINER_SUBNET='${MASTER_CONTAINER_SUBNET}'"
echo "NODE_CONTAINER_NETMASKS='${NODE_CONTAINER_NETMASKS[@]}'"
echo "NODE_CONTAINER_SUBNETS=(${NODE_CONTAINER_SUBNETS[@]})"

View File

@ -25,6 +25,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/network"
"k8s.io/kubernetes/pkg/kubelet/network/cni"
"k8s.io/kubernetes/pkg/kubelet/network/exec"
"k8s.io/kubernetes/pkg/kubelet/network/kubenet"
// Volume plugins
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/aws_ebs"
@ -88,6 +89,7 @@ func ProbeNetworkPlugins(pluginDir string) []network.NetworkPlugin {
// for each existing plugin, add to the list
allPlugins = append(allPlugins, exec.ProbeNetworkPlugins(pluginDir)...)
allPlugins = append(allPlugins, cni.ProbeNetworkPlugins(pluginDir)...)
allPlugins = append(allPlugins, kubenet.NewPlugin())
return allPlugins
}
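For context, the kubelet selects exactly one plugin from this probed list by matching the --network-plugin flag against each plugin's Name(). A minimal sketch of that lookup, with a hypothetical helper name (the real selection logic lives in the network package), not the actual kubelet code:

func chooseNetworkPlugin(plugins []network.NetworkPlugin, name string, host network.Host) (network.NetworkPlugin, error) {
	// Match the kubelet's --network-plugin value against each probed plugin.
	for _, plugin := range plugins {
		if plugin.Name() == name {
			if err := plugin.Init(host); err != nil {
				return nil, err
			}
			return plugin, nil
		}
	}
	return nil, fmt.Errorf("network plugin %q not found", name)
}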

View File

@ -0,0 +1,66 @@
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
<!-- BEGIN STRIP_FOR_RELEASE -->
<h2>PLEASE NOTE: This document applies to the HEAD of the source tree</h2>
If you are using a released version of Kubernetes, you should
refer to the docs that go with that version.
Documentation for other releases can be found at
[releases.k8s.io](http://releases.k8s.io).
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
# Network Plugins
__Disclaimer__: Network plugins are in alpha. This document will change rapidly.
Network plugins in Kubernetes come in a few flavors:
* Plain vanilla exec plugins - deprecated in favor of CNI plugins.
* CNI plugins: adhere to the appc/CNI specification, designed for interoperability.
* Kubenet plugin: implements basic `cbr0` using the `bridge` and `host-local` CNI plugins.
## Installation
The kubelet has a single default network plugin, and a default network common to the entire cluster. It probes for plugins when it starts up, remembers what it finds, and executes the selected plugin at appropriate times in the pod lifecycle (this is only true for Docker, as rkt manages its own CNI plugins). There are two kubelet command-line parameters to keep in mind when using plugins:
* `--network-plugin-dir`: Kubelet probes this directory for plugins on startup.
* `--network-plugin`: The network plugin to use from `--network-plugin-dir`. It must match the name reported by a plugin probed from the plugin directory. For CNI plugins, this is simply "cni".
### Exec
Place plugins in `network-plugin-dir/plugin-name/plugin-name`; e.g., if you have a bridge plugin and `network-plugin-dir` is `/usr/lib/kubernetes`, you'd place the bridge plugin executable at `/usr/lib/kubernetes/bridge/bridge`. See [this comment](../../pkg/kubelet/network/exec/exec.go) for more details.
### CNI
The CNI plugin is selected by passing Kubelet the `--network-plugin=cni` command-line option. Kubelet reads the first CNI configuration file from `--network-plugin-dir` and uses the CNI configuration from that file to set up each pod's network. The CNI configuration file must match the [CNI specification](https://github.com/appc/cni/blob/master/SPEC.md), and any required CNI plugins referenced by the configuration must be present in `/opt/cni/bin`.
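For illustration, here is a minimal standalone Go program (not part of this commit; the network values are invented) that parses a CNI configuration of this shape with the same `libcni.ConfFromBytes` helper the kubenet plugin uses below:

```go
package main

import (
	"github.com/appc/cni/libcni"
)

func main() {
	// A made-up bridge network in CNI configuration format.
	conf, err := libcni.ConfFromBytes([]byte(`{
		"cniVersion": "0.1.0",
		"name": "mynet",
		"type": "bridge",
		"bridge": "mynet0",
		"ipam": {"type": "host-local", "subnet": "10.22.0.0/16"}
	}`))
	if err != nil {
		panic(err)
	}
	_ = conf // ready to hand to libcni.CNIConfig.AddNetwork
}
```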
### kubenet
The Linux-only kubenet plugin provides functionality similar to the `--configure-cbr0` kubelet command-line option. It creates a Linux bridge named `cbr0` and a veth pair for each pod, with the host end of each pair connected to `cbr0`. The pod end of the pair is assigned an IP address from the range assigned to the node, either through configuration or by the controller-manager. `cbr0` is assigned an MTU matching the smallest MTU of an enabled normal interface on the host. The kubenet plugin is currently mutually exclusive with, and will eventually replace, the `--configure-cbr0` option. It is also currently incompatible with the experimental flannel overlay.
The plugin requires a few things:
* The standard CNI `bridge` and `host-local` plugins to be placed in `/opt/cni/bin`.
* Kubelet must be run with the `--network-plugin=kubenet` argument to enable the plugin.
* Kubelet must also be run with the `--reconcile-cidr` argument to ensure the IP subnet assigned to the node by configuration or the controller-manager is propagated to the plugin.
* The node must be assigned an IP subnet through either the `--pod-cidr` kubelet command-line option or the `--allocate-node-cidrs=true --cluster-cidr=<cidr>` controller-manager command-line options (see the sketch after this list).
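To make the addressing concrete, here is a small hypothetical, standalone Go program (not part of this commit; the 10.76.0.0/16 value mirrors the standalone-kubelet default used in the salt templates) that derives the `cbr0` gateway the same way the plugin does, as the first address of the node's PodCIDR:

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	// PodCIDR assigned via --pod-cidr or by the controller-manager.
	_, cidr, err := net.ParseCIDR("10.76.0.0/16")
	if err != nil {
		panic(err)
	}
	// The first address in the subnet becomes the bridge/gateway address.
	gw := make(net.IP, len(cidr.IP))
	copy(gw, cidr.IP)
	gw[len(gw)-1]++
	fmt.Printf("subnet %s -> cbr0 gateway %s\n", cidr, gw) // 10.76.0.0/16 -> 10.76.0.1
}
```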
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/admin/network-plugins.md?pixel)]()
<!-- END MUNGE: GENERATED_ANALYTICS -->

View File

@ -24,6 +24,7 @@ DOCKERIZE_KUBELET=${DOCKERIZE_KUBELET:-""}
ALLOW_PRIVILEGED=${ALLOW_PRIVILEGED:-""}
ALLOW_SECURITY_CONTEXT=${ALLOW_SECURITY_CONTEXT:-""}
RUNTIME_CONFIG=${RUNTIME_CONFIG:-""}
NET_PLUGIN=${NET_PLUGIN:-""}
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
cd "${KUBE_ROOT}"
@ -255,12 +256,18 @@ function start_apiserver {
}
function start_controller_manager {
node_cidr_args=""
if [[ "${NET_PLUGIN}" == "kubenet" ]]; then
node_cidr_args="--allocate-node-cidrs=true --cluster-cidr=10.1.0.0/16 "
fi
CTLRMGR_LOG=/tmp/kube-controller-manager.log
sudo -E "${GO_OUT}/kube-controller-manager" \
--v=${LOG_LEVEL} \
--service-account-private-key-file="${SERVICE_ACCOUNT_KEY}" \
--root-ca-file="${ROOT_CA_FILE}" \
--enable-hostpath-provisioner="${ENABLE_HOSTPATH_PROVISIONER}" \
${node_cidr_args} \
--master="${API_HOST}:${API_PORT}" >"${CTLRMGR_LOG}" 2>&1 &
CTLRMGR_PID=$!
}
@ -283,6 +290,16 @@ function start_kubelet {
fi
fi
net_plugin_args=""
if [[ -n "${NET_PLUGIN}" ]]; then
net_plugin_args="--network-plugin=${NET_PLUGIN}"
fi
kubenet_plugin_args=""
if [[ "${NET_PLUGIN}" == "kubenet" ]]; then
kubenet_plugin_args="--reconcile-cidr=true "
fi
sudo -E "${GO_OUT}/kubelet" ${priv_arg}\
--v=${LOG_LEVEL} \
--chaos-chance="${CHAOS_CHANCE}" \
@ -294,6 +311,8 @@ function start_kubelet {
--api-servers="${API_HOST}:${API_PORT}" \
--cpu-cfs-quota=${CPU_CFS_QUOTA} \
--cluster-dns="127.0.0.1" \
${net_plugin_args} \
${kubenet_plugin_args} \
--port="$KUBELET_PORT" >"${KUBELET_LOG}" 2>&1 &
KUBELET_PID=$!
else

View File

@ -1654,7 +1654,7 @@ func (dm *DockerManager) createPodInfraContainer(pod *api.Pod) (kubecontainer.Do
netNamespace := ""
var ports []api.ContainerPort
if dm.networkPlugin.Name() == "cni" {
if dm.networkPlugin.Name() == "cni" || dm.networkPlugin.Name() == "kubenet" {
netNamespace = "none"
}
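The effect of this change: for both "cni" and "kubenet", the pod infra container is created with Docker's "none" network mode, so Docker leaves the network namespace empty and the plugin populates it instead. A rough sketch of the resulting container creation, assuming the go-dockerclient types dockertools used at the time (the field placement and names below are an assumption, not taken from this commit):

opts := docker.CreateContainerOptions{
	Name:   "k8s_POD_example_default", // hypothetical infra container name
	Config: &docker.Config{Image: "gcr.io/google_containers/pause:2.0"},
	// NetworkMode "none": no Docker-managed networking for this container.
	HostConfig: &docker.HostConfig{NetworkMode: "none"},
}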

View File

@ -269,6 +269,13 @@ func NewMainKubelet(
oomWatcher := NewOOMWatcher(cadvisorInterface, recorder)
// TODO: remove when internal cbr0 implementation gets removed in favor
// of the kubenet network plugin
if networkPluginName == "kubenet" {
configureCBR0 = false
flannelExperimentalOverlay = false
}
klet := &Kubelet{
hostname: hostname,
nodeName: nodeName,
@ -638,6 +645,7 @@ type Kubelet struct {
resolverConfig string
// Optionally shape the bandwidth of a pod
// TODO: remove when kubenet plugin is ready
shaper bandwidth.BandwidthShaper
// True if container cpu limits should be enforced via cgroup CFS quota
@ -2595,6 +2603,8 @@ func (kl *Kubelet) updateRuntimeUp() {
kl.runtimeState.setRuntimeSync(kl.clock.Now())
}
// TODO: remove when kubenet plugin is ready
// NOTE!!! if you make changes here, also make them to kubenet
func (kl *Kubelet) reconcileCBR0(podCIDR string) error {
if podCIDR == "" {
glog.V(5).Info("PodCIDR not set. Will not configure cbr0.")
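Since kubenet duplicates this logic (per the note above), a rough sketch of what the cbr0 reconciliation amounts to may help. This is a hypothetical rendering using the same vishvananda/netlink library the kubenet plugin imports, not the actual kubelet code:

func ensureCBR0(podCIDR string) error {
	_, cidr, err := net.ParseCIDR(podCIDR)
	if err != nil {
		return err
	}
	// Create the bridge if it does not exist yet.
	br, err := netlink.LinkByName("cbr0")
	if err != nil {
		la := netlink.NewLinkAttrs()
		la.Name = "cbr0"
		br = &netlink.Bridge{LinkAttrs: la}
		if err := netlink.LinkAdd(br); err != nil {
			return err
		}
	}
	// The bridge carries the first address of the node's PodCIDR.
	gw := make(net.IP, len(cidr.IP))
	copy(gw, cidr.IP)
	gw[len(gw)-1]++
	addr := &netlink.Addr{IPNet: &net.IPNet{IP: gw, Mask: cidr.Mask}}
	if err := netlink.AddrAdd(br, addr); err != nil && !os.IsExist(err) {
		return err
	}
	return netlink.LinkSetUp(br)
}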

View File

@ -0,0 +1,287 @@
// +build linux
/*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubenet
import (
"fmt"
"net"
"strings"
"syscall"
"github.com/vishvananda/netlink"
"github.com/appc/cni/libcni"
"github.com/golang/glog"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/dockertools"
"k8s.io/kubernetes/pkg/kubelet/network"
"k8s.io/kubernetes/pkg/util/bandwidth"
)
const (
KubenetPluginName = "kubenet"
BridgeName = "cbr0"
DefaultCNIDir = "/opt/cni/bin"
DefaultInterfaceName = "eth0"
)
type kubenetNetworkPlugin struct {
host network.Host
netConfig *libcni.NetworkConfig
cniConfig *libcni.CNIConfig
shaper bandwidth.BandwidthShaper
podCIDRs map[kubecontainer.DockerID]string
MTU int
}
func NewPlugin() network.NetworkPlugin {
return &kubenetNetworkPlugin{
podCIDRs: make(map[kubecontainer.DockerID]string),
MTU: 1460,
}
}
func (plugin *kubenetNetworkPlugin) Init(host network.Host) error {
plugin.host = host
plugin.cniConfig = &libcni.CNIConfig{
Path: []string{DefaultCNIDir},
}
if link, err := findMinMTU(); err == nil {
plugin.MTU = link.MTU
glog.V(5).Infof("Using interface %s MTU %d as bridge MTU", link.Name, link.MTU)
} else {
glog.Warningf("Failed to find default bridge MTU: %v", err)
}
return nil
}
func findMinMTU() (*net.Interface, error) {
intfs, err := net.Interfaces()
if err != nil {
return nil, err
}
mtu := 999999
defIntfIndex := -1
for i, intf := range intfs {
if ((intf.Flags & net.FlagUp) != 0) && (intf.Flags&(net.FlagLoopback|net.FlagPointToPoint) == 0) {
if intf.MTU < mtu {
mtu = intf.MTU
defIntfIndex = i
}
}
}
if mtu >= 999999 || mtu < 576 || defIntfIndex < 0 {
return nil, fmt.Errorf("no suitable interface: %v", BridgeName)
}
return &intfs[defIntfIndex], nil
}
const NET_CONFIG_TEMPLATE = `{
"cniVersion": "0.1.0",
"name": "kubenet",
"type": "bridge",
"bridge": "%s",
"mtu": %d,
"addIf": "%s",
"isGateway": true,
"ipMasq": true,
"ipam": {
"type": "host-local",
"subnet": "%s",
"gateway": "%s",
"routes": [
{ "dst": "0.0.0.0/0" }
]
}
}`
func (plugin *kubenetNetworkPlugin) Event(name string, details map[string]interface{}) {
if name != network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE {
return
}
podCIDR, ok := details[network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE_DETAIL_CIDR].(string)
if !ok {
glog.Warningf("%s event didn't contain pod CIDR", network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE)
return
}
if plugin.netConfig != nil {
glog.V(5).Infof("Ignoring subsequent pod CIDR update to %s", podCIDR)
return
}
glog.V(5).Infof("PodCIDR is set to %q", podCIDR)
_, cidr, err := net.ParseCIDR(podCIDR)
if err == nil {
// Set bridge address to first address in IPNet
cidr.IP.To4()[3] += 1
json := fmt.Sprintf(NET_CONFIG_TEMPLATE, BridgeName, plugin.MTU, DefaultInterfaceName, podCIDR, cidr.IP.String())
plugin.netConfig, err = libcni.ConfFromBytes([]byte(json))
if err == nil {
glog.V(5).Infof("CNI network config:\n%s", json)
// Ensure cbr0 has no conflicting addresses; CNI's 'bridge'
// plugin will bail out if the bridge has an unexpected one
plugin.clearBridgeAddressesExcept(cidr.IP.String())
}
}
if err != nil {
glog.Warningf("Failed to generate CNI network config: %v", err)
}
}
func (plugin *kubenetNetworkPlugin) clearBridgeAddressesExcept(keep string) {
bridge, err := netlink.LinkByName(BridgeName)
if err != nil {
return
}
addrs, err := netlink.AddrList(bridge, syscall.AF_INET)
if err != nil {
return
}
for _, addr := range addrs {
if addr.IPNet.String() != keep {
glog.V(5).Infof("Removing old address %s from %s", addr.IPNet.String(), BridgeName)
netlink.AddrDel(bridge, &addr)
}
}
}
func (plugin *kubenetNetworkPlugin) Name() string {
return KubenetPluginName
}
func (plugin *kubenetNetworkPlugin) SetUpPod(namespace string, name string, id kubecontainer.DockerID) error {
// Can't set up pods if we don't have a PodCIDR yet
if plugin.netConfig == nil {
return fmt.Errorf("Kubenet needs a PodCIDR to set up pods")
}
runtime, ok := plugin.host.GetRuntime().(*dockertools.DockerManager)
if !ok {
return fmt.Errorf("Kubenet execution called on non-docker runtime")
}
netnsPath, err := runtime.GetNetNS(id.ContainerID())
if err != nil {
return err
}
rt := buildCNIRuntimeConf(name, namespace, id.ContainerID(), netnsPath)
res, err := plugin.cniConfig.AddNetwork(plugin.netConfig, rt)
if err != nil {
return fmt.Errorf("Error adding container to network: %v", err)
}
if res.IP4 == nil {
return fmt.Errorf("CNI plugin reported no IPv4 address for container %v.", id)
}
plugin.podCIDRs[id] = res.IP4.IP.String()
// The first SetUpPod call creates the bridge; ensure shaping is enabled
if plugin.shaper == nil {
plugin.shaper = bandwidth.NewTCShaper(BridgeName)
if plugin.shaper == nil {
return fmt.Errorf("Failed to create bandwidth shaper!")
}
plugin.shaper.ReconcileInterface()
}
// TODO: get ingress/egress from Pod.Spec and add pod CIDR to shaper
return nil
}
func (plugin *kubenetNetworkPlugin) TearDownPod(namespace string, name string, id kubecontainer.DockerID) error {
if plugin.netConfig == nil {
return fmt.Errorf("Kubenet needs a PodCIDR to tear down pods")
}
runtime, ok := plugin.host.GetRuntime().(*dockertools.DockerManager)
if !ok {
return fmt.Errorf("Kubenet execution called on non-docker runtime")
}
netnsPath, err := runtime.GetNetNS(id.ContainerID())
if err != nil {
return err
}
rt := buildCNIRuntimeConf(name, namespace, id.ContainerID(), netnsPath)
// no cached CIDR is OK during teardown
if cidr, ok := plugin.podCIDRs[id]; ok {
glog.V(5).Infof("Removing pod CIDR %s from shaper", cidr)
// shaper wants /32
if addr, _, err := net.ParseCIDR(cidr); err == nil {
if err = plugin.shaper.Reset(fmt.Sprintf("%s/32", addr.String())); err != nil {
glog.Warningf("Failed to remove pod CIDR %s from shaper: %v", cidr, err)
}
}
}
delete(plugin.podCIDRs, id)
if err := plugin.cniConfig.DelNetwork(plugin.netConfig, rt); err != nil {
return fmt.Errorf("Error removing container from network: %v", err)
}
return nil
}
// TODO: Use the addToNetwork function to obtain the IP of the Pod. That will assume idempotent ADD call to the plugin.
// Also fix the runtime's call to Status function to be done only in the case that the IP is lost, no need to do periodic calls
func (plugin *kubenetNetworkPlugin) Status(namespace string, name string, id kubecontainer.DockerID) (*network.PodNetworkStatus, error) {
cidr, ok := plugin.podCIDRs[id]
if !ok {
return nil, fmt.Errorf("No IP address found for pod %v", id)
}
ip, _, err := net.ParseCIDR(strings.Trim(cidr, "\n"))
if err != nil {
return nil, err
}
return &network.PodNetworkStatus{IP: ip}, nil
}
func buildCNIRuntimeConf(podName string, podNs string, podInfraContainerID kubecontainer.ContainerID, podNetnsPath string) *libcni.RuntimeConf {
glog.V(4).Infof("Kubenet: using netns path %v", podNetnsPath)
glog.V(4).Infof("Kubenet: using podns path %v", podNs)
return &libcni.RuntimeConf{
ContainerID: podInfraContainerID.ID,
NetNS: podNetnsPath,
IfName: DefaultInterfaceName,
}
}
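For clarity, this is roughly how a caller (in the real tree, the kubelet) hands the node's PodCIDR to the plugin so that netConfig gets built before the first SetUpPod call. A sketch; the constant names are the real ones from the network package used above, the CIDR value is an example:

details := map[string]interface{}{
	network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE_DETAIL_CIDR: "10.76.1.0/24",
}
plugin.Event(network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE, details)
// After this event, plugin.netConfig is non-nil and pods can be set up.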

View File

@ -0,0 +1,55 @@
// +build !linux
/*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubenet
import (
"fmt"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/network"
)
type kubenetNetworkPlugin struct {
}
func NewPlugin() network.NetworkPlugin {
return &kubenetNetworkPlugin{}
}
func (plugin *kubenetNetworkPlugin) Init(host network.Host) error {
return fmt.Errorf("Kubenet is not supported in this build")
}
func (plugin *kubenetNetworkPlugin) Event(name string, details map[string]interface{}) {
}
func (plugin *kubenetNetworkPlugin) Name() string {
return "kubenet"
}
func (plugin *kubenetNetworkPlugin) SetUpPod(namespace string, name string, id kubecontainer.DockerID) error {
return fmt.Errorf("Kubenet is not supported in this build")
}
func (plugin *kubenetNetworkPlugin) TearDownPod(namespace string, name string, id kubecontainer.DockerID) error {
return fmt.Errorf("Kubenet is not supported in this build")
}
func (plugin *kubenetNetworkPlugin) Status(namespace string, name string, id kubecontainer.DockerID) (*network.PodNetworkStatus, error) {
return nil, fmt.Errorf("Kubenet is not supported in this build")
}