Remove the legacy networking mode --configure-cbr0

pull/6/head
Lucas Käldström 2016-10-16 21:26:41 +03:00
parent 56fde00f1f
commit 0800df74ab
10 changed files with 19 additions and 414 deletions

View File

@@ -175,8 +175,6 @@ func (s *KubeletServer) AddFlags(fs *pflag.FlagSet) {
fs.StringVar(&s.RktAPIEndpoint, "rkt-api-endpoint", s.RktAPIEndpoint, "The endpoint of the rkt API service to communicate with. Only used if --container-runtime='rkt'.")
fs.StringVar(&s.RktStage1Image, "rkt-stage1-image", s.RktStage1Image, "image to use as stage1. Local paths and http/https URLs are supported. If empty, the 'stage1.aci' in the same directory as '--rkt-path' will be used.")
fs.MarkDeprecated("rkt-stage1-image", "Will be removed in a future version. The default stage1 image will be specified by the rkt configurations, see https://github.com/coreos/rkt/blob/master/Documentation/configuration.md for more details.")
-fs.BoolVar(&s.ConfigureCBR0, "configure-cbr0", s.ConfigureCBR0, "If true, kubelet will configure cbr0 based on Node.Spec.PodCIDR.")
-fs.MarkDeprecated("configure-cbr0", "Will be removed in a future version. Please use kubenet or other network plugins.")
fs.StringVar(&s.HairpinMode, "hairpin-mode", s.HairpinMode, "How should the kubelet set up hairpin NAT. This allows endpoints of a Service to load-balance back to themselves if they should try to access their own Service. Valid values are \"promiscuous-bridge\", \"hairpin-veth\" and \"none\".")
fs.BoolVar(&s.BabysitDaemons, "babysit-daemons", s.BabysitDaemons, "If true, the node has babysitter process monitoring docker and kubelet.")
fs.MarkDeprecated("babysit-daemons", "Will be removed in a future version.")
@@ -200,10 +198,10 @@ func (s *KubeletServer) AddFlags(fs *pflag.FlagSet) {
fs.Float64Var(&s.ChaosChance, "chaos-chance", s.ChaosChance, "If > 0.0, introduce random client errors and latency. Intended for testing. [default=0.0]")
fs.BoolVar(&s.Containerized, "containerized", s.Containerized, "Experimental support for running kubelet in a container. Intended for testing. [default=false]")
fs.Int64Var(&s.MaxOpenFiles, "max-open-files", s.MaxOpenFiles, "Number of files that can be opened by Kubelet process. [default=1000000]")
-fs.BoolVar(&s.ReconcileCIDR, "reconcile-cidr", s.ReconcileCIDR, "Reconcile node CIDR with the CIDR specified by the API server. No-op if register-node or configure-cbr0 is false. [default=true]")
+fs.BoolVar(&s.ReconcileCIDR, "reconcile-cidr", s.ReconcileCIDR, "Reconcile node CIDR with the CIDR specified by the API server. Won't have any effect if register-node is false. [default=true]")
fs.Var(&s.SystemReserved, "system-reserved", "A set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=150G) pairs that describe resources reserved for non-kubernetes components. Currently only cpu and memory are supported. See http://kubernetes.io/docs/user-guide/compute-resources for more detail. [default=none]")
fs.Var(&s.KubeReserved, "kube-reserved", "A set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=150G) pairs that describe resources reserved for kubernetes system components. Currently only cpu and memory are supported. See http://kubernetes.io/docs/user-guide/compute-resources for more detail. [default=none]")
-fs.BoolVar(&s.RegisterSchedulable, "register-schedulable", s.RegisterSchedulable, "Register the node as schedulable. No-op if register-node is false. [default=true]")
+fs.BoolVar(&s.RegisterSchedulable, "register-schedulable", s.RegisterSchedulable, "Register the node as schedulable. Won't have any effect if register-node is false. [default=true]")
fs.StringVar(&s.ContentType, "kube-api-content-type", s.ContentType, "Content type of requests sent to apiserver.")
fs.Int32Var(&s.KubeAPIQPS, "kube-api-qps", s.KubeAPIQPS, "QPS to use while talking with kubernetes apiserver")
fs.Int32Var(&s.KubeAPIBurst, "kube-api-burst", s.KubeAPIBurst, "Burst to use while talking with kubernetes apiserver")
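
As an aside, the deprecation dance in this file is plain spf13/pflag: register the flag normally, then mark it deprecated, which hides it from --help and warns whenever it is actually set. A minimal, self-contained sketch of the same pattern; the flag-set name, default value, and messages are illustrative, not the kubelet's actual wiring:

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("kubelet", pflag.ExitOnError)

	// Register the flag as usual so existing users keep working.
	var configureCBR0 bool
	fs.BoolVar(&configureCBR0, "configure-cbr0", false,
		"If true, kubelet will configure cbr0 based on Node.Spec.PodCIDR.")

	// MarkDeprecated hides the flag from --help and prints the message
	// whenever the flag is set on the command line.
	if err := fs.MarkDeprecated("configure-cbr0",
		"Will be removed in a future version. Please use kubenet or other network plugins."); err != nil {
		panic(err) // only fails if the flag name is unknown
	}

	fs.Parse([]string{"--configure-cbr0=true"})
	fmt.Println("configure-cbr0:", configureCBR0) // true, with a deprecation warning on stderr
}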

View File

@@ -90,7 +90,6 @@ concurrent-resource-quota-syncs
concurrent-serviceaccount-token-syncs
concurrent-service-syncs
config-sync-period
-configure-cbr0
configure-cloud-routes
conntrack-max
conntrack-max-per-core

View File

@@ -317,18 +317,14 @@ type KubeletConfiguration struct {
// This will cause the kubelet to listen to inotify events on the lock file,
// releasing it and exiting when another process tries to open that file.
ExitOnLockContention bool `json:"exitOnLockContention"`
-// configureCBR0 enables the kublet to configure cbr0 based on
-// Node.Spec.PodCIDR.
-ConfigureCBR0 bool `json:"configureCbr0"`
// How should the kubelet configure the container bridge for hairpin packets.
// Setting this flag allows endpoints in a Service to loadbalance back to
// themselves if they should try to access their own Service. Values:
// "promiscuous-bridge": make the container bridge promiscuous.
// "hairpin-veth": set the hairpin flag on container veth interfaces.
// "none": do nothing.
-// Setting --configure-cbr0 to false implies that to achieve hairpin NAT
-// one must set --hairpin-mode=veth-flag, because bridge assumes the
-// existence of a container bridge named cbr0.
+// Generally, one must set --hairpin-mode=hairpin-veth to achieve hairpin NAT,
+// because promiscuous-bridge assumes the existence of a container bridge named cbr0.
HairpinMode string `json:"hairpinMode"`
// The node has babysitter process monitoring docker and kubelet.
BabysitDaemons bool `json:"babysitDaemons"`
@@ -354,10 +350,10 @@ type KubeletConfiguration struct {
// maxOpenFiles is the number of files that can be opened by the Kubelet process.
MaxOpenFiles int64 `json:"maxOpenFiles"`
// reconcileCIDR is Reconcile node CIDR with the CIDR specified by the
-// API server. No-op if register-node or configure-cbr0 is false.
+// API server. Won't have any effect if register-node is false.
ReconcileCIDR bool `json:"reconcileCIDR"`
// registerSchedulable tells the kubelet to register the node as
-// schedulable. No-op if register-node is false.
+// schedulable. Won't have any effect if register-node is false.
RegisterSchedulable bool `json:"registerSchedulable"`
// contentType is the content type of requests sent to the apiserver.
ContentType string `json:"contentType"`
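
The json tags on these fields are the keys a serialized kubelet configuration uses. A trimmed-down, hypothetical stand-in for KubeletConfiguration, showing how the three fields touched here round-trip through encoding/json:

package main

import (
	"encoding/json"
	"fmt"
)

// kubeletConfig is a toy subset of KubeletConfiguration reusing the
// same JSON tags as the fields above.
type kubeletConfig struct {
	HairpinMode         string `json:"hairpinMode"`
	ReconcileCIDR       bool   `json:"reconcileCIDR"`
	RegisterSchedulable bool   `json:"registerSchedulable"`
}

func main() {
	cfg := kubeletConfig{
		HairpinMode:         "promiscuous-bridge", // or "hairpin-veth", "none"
		ReconcileCIDR:       true,
		RegisterSchedulable: true,
	}
	out, err := json.MarshalIndent(cfg, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
	// {
	//   "hairpinMode": "promiscuous-bridge",
	//   "reconcileCIDR": true,
	//   "registerSchedulable": true
	// }
}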

View File

@@ -161,9 +161,6 @@ func SetDefaults_KubeletConfiguration(obj *KubeletConfiguration) {
if obj.CertDirectory == "" {
obj.CertDirectory = "/var/run/kubernetes"
}
-if obj.ConfigureCBR0 == nil {
-obj.ConfigureCBR0 = boolVar(false)
-}
if obj.CgroupsPerQOS == nil {
obj.CgroupsPerQOS = boolVar(false)
}

View File

@@ -372,18 +372,14 @@ type KubeletConfiguration struct {
// This will cause the kubelet to listen to inotify events on the lock file,
// releasing it and exiting when another process tries to open that file.
ExitOnLockContention bool `json:"exitOnLockContention"`
-// configureCBR0 enables the kublet to configure cbr0 based on
-// Node.Spec.PodCIDR.
-ConfigureCBR0 *bool `json:"configureCbr0"`
// How should the kubelet configure the container bridge for hairpin packets.
// Setting this flag allows endpoints in a Service to loadbalance back to
// themselves if they should try to access their own Service. Values:
// "promiscuous-bridge": make the container bridge promiscuous.
// "hairpin-veth": set the hairpin flag on container veth interfaces.
// "none": do nothing.
-// Setting --configure-cbr0 to false implies that to achieve hairpin NAT
-// one must set --hairpin-mode=veth-flag, because bridge assumes the
-// existence of a container bridge named cbr0.
+// Generally, one must set --hairpin-mode=hairpin-veth to achieve hairpin NAT,
+// because promiscuous-bridge assumes the existence of a container bridge named cbr0.
HairpinMode string `json:"hairpinMode"`
// The node has babysitter process monitoring docker and kubelet.
BabysitDaemons bool `json:"babysitDaemons"`
@@ -409,10 +405,10 @@ type KubeletConfiguration struct {
// maxOpenFiles is the number of files that can be opened by the Kubelet process.
MaxOpenFiles int64 `json:"maxOpenFiles"`
// reconcileCIDR is Reconcile node CIDR with the CIDR specified by the
-// API server. No-op if register-node or configure-cbr0 is false.
+// API server. Won't have any effect if register-node is false.
ReconcileCIDR *bool `json:"reconcileCIDR"`
// registerSchedulable tells the kubelet to register the node as
-// schedulable. No-op if register-node is false.
+// schedulable. Won't have any effect if register-node is false.
RegisterSchedulable *bool `json:"registerSchedulable"`
// contentType is the content type of requests sent to the apiserver.
ContentType string `json:"contentType"`

View File

@@ -1,160 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubelet
import (
"bytes"
"fmt"
"net"
"os"
"os/exec"
"regexp"
"syscall"

"github.com/golang/glog"
"k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/pkg/util/iptables"
"k8s.io/kubernetes/pkg/util/procfs"
)
var cidrRegexp = regexp.MustCompile(`inet ([0-9a-fA-F.:]*/[0-9]*)`)
func createCBR0(wantCIDR *net.IPNet, babysitDaemons bool) error {
// recreate cbr0 with wantCIDR
if err := exec.Command("brctl", "addbr", "cbr0").Run(); err != nil {
glog.Error(err)
return err
}
if err := exec.Command("ip", "addr", "add", wantCIDR.String(), "dev", "cbr0").Run(); err != nil {
glog.Error(err)
return err
}
if err := exec.Command("ip", "link", "set", "dev", "cbr0", "mtu", "1460", "up").Run(); err != nil {
glog.Error(err)
return err
}
// Stop docker so that babysitter process can restart it again with proper configurations and
// checkpoint file (https://github.com/docker/docker/issues/18283). It is safe to kill docker
// process here since CIDR can be changed only once for a given node object, and node is marked
// as NotReady until the docker daemon is restarted with the newly configured custom bridge.
// TODO (dawnchen): Remove this once corrupted checkpoint issue is fixed.
//
// For now just log the error. The containerRuntime check will catch docker failures.
// TODO (dawnchen) figure out what we should do for rkt here.
if babysitDaemons {
if err := procfs.PKill("docker", syscall.SIGKILL); err != nil {
glog.Error(err)
}
} else if util.UsingSystemdInitSystem() {
if err := exec.Command("systemctl", "restart", "docker").Run(); err != nil {
glog.Error(err)
}
} else {
if err := exec.Command("service", "docker", "restart").Run(); err != nil {
glog.Error(err)
}
}
glog.V(2).Info("Recreated cbr0 and restarted docker")
return nil
}
func ensureCbr0(wantCIDR *net.IPNet, promiscuous, babysitDaemons bool) error {
exists, err := cbr0Exists()
if err != nil {
return err
}
if !exists {
glog.V(2).Infof("CBR0 doesn't exist, attempting to create it with range: %s", wantCIDR)
return createCBR0(wantCIDR, babysitDaemons)
}
if !cbr0CidrCorrect(wantCIDR) {
glog.V(2).Infof("Attempting to recreate cbr0 with address range: %s", wantCIDR)
// delete cbr0
if err := exec.Command("ip", "link", "set", "dev", "cbr0", "down").Run(); err != nil {
glog.Error(err)
return err
}
if err := exec.Command("brctl", "delbr", "cbr0").Run(); err != nil {
glog.Error(err)
return err
}
if err := createCBR0(wantCIDR, babysitDaemons); err != nil {
glog.Error(err)
return err
}
}
// Put the container bridge into promiscuous mode to force it to accept hairpin packets.
// TODO: Remove this once the kernel bug (#20096) is fixed.
if promiscuous {
// Checking if the bridge is in promiscuous mode is as expensive and more brittle than
// simply setting the flag every time.
if err := exec.Command("ip", "link", "set", "cbr0", "promisc", "on").Run(); err != nil {
glog.Error(err)
return err
}
}
return nil
}
// cbr0Exists checks whether the cbr0 network interface is present on the
// node, reporting a missing interface as (false, nil) so the caller can
// create it; any other error is propagated to the kubelet to handle.
func cbr0Exists() (bool, error) {
if _, err := os.Stat("/sys/class/net/cbr0"); err != nil {
if os.IsNotExist(err) {
return false, nil
}
return false, err
}
return true, nil
}
func cbr0CidrCorrect(wantCIDR *net.IPNet) bool {
output, err := exec.Command("ip", "addr", "show", "cbr0").Output()
if err != nil {
return false
}
match := cidrRegexp.FindSubmatch(output)
if len(match) < 2 {
return false
}
cbr0IP, cbr0CIDR, err := net.ParseCIDR(string(match[1]))
if err != nil {
glog.Errorf("Couldn't parse CIDR: %q", match[1])
return false
}
cbr0CIDR.IP = cbr0IP
glog.V(5).Infof("Want cbr0 CIDR: %s, have cbr0 CIDR: %s", wantCIDR, cbr0CIDR)
return wantCIDR.IP.Equal(cbr0IP) && bytes.Equal(wantCIDR.Mask, cbr0CIDR.Mask)
}
// nonMasqueradeCIDR is the CIDR for our internal IP range; traffic to IPs
// outside this range will use IP masquerade.
func ensureIPTablesMasqRule(client iptables.Interface, nonMasqueradeCIDR string) error {
if _, err := client.EnsureRule(iptables.Append, iptables.TableNAT,
iptables.ChainPostrouting,
"-m", "comment", "--comment", "kubelet: SNAT outbound cluster traffic",
"-m", "addrtype", "!", "--dst-type", "LOCAL",
"!", "-d", nonMasqueradeCIDR,
"-j", "MASQUERADE"); err != nil {
return fmt.Errorf("Failed to ensure masquerading for %s chain %s: %v",
iptables.TableNAT, iptables.ChainPostrouting, err)
}
return nil
}
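
The masquerade rule that ensureIPTablesMasqRule appends encodes a simple policy: destinations inside nonMasqueradeCIDR keep their source address, everything else is SNATed. A standalone sketch of that containment test using only the standard library; the CIDR and destination addresses are illustrative:

package main

import (
	"fmt"
	"net"
)

func main() {
	// Traffic to IPs inside this range keeps its source address;
	// anything else would match the MASQUERADE rule above.
	_, nonMasq, err := net.ParseCIDR("10.0.0.0/8")
	if err != nil {
		panic(err)
	}

	for _, dst := range []string{"10.42.1.7", "93.184.216.34"} {
		if nonMasq.Contains(net.ParseIP(dst)) {
			fmt.Printf("%s: cluster-internal, no SNAT\n", dst)
		} else {
			fmt.Printf("%s: outbound, masqueraded\n", dst)
		}
	}
}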

View File

@@ -1,154 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubelet
import (
"testing"
"k8s.io/kubernetes/pkg/util/dbus"
"k8s.io/kubernetes/pkg/util/exec"
"k8s.io/kubernetes/pkg/util/iptables"
"k8s.io/kubernetes/pkg/util/sets"
)
func TestEnsureIPTablesMasqRuleNew(t *testing.T) {
fcmd := exec.FakeCmd{
CombinedOutputScript: []exec.FakeCombinedOutputAction{
// iptables version check
func() ([]byte, error) { return []byte("iptables v1.9.22"), nil },
// Status 1 on the first call.
func() ([]byte, error) { return nil, &exec.FakeExitError{Status: 1} },
// Success on the second call.
func() ([]byte, error) { return []byte{}, nil },
},
}
fexec := exec.FakeExec{
CommandScript: []exec.FakeCommandAction{
// iptables version check
func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) },
// The second Command() call is checking the rule. Failure of that means create it.
func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) },
func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) },
},
}
runner := iptables.New(&fexec, dbus.NewFake(nil, nil), iptables.ProtocolIpv4)
defer runner.Destroy()
err := ensureIPTablesMasqRule(runner, "127.0.0.0/8")
if err != nil {
t.Errorf("expected success, got %v", err)
}
if fcmd.CombinedOutputCalls != 3 {
t.Errorf("expected 3 CombinedOutput() calls, got %d", fcmd.CombinedOutputCalls)
}
if !sets.NewString(fcmd.CombinedOutputLog[2]...).HasAll("iptables", "-t", "nat", "-A", "POSTROUTING",
"-m", "comment", "--comment", "kubelet: SNAT outbound cluster traffic",
"!", "-d", "127.0.0.0/8", "-j", "MASQUERADE") {
t.Errorf("wrong CombinedOutput() log, got %#v", fcmd.CombinedOutputLog[2])
}
}
func TestEnsureIPTablesMasqRuleAlreadyExists(t *testing.T) {
fcmd := exec.FakeCmd{
CombinedOutputScript: []exec.FakeCombinedOutputAction{
// iptables version check
func() ([]byte, error) { return []byte("iptables v1.9.22"), nil },
// Success.
func() ([]byte, error) { return []byte{}, nil },
},
}
fexec := exec.FakeExec{
CommandScript: []exec.FakeCommandAction{
// iptables version check
func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) },
// The second Command() call is checking the rule. Success of that exec means "done".
func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) },
},
}
runner := iptables.New(&fexec, dbus.NewFake(nil, nil), iptables.ProtocolIpv4)
defer runner.Destroy()
err := ensureIPTablesMasqRule(runner, "127.0.0.0/8")
if err != nil {
t.Errorf("expected success, got %v", err)
}
if fcmd.CombinedOutputCalls != 2 {
t.Errorf("expected 2 CombinedOutput() calls, got %d", fcmd.CombinedOutputCalls)
}
if !sets.NewString(fcmd.CombinedOutputLog[1]...).HasAll("iptables", "-t", "nat", "-C", "POSTROUTING",
"-m", "comment", "--comment", "kubelet: SNAT outbound cluster traffic",
"!", "-d", "127.0.0.0/8", "-j", "MASQUERADE") {
t.Errorf("wrong CombinedOutput() log, got %#v", fcmd.CombinedOutputLog[1])
}
}
func TestEnsureIPTablesMasqRuleErrorChecking(t *testing.T) {
fcmd := exec.FakeCmd{
CombinedOutputScript: []exec.FakeCombinedOutputAction{
// iptables version check
func() ([]byte, error) { return []byte("iptables v1.9.22"), nil },
// Status 2 on the first call.
func() ([]byte, error) { return nil, &exec.FakeExitError{Status: 2} },
},
}
fexec := exec.FakeExec{
CommandScript: []exec.FakeCommandAction{
// iptables version check
func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) },
// The second Command() call is checking the rule. Failure of that means create it.
func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) },
},
}
runner := iptables.New(&fexec, dbus.NewFake(nil, nil), iptables.ProtocolIpv4)
defer runner.Destroy()
err := ensureIPTablesMasqRule(runner, "127.0.0.0/8")
if err == nil {
t.Errorf("expected failure")
}
if fcmd.CombinedOutputCalls != 2 {
t.Errorf("expected 2 CombinedOutput() calls, got %d", fcmd.CombinedOutputCalls)
}
}
func TestEnsureIPTablesMasqRuleErrorCreating(t *testing.T) {
fcmd := exec.FakeCmd{
CombinedOutputScript: []exec.FakeCombinedOutputAction{
// iptables version check
func() ([]byte, error) { return []byte("iptables v1.9.22"), nil },
// Status 1 on the first call.
func() ([]byte, error) { return nil, &exec.FakeExitError{Status: 1} },
// Status 1 on the second call.
func() ([]byte, error) { return nil, &exec.FakeExitError{Status: 1} },
},
}
fexec := exec.FakeExec{
CommandScript: []exec.FakeCommandAction{
// iptables version check
func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) },
// The second Command() call is checking the rule. Failure of that means create it.
func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) },
func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) },
},
}
runner := iptables.New(&fexec, dbus.NewFake(nil, nil), iptables.ProtocolIpv4)
defer runner.Destroy()
err := ensureIPTablesMasqRule(runner, "127.0.0.0/8")
if err == nil {
t.Errorf("expected failure")
}
if fcmd.CombinedOutputCalls != 3 {
t.Errorf("expected 3 CombinedOutput() calls, got %d", fcmd.CombinedOutputCalls)
}
}

View File

@@ -389,13 +389,6 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
oomWatcher := NewOOMWatcher(kubeDeps.CAdvisorInterface, kubeDeps.Recorder)
-// TODO(mtaufen): remove when internal cbr0 implementation gets removed in favor
-// of the kubenet network plugin
-var myConfigureCBR0 bool = kubeCfg.ConfigureCBR0
-if kubeCfg.NetworkPluginName == "kubenet" {
-myConfigureCBR0 = false
-}
klet := &Kubelet{
hostname: hostname,
nodeName: nodeName,
@@ -430,7 +423,6 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
cgroupRoot: kubeCfg.CgroupRoot,
mounter: kubeDeps.Mounter,
writer: kubeDeps.Writer,
-configureCBR0: myConfigureCBR0,
nonMasqueradeCIDR: kubeCfg.NonMasqueradeCIDR,
reconcileCIDR: kubeCfg.ReconcileCIDR,
maxPods: int(kubeCfg.MaxPods),
@@ -454,7 +446,7 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
iptablesDropBit: int(kubeCfg.IPTablesDropBit),
}
-if mode, err := effectiveHairpinMode(componentconfig.HairpinMode(kubeCfg.HairpinMode), kubeCfg.ContainerRuntime, kubeCfg.ConfigureCBR0, kubeCfg.NetworkPluginName); err != nil {
+if mode, err := effectiveHairpinMode(componentconfig.HairpinMode(kubeCfg.HairpinMode), kubeCfg.ContainerRuntime, kubeCfg.NetworkPluginName); err != nil {
// This is a non-recoverable error. Returning it up the callstack will just
// lead to retries of the same failure, so just fail hard.
glog.Fatalf("Invalid hairpin mode: %v", err)
@@ -914,7 +906,6 @@ type Kubelet struct {
// Whether or not kubelet should take responsibility for keeping cbr0 in
// the correct state.
-configureCBR0 bool
reconcileCIDR bool
// Traffic to IPs outside this range will use IP masquerade.

View File

@@ -49,11 +49,10 @@
// effectiveHairpinMode determines the effective hairpin mode given the
// configured mode, container runtime, and whether cbr0 should be configured.
-func effectiveHairpinMode(hairpinMode componentconfig.HairpinMode, containerRuntime string, configureCBR0 bool, networkPlugin string) (componentconfig.HairpinMode, error) {
+func effectiveHairpinMode(hairpinMode componentconfig.HairpinMode, containerRuntime string, networkPlugin string) (componentconfig.HairpinMode, error) {
// The hairpin mode setting doesn't matter if:
// - We're not using a bridge network. This is hard to check because we might
-// be using a plugin. It matters if --configure-cbr0=true, and we currently
-// don't pipe it down to any plugins.
+// be using a plugin.
// - It's set to hairpin-veth for a container runtime that doesn't know how
// to set the hairpin flag on the veth's of containers. Currently the
// docker runtime is the only one that understands this.
@@ -64,18 +63,14 @@ func effectiveHairpinMode(hairpinMode componentconfig.HairpinMode, containerRunt
glog.Warningf("Hairpin mode set to %q but container runtime is %q, ignoring", hairpinMode, containerRuntime)
return componentconfig.HairpinNone, nil
}
-if hairpinMode == componentconfig.PromiscuousBridge && !configureCBR0 && networkPlugin != "kubenet" {
-// This is not a valid combination. Users might be using the
+if hairpinMode == componentconfig.PromiscuousBridge && networkPlugin != "kubenet" {
+// This is not a valid combination, since promiscuous-bridge only works on kubenet. Users might be using the
// default values (from before the hairpin-mode flag existed) and we
// should keep the old behavior.
glog.Warningf("Hairpin mode set to %q but configureCBR0 is false, falling back to %q", hairpinMode, componentconfig.HairpinVeth)
glog.Warningf("Hairpin mode set to %q but kubenet is not enabled, falling back to %q", hairpinMode, componentconfig.HairpinVeth)
return componentconfig.HairpinVeth, nil
}
-} else if hairpinMode == componentconfig.HairpinNone {
-if configureCBR0 {
-glog.Warningf("Hairpin mode set to %q and configureCBR0 is true, this might result in loss of hairpin packets", hairpinMode)
-}
-} else {
+} else if hairpinMode != componentconfig.HairpinNone {
return "", fmt.Errorf("unknown value: %q", hairpinMode)
}
return hairpinMode, nil
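
After this change the fallback logic reads cleanly: promiscuous-bridge only makes sense when kubenet owns the bridge, hairpin-veth needs a runtime (docker) that can set the veth flag, and any other value is an error. A condensed, dependency-free sketch of the new decision table; the string constants are local stand-ins for componentconfig's HairpinMode values:

package main

import "fmt"

// Stand-ins for the componentconfig.HairpinMode constants.
const (
	promiscuousBridge = "promiscuous-bridge"
	hairpinVeth       = "hairpin-veth"
	hairpinNone       = "none"
)

// effectiveHairpinMode mirrors the post-change logic above.
func effectiveHairpinMode(hairpinMode, containerRuntime, networkPlugin string) (string, error) {
	if hairpinMode == promiscuousBridge || hairpinMode == hairpinVeth {
		if containerRuntime != "docker" {
			// Only docker knows how to set the hairpin flag on veths.
			return hairpinNone, nil
		}
		if hairpinMode == promiscuousBridge && networkPlugin != "kubenet" {
			// Without kubenet there is no cbr0 bridge to make promiscuous.
			return hairpinVeth, nil
		}
	} else if hairpinMode != hairpinNone {
		return "", fmt.Errorf("unknown value: %q", hairpinMode)
	}
	return hairpinMode, nil
}

func main() {
	mode, _ := effectiveHairpinMode(promiscuousBridge, "docker", "cni")
	fmt.Println(mode) // hairpin-veth: promiscuous-bridge is demoted without kubenet
}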
@@ -195,60 +190,8 @@ func (kl *Kubelet) cleanupBandwidthLimits(allPods []*api.Pod) error {
return nil
}
-// TODO: remove when kubenet plugin is ready
-// NOTE!!! if you make changes here, also make them to kubenet
-func (kl *Kubelet) reconcileCBR0(podCIDR string) error {
-if podCIDR == "" {
-glog.V(5).Info("PodCIDR not set. Will not configure cbr0.")
-return nil
-}
-glog.V(5).Infof("PodCIDR is set to %q", podCIDR)
-_, cidr, err := net.ParseCIDR(podCIDR)
-if err != nil {
-return err
-}
-// Set cbr0 interface address to first address in IPNet
-cidr.IP.To4()[3] += 1
-if err := ensureCbr0(cidr, kl.hairpinMode == componentconfig.PromiscuousBridge, kl.babysitDaemons); err != nil {
-return err
-}
-if kl.shapingEnabled() {
-if kl.shaper == nil {
-glog.V(5).Info("Shaper is nil, creating")
-kl.shaper = bandwidth.NewTCShaper("cbr0")
-}
-return kl.shaper.ReconcileInterface()
-}
-return nil
-}
-// syncNetworkStatus updates the network state, ensuring that the network is
-// configured correctly if the kubelet is set to configure cbr0:
-// * ensure that iptables masq rules are setup
-// * reconcile cbr0 with the pod CIDR
+// syncNetworkStatus updates the network state
func (kl *Kubelet) syncNetworkStatus() {
-var err error
-if kl.configureCBR0 {
-if err := ensureIPTablesMasqRule(kl.iptClient, kl.nonMasqueradeCIDR); err != nil {
-err = fmt.Errorf("Error on adding iptables rules: %v", err)
-glog.Error(err)
-kl.runtimeState.setNetworkState(err)
-return
-}
-podCIDR := kl.runtimeState.podCIDR()
-if len(podCIDR) == 0 {
-err = fmt.Errorf("ConfigureCBR0 requested, but PodCIDR not set. Will not configure CBR0 right now")
-glog.Warning(err)
-} else if err = kl.reconcileCBR0(podCIDR); err != nil {
-err = fmt.Errorf("Error configuring cbr0: %v", err)
-glog.Error(err)
-}
-if err != nil {
-kl.runtimeState.setNetworkState(err)
-return
-}
-}
kl.runtimeState.setNetworkState(kl.networkPlugin.Status())
}
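
The deleted reconcileCBR0 derived the bridge address with cidr.IP.To4()[3] += 1, i.e. cbr0 was assigned the first usable address of the node's pod CIDR. A standalone sketch of that derivation; the example CIDR is illustrative:

package main

import (
	"fmt"
	"net"
)

// bridgeAddrForPodCIDR reproduces the trick from the deleted code: take
// the pod CIDR's network address and add one, yielding the first usable
// address, which cbr0 was given.
func bridgeAddrForPodCIDR(podCIDR string) (*net.IPNet, error) {
	_, cidr, err := net.ParseCIDR(podCIDR)
	if err != nil {
		return nil, err
	}
	ip4 := cidr.IP.To4()
	if ip4 == nil {
		return nil, fmt.Errorf("not an IPv4 CIDR: %s", podCIDR)
	}
	ip4[3]++ // e.g. 10.244.1.0 -> 10.244.1.1
	return cidr, nil
}

func main() {
	addr, err := bridgeAddrForPodCIDR("10.244.1.0/24")
	if err != nil {
		panic(err)
	}
	fmt.Println(addr) // 10.244.1.1/24
}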

View File

@@ -138,8 +138,7 @@ func GetHollowKubeletConfig(
c.EnableDebuggingHandlers = true
c.EnableServer = true
c.CgroupsPerQOS = false
-// Since this kubelet runs with --configure-cbr0=false, it needs to use
-// hairpin-veth to allow hairpin packets. Note that this deviates from
+// hairpin-veth is used to allow hairpin packets. Note that this deviates from
// what the "real" kubelet currently does, because there's no way to
// set promiscuous mode on docker0.
c.HairpinMode = componentconfig.HairpinVeth