Remove outdated network plugin code

The code was added to support rktnetes and non-CRI docker integrations.
These legacy integrations have already been removed from the codebase.
This change removes the compatibility code that existed solely for the
legacy integrations.
Yu-Ju Hong 2018-04-06 12:15:26 -07:00
parent 99e77a76be
commit 37d30a0815
9 changed files with 111 additions and 426 deletions
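The net effect: probing and initializing network plugins moves entirely into the runtime shim. A minimal sketch of the surviving dockershim path, using only names that appear in the hunks below:

    // Only the runtime shim probes and initializes network plugins now
    // (mirrors the NewDockerService excerpt in this diff):
    cniPlugins := cni.ProbeNetworkPlugins(pluginSettings.PluginConfDir, pluginSettings.PluginBinDirs)
    cniPlugins = append(cniPlugins, kubenet.NewPlugin(pluginSettings.PluginBinDirs))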


@@ -359,7 +359,6 @@ func UnsecuredDependencies(s *options.KubeletServer) (*kubelet.Dependencies, err
ExternalKubeClient: nil,
EventClient: nil,
Mounter: mounter,
NetworkPlugins: ProbeNetworkPlugins(s.CNIConfDir, cni.SplitDirs(s.CNIBinDir)),
OOMAdjuster: oom.NewOOMAdjuster(),
OSInterface: kubecontainer.RealOS{},
Writer: writer,
@@ -1112,7 +1111,6 @@ func RunDockershim(f *options.KubeletFlags, c *kubeletconfiginternal.KubeletConf
}
// Initialize network plugin settings.
nh := &kubelet.NoOpLegacyHost{}
pluginSettings := dockershim.NetworkPluginSettings{
HairpinMode: kubeletconfiginternal.HairpinMode(c.HairpinMode),
NonMasqueradeCIDR: f.NonMasqueradeCIDR,
@@ -1120,7 +1118,6 @@ func RunDockershim(f *options.KubeletFlags, c *kubeletconfiginternal.KubeletConf
PluginConfDir: r.CNIConfDir,
PluginBinDirs: cni.SplitDirs(r.CNIBinDir),
MTU: int(r.NetworkPluginMTU),
LegacyRuntimeHost: nh,
}
// Initialize streaming configuration. (Not using TLS now)
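After this hunk, RunDockershim builds its plugin settings with no legacy back-channel into the kubelet; a sketch assuming only the fields shown above:

    pluginSettings := dockershim.NetworkPluginSettings{
        HairpinMode:       kubeletconfiginternal.HairpinMode(c.HairpinMode),
        NonMasqueradeCIDR: f.NonMasqueradeCIDR,
        PluginConfDir:     r.CNIConfDir,
        PluginBinDirs:     cni.SplitDirs(r.CNIBinDir),
        MTU:               int(r.NetworkPluginMTU),
        // LegacyRuntimeHost is gone; nothing traps back into the kubelet.
    }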


@@ -120,13 +120,6 @@ type NetworkPluginSettings struct {
PluginConfDir string
// MTU is the desired MTU for network devices created by the plugin.
MTU int
// RuntimeHost is an interface that serves as a trap-door from plugin back
// into the kubelet.
// TODO: This shouldn't be required, remove once we move host ports into CNI
// and figure out bandwidth shaping. See corresponding comments above
// network.Host interface.
LegacyRuntimeHost network.LegacyHost
}
// namespaceGetter is a wrapper around the dockerService that implements
@@ -153,7 +146,6 @@ func (p *portMappingGetter) GetPodPortMappings(containerID string) ([]*hostport.
// and dockerServices which implements the rest of the network host interfaces.
// The legacy host methods are slated for deletion.
type dockerNetworkHost struct {
network.LegacyHost
*namespaceGetter
*portMappingGetter
}
@@ -232,7 +224,6 @@ func NewDockerService(config *ClientConfig, podSandboxImage string, streamingCon
cniPlugins := cni.ProbeNetworkPlugins(pluginSettings.PluginConfDir, pluginSettings.PluginBinDirs)
cniPlugins = append(cniPlugins, kubenet.NewPlugin(pluginSettings.PluginBinDirs))
netHost := &dockerNetworkHost{
pluginSettings.LegacyRuntimeHost,
&namespaceGetter{ds},
&portMappingGetter{ds},
}
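With LegacyHost dropped, the docker network host reduces to the two getters; restated from this file's hunks:

    type dockerNetworkHost struct {
        *namespaceGetter   // GetNetNS, backed by the docker service
        *portMappingGetter // GetPodPortMappings, backed by the docker service
    }

    netHost := &dockerNetworkHost{
        &namespaceGetter{ds},
        &portMappingGetter{ds},
    }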


@@ -76,7 +76,6 @@ import (
"k8s.io/kubernetes/pkg/kubelet/logs"
"k8s.io/kubernetes/pkg/kubelet/metrics"
"k8s.io/kubernetes/pkg/kubelet/metrics/collectors"
"k8s.io/kubernetes/pkg/kubelet/network"
"k8s.io/kubernetes/pkg/kubelet/network/cni"
"k8s.io/kubernetes/pkg/kubelet/network/dns"
"k8s.io/kubernetes/pkg/kubelet/pleg"
@@ -238,7 +237,6 @@ type Dependencies struct {
KubeClient clientset.Interface
ExternalKubeClient clientset.Interface
Mounter mount.Interface
NetworkPlugins []network.NetworkPlugin
OOMAdjuster *oom.OOMAdjuster
OSInterface kubecontainer.OSInterface
PodConfig *config.PodConfig
@@ -545,7 +543,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
glog.Infof("Experimental host user namespace defaulting is enabled.")
}
hairpinMode, err := effectiveHairpinMode(kubeletconfiginternal.HairpinMode(kubeCfg.HairpinMode), containerRuntime, crOptions.NetworkPluginName)
hairpinMode, err := effectiveHairpinMode(kubeletconfiginternal.HairpinMode(kubeCfg.HairpinMode), crOptions.NetworkPluginName)
if err != nil {
// This is a non-recoverable error. Returning it up the callstack will just
// lead to retries of the same failure, so just fail hard.
@@ -553,11 +551,6 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
}
glog.Infof("Hairpin mode set to %q", hairpinMode)
plug, err := network.InitNetworkPlugin(kubeDeps.NetworkPlugins, crOptions.NetworkPluginName, &criNetworkHost{&networkHost{klet}, &network.NoopPortMappingGetter{}}, hairpinMode, nonMasqueradeCIDR, int(crOptions.NetworkPluginMTU))
if err != nil {
return nil, err
}
klet.networkPlugin = plug
machineInfo, err := klet.cadvisor.MachineInfo()
if err != nil {
return nil, err
@@ -591,21 +584,10 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
klet.resourceAnalyzer = serverstats.NewResourceAnalyzer(klet, kubeCfg.VolumeStatsAggPeriod.Duration)
// Remote runtime shim just cannot talk back to kubelet, so it doesn't
// support bandwidth shaping or hostports till #35457. To enable legacy
// features, replace with networkHost.
var nl *NoOpLegacyHost
pluginSettings.LegacyRuntimeHost = nl
if containerRuntime == "rkt" {
glog.Fatalln("rktnetes has been deprecated in favor of rktlet. Please see https://github.com/kubernetes-incubator/rktlet for more information.")
}
// kubelet defers to the runtime shim to setup networking. Setting
// this to nil will prevent it from trying to invoke the plugin.
// It's easier to always probe and initialize plugins till cri
// becomes the default.
klet.networkPlugin = nil
// if left at nil, that means it is unneeded
var legacyLogProvider kuberuntime.LegacyLogProvider
@@ -940,9 +922,6 @@ type Kubelet struct {
// Volume plugins.
volumePluginMgr *volume.VolumePluginMgr
// Network plugin.
networkPlugin network.NetworkPlugin
// Handles container probing.
probeManager prober.Manager
// Manages container health check results.
@@ -1330,7 +1309,6 @@ func (kl *Kubelet) Run(updates <-chan kubetypes.PodUpdate) {
// Start syncing node status immediately, this may set up things the runtime needs to run.
go wait.Until(kl.syncNodeStatus, kl.nodeStatusUpdateFrequency, wait.NeverStop)
}
go wait.Until(kl.syncNetworkStatus, 30*time.Second, wait.NeverStop)
go wait.Until(kl.updateRuntimeUp, 5*time.Second, wait.NeverStop)
// Start loop to sync iptables util rules
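After these hunks the kubelet neither stores a network plugin nor runs a syncNetworkStatus loop; for CRI runtimes, network state is reported solely through updateRuntimeUp. A trimmed sketch of the Dependencies struct (fields as shown above, the rest elided):

    type Dependencies struct {
        KubeClient         clientset.Interface
        ExternalKubeClient clientset.Interface
        Mounter            mount.Interface
        OOMAdjuster        *oom.OOMAdjuster
        OSInterface        kubecontainer.OSInterface
        PodConfig          *config.PodConfig
        // ... unchanged fields elided; NetworkPlugins is removed
    }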


@@ -21,12 +21,8 @@ import (
"github.com/golang/glog"
"k8s.io/api/core/v1"
clientset "k8s.io/client-go/kubernetes"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/network"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
utiliptables "k8s.io/kubernetes/pkg/util/iptables"
)
@@ -45,80 +41,9 @@ const (
KubeFirewallChain utiliptables.Chain = "KUBE-FIREWALL"
)
// This just exports required functions from kubelet proper, for use by network
// plugins.
// TODO(#35457): get rid of this backchannel to the kubelet. The scope of
// the back channel is restricted to host-ports/testing, and restricted
// to kubenet. No other network plugin wrapper needs it. Other plugins
// only require a way to access namespace information, which they can do
// directly through the methods implemented by criNetworkHost.
type networkHost struct {
kubelet *Kubelet
}
func (nh *networkHost) GetPodByName(name, namespace string) (*v1.Pod, bool) {
return nh.kubelet.GetPodByName(name, namespace)
}
func (nh *networkHost) GetKubeClient() clientset.Interface {
return nh.kubelet.kubeClient
}
func (nh *networkHost) GetRuntime() kubecontainer.Runtime {
return nh.kubelet.getRuntime()
}
func (nh *networkHost) SupportsLegacyFeatures() bool {
return true
}
// criNetworkHost implements the part of network.Host required by the
// cri (NamespaceGetter). It leechs off networkHost for all other
// methods, because networkHost is slated for deletion.
type criNetworkHost struct {
*networkHost
// criNetworkHost currently support legacy features. Hence no need to support PortMappingGetter
*network.NoopPortMappingGetter
}
// GetNetNS returns the network namespace of the given containerID.
// This method satisfies the network.NamespaceGetter interface for
// networkHost. It's only meant to be used from network plugins
// that are directly invoked by the kubelet (aka: legacy, pre-cri).
// Any network plugin invoked by a cri must implement NamespaceGetter
// to talk directly to the runtime instead.
func (c *criNetworkHost) GetNetNS(containerID string) (string, error) {
return c.kubelet.getRuntime().GetNetNS(kubecontainer.ContainerID{Type: "", ID: containerID})
}
// NoOpLegacyHost implements the network.LegacyHost interface for the remote
// runtime shim by just returning empties. It doesn't support legacy features
// like host port and bandwidth shaping.
type NoOpLegacyHost struct{}
// GetPodByName always returns "nil, true" for 'NoOpLegacyHost'
func (n *NoOpLegacyHost) GetPodByName(namespace, name string) (*v1.Pod, bool) {
return nil, true
}
// GetKubeClient always returns "nil" for 'NoOpLegacyHost'
func (n *NoOpLegacyHost) GetKubeClient() clientset.Interface {
return nil
}
// getRuntime always returns "nil" for 'NoOpLegacyHost'
func (n *NoOpLegacyHost) GetRuntime() kubecontainer.Runtime {
return nil
}
// SupportsLegacyFeatures always returns "false" for 'NoOpLegacyHost'
func (n *NoOpLegacyHost) SupportsLegacyFeatures() bool {
return false
}
// effectiveHairpinMode determines the effective hairpin mode given the
// configured mode, container runtime, and whether cbr0 should be configured.
func effectiveHairpinMode(hairpinMode kubeletconfig.HairpinMode, containerRuntime string, networkPlugin string) (kubeletconfig.HairpinMode, error) {
func effectiveHairpinMode(hairpinMode kubeletconfig.HairpinMode, networkPlugin string) (kubeletconfig.HairpinMode, error) {
// The hairpin mode setting doesn't matter if:
// - We're not using a bridge network. This is hard to check because we might
// be using a plugin.
@@ -127,11 +52,6 @@ func effectiveHairpinMode(hairpinMode kubeletconfig.HairpinMode, containerRuntim
// docker runtime is the only one that understands this.
// - It's set to "none".
if hairpinMode == kubeletconfig.PromiscuousBridge || hairpinMode == kubeletconfig.HairpinVeth {
// Only on docker.
if containerRuntime != kubetypes.DockerContainerRuntime {
glog.Warningf("Hairpin mode set to %q but container runtime is %q, ignoring", hairpinMode, containerRuntime)
return kubeletconfig.HairpinNone, nil
}
if hairpinMode == kubeletconfig.PromiscuousBridge && networkPlugin != "kubenet" {
// This is not a valid combination, since promiscuous-bridge only works on kubenet. Users might be using the
// default values (from before the hairpin-mode flag existed) and we
@@ -159,16 +79,6 @@ func (kl *Kubelet) providerRequiresNetworkingConfiguration() bool {
return supported
}
// syncNetworkStatus updates the network state
func (kl *Kubelet) syncNetworkStatus() {
// For cri integration, network state will be updated in updateRuntimeUp,
// we'll get runtime network status through cri directly.
// TODO: Remove this once we completely switch to cri integration.
if kl.networkPlugin != nil {
kl.runtimeState.setNetworkState(kl.networkPlugin.Status())
}
}
// updatePodCIDR updates the pod CIDR in the runtime state if it is different
// from the current CIDR.
func (kl *Kubelet) updatePodCIDR(cidr string) {
@@ -178,14 +88,6 @@ func (kl *Kubelet) updatePodCIDR(cidr string) {
return
}
// kubelet -> network plugin
// cri runtime shims are responsible for their own network plugins
if kl.networkPlugin != nil {
details := make(map[string]interface{})
details[network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE_DETAIL_CIDR] = cidr
kl.networkPlugin.Event(network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE, details)
}
// kubelet -> generic runtime -> runtime shim -> network plugin
// docker/non-cri implementations have a passthrough UpdatePodCIDR
if err := kl.getRuntime().UpdatePodCIDR(cidr); err != nil {
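The hairpin decision no longer consults the container runtime. A condensed sketch of the surviving logic; the fallback value is an assumption inferred from the warning path truncated in the hunk above:

    func effectiveHairpinModeSketch(mode kubeletconfig.HairpinMode, networkPlugin string) kubeletconfig.HairpinMode {
        if mode == kubeletconfig.PromiscuousBridge && networkPlugin != "kubenet" {
            // promiscuous-bridge only works with kubenet; assumed fallback to veth
            return kubeletconfig.HairpinVeth
        }
        return mode
    }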


@@ -22,73 +22,94 @@ import (
"github.com/stretchr/testify/assert"
)
func TestNetworkHostGetsPodNotFound(t *testing.T) {
testKubelet := newTestKubelet(t, true)
defer testKubelet.Cleanup()
nh := networkHost{testKubelet.kubelet}
actualPod, _ := nh.GetPodByName("", "")
if actualPod != nil {
t.Fatalf("Was expected nil, received %v instead", actualPod)
func TestNodeIPParam(t *testing.T) {
type test struct {
nodeIP string
success bool
testName string
}
}
func TestNetworkHostGetsKubeClient(t *testing.T) {
testKubelet := newTestKubelet(t, true)
defer testKubelet.Cleanup()
nh := networkHost{testKubelet.kubelet}
if nh.GetKubeClient() != testKubelet.fakeKubeClient {
t.Fatalf("NetworkHost client does not match testKubelet's client")
tests := []test{
{
nodeIP: "",
success: false,
testName: "IP not set",
},
{
nodeIP: "127.0.0.1",
success: false,
testName: "IPv4 loopback address",
},
{
nodeIP: "::1",
success: false,
testName: "IPv6 loopback address",
},
{
nodeIP: "224.0.0.1",
success: false,
testName: "multicast IPv4 address",
},
{
nodeIP: "ff00::1",
success: false,
testName: "multicast IPv6 address",
},
{
nodeIP: "169.254.0.1",
success: false,
testName: "IPv4 link-local unicast address",
},
{
nodeIP: "fe80::0202:b3ff:fe1e:8329",
success: false,
testName: "IPv6 link-local unicast address",
},
{
nodeIP: "0.0.0.0",
success: false,
testName: "Unspecified IPv4 address",
},
{
nodeIP: "::",
success: false,
testName: "Unspecified IPv6 address",
},
{
nodeIP: "1.2.3.4",
success: false,
testName: "IPv4 address that doesn't belong to host",
},
}
}
func TestNetworkHostGetsRuntime(t *testing.T) {
testKubelet := newTestKubelet(t, true)
defer testKubelet.Cleanup()
nh := networkHost{testKubelet.kubelet}
if nh.GetRuntime() != testKubelet.fakeRuntime {
t.Fatalf("NetworkHost runtime does not match testKubelet's runtime")
addrs, err := net.InterfaceAddrs()
if err != nil {
assert.Error(t, err, fmt.Sprintf(
"Unable to obtain a list of the node's unicast interface addresses."))
}
}
func TestNetworkHostSupportsLegacyFeatures(t *testing.T) {
testKubelet := newTestKubelet(t, true)
defer testKubelet.Cleanup()
nh := networkHost{testKubelet.kubelet}
if nh.SupportsLegacyFeatures() == false {
t.Fatalf("SupportsLegacyFeatures should not be false")
for _, addr := range addrs {
var ip net.IP
switch v := addr.(type) {
case *net.IPNet:
ip = v.IP
case *net.IPAddr:
ip = v.IP
}
if ip.IsLoopback() || ip.IsLinkLocalUnicast() {
break
}
successTest := test{
nodeIP: ip.String(),
success: true,
testName: fmt.Sprintf("Success test case for address %s", ip.String()),
}
tests = append(tests, successTest)
}
}
func TestNoOpHostGetsName(t *testing.T) {
nh := NoOpLegacyHost{}
pod, err := nh.GetPodByName("", "")
if pod != nil && err != true {
t.Fatalf("noOpLegacyHost getpodbyname expected to be nil and true")
}
}
func TestNoOpHostGetsKubeClient(t *testing.T) {
nh := NoOpLegacyHost{}
if nh.GetKubeClient() != nil {
t.Fatalf("noOpLegacyHost client expected to be nil")
}
}
func TestNoOpHostGetsRuntime(t *testing.T) {
nh := NoOpLegacyHost{}
if nh.GetRuntime() != nil {
t.Fatalf("noOpLegacyHost runtime expected to be nil")
}
}
func TestNoOpHostSupportsLegacyFeatures(t *testing.T) {
nh := NoOpLegacyHost{}
if nh.SupportsLegacyFeatures() != false {
t.Fatalf("noOpLegacyHost legacy features expected to be false")
for _, test := range tests {
err := validateNodeIP(net.ParseIP(test.nodeIP))
if test.success {
assert.NoError(t, err, "test %s", test.testName)
} else {
assert.Error(t, err, fmt.Sprintf("test %s", test.testName))
}
}
}


@@ -41,7 +41,6 @@ import (
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/flowcontrol"
"k8s.io/kubernetes/pkg/capabilities"
"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
cadvisortest "k8s.io/kubernetes/pkg/kubelet/cadvisor/testing"
"k8s.io/kubernetes/pkg/kubelet/cm"
"k8s.io/kubernetes/pkg/kubelet/config"
@@ -52,8 +51,6 @@ import (
"k8s.io/kubernetes/pkg/kubelet/images"
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
"k8s.io/kubernetes/pkg/kubelet/logs"
"k8s.io/kubernetes/pkg/kubelet/network"
nettest "k8s.io/kubernetes/pkg/kubelet/network/testing"
"k8s.io/kubernetes/pkg/kubelet/pleg"
kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
podtest "k8s.io/kubernetes/pkg/kubelet/pod/testing"
@@ -168,7 +165,6 @@ func newTestKubeletWithImageList(
kubelet.nodeName = types.NodeName(testKubeletHostname)
kubelet.runtimeState = newRuntimeState(maxWaitForContainerRuntime)
kubelet.runtimeState.setNetworkState(nil)
kubelet.networkPlugin, _ = network.InitNetworkPlugin([]network.NetworkPlugin{}, "", nettest.NewFakeHost(nil), kubeletconfig.HairpinNone, "", 1440)
if tempDir, err := ioutil.TempDir("/tmp", "kubelet_test."); err != nil {
t.Fatalf("can't make a temp rootdir: %v", err)
} else {


@@ -22,7 +22,6 @@ import (
"fmt"
"io/ioutil"
"net"
"path/filepath"
"strings"
"sync"
"time"
@@ -33,7 +32,6 @@ import (
"github.com/golang/glog"
"github.com/vishvananda/netlink"
"golang.org/x/sys/unix"
"k8s.io/api/core/v1"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
utilnet "k8s.io/apimachinery/pkg/util/net"
utilsets "k8s.io/apimachinery/pkg/util/sets"
@@ -299,9 +297,7 @@ func (plugin *kubenetNetworkPlugin) Capabilities() utilsets.Int {
}
// setup sets up networking through CNI using the given ns/name and sandbox ID.
// TODO: Don't pass the pod to this method, it only needs it for bandwidth
// shaping and hostport management.
func (plugin *kubenetNetworkPlugin) setup(namespace string, name string, id kubecontainer.ContainerID, pod *v1.Pod, annotations map[string]string) error {
func (plugin *kubenetNetworkPlugin) setup(namespace string, name string, id kubecontainer.ContainerID, annotations map[string]string) error {
// Disable DAD so we skip the kernel delay on bringing up new interfaces.
if err := plugin.disableContainerDAD(id); err != nil {
glog.V(3).Infof("Failed to disable DAD in container: %v", err)
@@ -364,36 +360,21 @@ func (plugin *kubenetNetworkPlugin) setup(namespace string, name string, id kube
}
}
// The host can choose to not support "legacy" features. The remote
// shim doesn't support it (#35457), but the kubelet does.
if plugin.host.SupportsLegacyFeatures() {
// Open any hostport the pod's containers want
activePodPortMappings, err := plugin.getPodPortMappings()
if err != nil {
// TODO: replace with CNI port-forwarding plugin
portMappings, err := plugin.host.GetPodPortMappings(id.ID)
if err != nil {
return err
}
if portMappings != nil && len(portMappings) > 0 {
if err := plugin.hostportManager.Add(id.ID, &hostport.PodPortMapping{
Namespace: namespace,
Name: name,
PortMappings: portMappings,
IP: ip4,
HostNetwork: false,
}, BridgeName); err != nil {
return err
}
newPodPortMapping := hostport.ConstructPodPortMapping(pod, ip4)
if err := plugin.hostportSyncer.OpenPodHostportsAndSync(newPodPortMapping, BridgeName, activePodPortMappings); err != nil {
return err
}
} else {
// TODO: replace with CNI port-forwarding plugin
portMappings, err := plugin.host.GetPodPortMappings(id.ID)
if err != nil {
return err
}
if portMappings != nil && len(portMappings) > 0 {
if err := plugin.hostportManager.Add(id.ID, &hostport.PodPortMapping{
Namespace: namespace,
Name: name,
PortMappings: portMappings,
IP: ip4,
HostNetwork: false,
}, BridgeName); err != nil {
return err
}
}
}
return nil
}
@@ -407,38 +388,17 @@ func (plugin *kubenetNetworkPlugin) SetUpPod(namespace string, name string, id k
glog.V(4).Infof("SetUpPod took %v for %s/%s", time.Since(start), namespace, name)
}()
// TODO: Entire pod object only required for bw shaping and hostport.
pod, ok := plugin.host.GetPodByName(namespace, name)
if !ok {
return fmt.Errorf("pod %q cannot be found", name)
}
if err := plugin.Status(); err != nil {
return fmt.Errorf("Kubenet cannot SetUpPod: %v", err)
}
if err := plugin.setup(namespace, name, id, pod, annotations); err != nil {
if err := plugin.setup(namespace, name, id, annotations); err != nil {
// Make sure everything gets cleaned up on errors
podIP, _ := plugin.podIPs[id]
if err := plugin.teardown(namespace, name, id, podIP); err != nil {
// Not a hard error or warning
glog.V(4).Infof("Failed to clean up %s/%s after SetUpPod failure: %v", namespace, name, err)
}
// TODO(#34278): Figure out if we need IP GC through the cri.
// The cri should always send us teardown events for stale sandboxes,
// this obviates the need for GC in the common case, for kubenet.
if plugin.host.SupportsLegacyFeatures() {
// TODO: Remove this hack once we've figured out how to retrieve the netns
// of an exited container. Currently, restarting docker will leak a bunch of
// ips. This will exhaust available ip space unless we cleanup old ips. At the
// same time we don't want to try GC'ing them periodically as that could lead
// to a performance regression in starting pods. So on each setup failure, try
// GC on the assumption that the kubelet is going to retry pod creation, and
// when it does, there will be ips.
plugin.ipamGarbageCollection()
}
return err
}
@@ -475,30 +435,18 @@ func (plugin *kubenetNetworkPlugin) teardown(namespace string, name string, id k
}
}
// The host can choose to not support "legacy" features. The remote
// shim doesn't support it (#35457), but the kubelet does.
if plugin.host.SupportsLegacyFeatures() {
activePodPortMapping, err := plugin.getPodPortMappings()
if err == nil {
err = plugin.hostportSyncer.SyncHostports(BridgeName, activePodPortMapping)
}
if err != nil {
portMappings, err := plugin.host.GetPodPortMappings(id.ID)
if err != nil {
errList = append(errList, err)
} else if portMappings != nil && len(portMappings) > 0 {
if err = plugin.hostportManager.Remove(id.ID, &hostport.PodPortMapping{
Namespace: namespace,
Name: name,
PortMappings: portMappings,
HostNetwork: false,
}); err != nil {
errList = append(errList, err)
}
} else {
portMappings, err := plugin.host.GetPodPortMappings(id.ID)
if err != nil {
errList = append(errList, err)
} else if portMappings != nil && len(portMappings) > 0 {
if err = plugin.hostportManager.Remove(id.ID, &hostport.PodPortMapping{
Namespace: namespace,
Name: name,
PortMappings: portMappings,
HostNetwork: false,
}); err != nil {
errList = append(errList, err)
}
}
}
return utilerrors.NewAggregate(errList)
}
@@ -599,119 +547,6 @@ func (plugin *kubenetNetworkPlugin) checkRequiredCNIPluginsInOneDir(dir string)
return true
}
// getNonExitedPods returns a list of pods that have at least one running container.
func (plugin *kubenetNetworkPlugin) getNonExitedPods() ([]*kubecontainer.Pod, error) {
ret := []*kubecontainer.Pod{}
pods, err := plugin.host.GetRuntime().GetPods(true)
if err != nil {
return nil, fmt.Errorf("Failed to retrieve pods from runtime: %v", err)
}
for _, p := range pods {
if podIsExited(p) {
continue
}
ret = append(ret, p)
}
return ret, nil
}
func (plugin *kubenetNetworkPlugin) getPodPortMappings() ([]*hostport.PodPortMapping, error) {
pods, err := plugin.getNonExitedPods()
if err != nil {
return nil, err
}
activePodPortMappings := make([]*hostport.PodPortMapping, 0)
for _, p := range pods {
containerID, err := plugin.host.GetRuntime().GetPodContainerID(p)
if err != nil {
continue
}
ipString, ok := plugin.podIPs[containerID]
if !ok {
continue
}
podIP := net.ParseIP(ipString)
if podIP == nil {
continue
}
if pod, ok := plugin.host.GetPodByName(p.Namespace, p.Name); ok {
activePodPortMappings = append(activePodPortMappings, hostport.ConstructPodPortMapping(pod, podIP))
}
}
return activePodPortMappings, nil
}
// ipamGarbageCollection will release unused IP.
// kubenet uses the CNI bridge plugin, which stores allocated ips on file. Each
// file created under defaultIPAMDir has the format: ip/container-hash. So this
// routine looks for hashes that are not reported by the currently running docker,
// and invokes DelNetwork on each one. Note that this will only work for the
// current CNI bridge plugin, because we have no way of finding the NetNs.
func (plugin *kubenetNetworkPlugin) ipamGarbageCollection() {
glog.V(2).Infof("Starting IP garbage collection")
ipamDir := filepath.Join(defaultIPAMDir, KubenetPluginName)
files, err := ioutil.ReadDir(ipamDir)
if err != nil {
glog.Errorf("Failed to list files in %q: %v", ipamDir, err)
return
}
// gather containerIDs for allocated ips
ipContainerIdMap := make(map[string]string)
for _, file := range files {
// skip non checkpoint file
if ip := net.ParseIP(file.Name()); ip == nil {
continue
}
content, err := ioutil.ReadFile(filepath.Join(ipamDir, file.Name()))
if err != nil {
glog.Errorf("Failed to read file %v: %v", file, err)
}
ipContainerIdMap[file.Name()] = strings.TrimSpace(string(content))
}
// gather infra container IDs of current running Pods
runningContainerIDs := utilsets.String{}
pods, err := plugin.getNonExitedPods()
if err != nil {
glog.Errorf("Failed to get pods: %v", err)
return
}
for _, pod := range pods {
containerID, err := plugin.host.GetRuntime().GetPodContainerID(pod)
if err != nil {
glog.Warningf("Failed to get infra containerID of %q/%q: %v", pod.Namespace, pod.Name, err)
continue
}
runningContainerIDs.Insert(strings.TrimSpace(containerID.ID))
}
// release leaked ips
for ip, containerID := range ipContainerIdMap {
// if the container is not running, release IP
if runningContainerIDs.Has(containerID) {
continue
}
// CNI requires all config to be presented, although only containerID is needed in this case
rt := &libcni.RuntimeConf{
ContainerID: containerID,
IfName: network.DefaultInterfaceName,
// TODO: How do we find the NetNs of an exited container? docker inspect
// doesn't show us the pid, so we probably need to checkpoint
NetNS: "",
}
glog.V(2).Infof("Releasing IP %q allocated to %q.", ip, containerID)
// CNI bridge plugin should try to release IP and then return
if err := plugin.cniConfig.DelNetwork(plugin.netConfig, rt); err != nil {
glog.Errorf("Error while releasing IP: %v", err)
}
}
}
// podIsExited returns true if the pod is exited (all containers inside are exited).
func podIsExited(p *kubecontainer.Pod) bool {
for _, c := range p.Containers {


@@ -24,12 +24,10 @@ import (
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
utilsets "k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/network/hostport"
@@ -91,29 +89,6 @@ type PodNetworkStatus struct {
IP net.IP `json:"ip" description:"Primary IP address of the pod"`
}
// LegacyHost implements the methods required by network plugins that
// were directly invoked by the kubelet. Implementations of this interface
// that do not wish to support these features can simply return false
// to SupportsLegacyFeatures.
type LegacyHost interface {
// Get the pod structure by its name, namespace
// Only used for hostport management and bw shaping
GetPodByName(namespace, name string) (*v1.Pod, bool)
// GetKubeClient returns a client interface
// Only used in testing
GetKubeClient() clientset.Interface
// GetContainerRuntime returns the container runtime that implements the containers (e.g. docker/rkt)
// Only used for hostport management
GetRuntime() kubecontainer.Runtime
// SupportsLegacyFeatures returns true if the network host support GetPodByName, KubeClient interface and kubelet
// runtime interface. These interfaces will no longer be implemented by CRI shims.
// This function helps network plugins to choose their behavior based on runtime.
SupportsLegacyFeatures() bool
}
// Host is an interface that plugins can use to access the kubelet.
// TODO(#35457): get rid of this backchannel to the kubelet. The scope of
// the back channel is restricted to host-ports/testing, and restricted
@@ -126,12 +101,6 @@ type Host interface {
// PortMappingGetter is a getter for sandbox port mapping information.
PortMappingGetter
// LegacyHost contains methods that trap back into the Kubelet. Dependence
// *do not* add more dependencies in this interface. In a post-cri world,
// network plugins will be invoked by the runtime shim, and should only
// require GetNetNS and GetPodPortMappings.
LegacyHost
}
// NamespaceGetter is an interface to retrieve namespace information for a given


@@ -30,15 +30,12 @@ import (
"k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/tools/record"
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
cadvisortest "k8s.io/kubernetes/pkg/kubelet/cadvisor/testing"
"k8s.io/kubernetes/pkg/kubelet/cm"
"k8s.io/kubernetes/pkg/kubelet/configmap"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
"k8s.io/kubernetes/pkg/kubelet/eviction"
"k8s.io/kubernetes/pkg/kubelet/network"
nettest "k8s.io/kubernetes/pkg/kubelet/network/testing"
kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
podtest "k8s.io/kubernetes/pkg/kubelet/pod/testing"
"k8s.io/kubernetes/pkg/kubelet/secret"
@@ -111,7 +108,6 @@ func TestRunOnce(t *testing.T) {
false, /* experimentalCheckNodeCapabilitiesBeforeMount */
false /* keepTerminatedPodVolumes */)
kb.networkPlugin, _ = network.InitNetworkPlugin([]network.NetworkPlugin{}, "", nettest.NewFakeHost(nil), kubeletconfig.HairpinNone, "", network.UseDefaultMTU)
// TODO: Factor out "StatsProvider" from Kubelet so we don't have a cyclic dependency
volumeStatsAggPeriod := time.Second * 10
kb.resourceAnalyzer = stats.NewResourceAnalyzer(kb, volumeStatsAggPeriod)