mirror of https://github.com/k3s-io/k3s

Merge pull request #5387 from vadorovsky/kube-router-dual-stack

netpol: Add dual-stack support

commit 9350016de8

go.mod (2 changes)
@@ -5,6 +5,7 @@ go 1.16
 replace (
 	github.com/Microsoft/hcsshim => github.com/Microsoft/hcsshim v0.8.22
 	github.com/benmoss/go-powershell => github.com/k3s-io/go-powershell v0.0.0-20201118222746-51f4c451fbd7
+	github.com/cloudnativelabs/kube-router => github.com/k3s-io/kube-router v1.3.3-0.20220405142336-8ea9a06dc0e3
 	github.com/containerd/containerd => github.com/k3s-io/containerd v1.5.10-k3s1 // k3s-release/1.5
 	github.com/coreos/go-systemd => github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e
 	github.com/docker/distribution => github.com/docker/distribution v2.7.1+incompatible
@@ -69,6 +70,7 @@ require (
 	github.com/containerd/containerd v1.6.0-rc.1
 	github.com/containerd/fuse-overlayfs-snapshotter v1.0.4
 	github.com/containerd/stargz-snapshotter v0.11.0
+	github.com/coreos/go-iptables v0.6.0
 	github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f
 	github.com/docker/docker v20.10.10+incompatible
 	github.com/erikdubbelboer/gspt v0.0.0-20190125194910-e68493906b83

go.sum (4 changes)
@@ -199,8 +199,6 @@ github.com/cilium/ebpf v0.5.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJ
 github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
 github.com/cilium/ebpf v0.7.0 h1:1k/q3ATgxSXRdrmPfH8d7YK0GfqVsEKZAX9dQZvs56k=
 github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA=
-github.com/cloudnativelabs/kube-router v1.3.2 h1:OBnFEP8IIIiWDAWd25QXDtyXDQi6GxR0DHOP+EXcpNI=
-github.com/cloudnativelabs/kube-router v1.3.2/go.mod h1:bu7wbMiNX44Rx7mSCcvgNot2jVHuaBDu/z5ygcEtAJY=
 github.com/clusterhq/flocker-go v0.0.0-20160920122132-2b8b7259d313/go.mod h1:P1wt9Z3DP8O6W3rvwCt0REIlshg1InHImaLW0t3ObY0=
 github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
 github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
@@ -725,6 +723,8 @@ github.com/k3s-io/klog v1.0.0-k3s2 h1:yyvD2bQbxG7m85/pvNctLX2bUDmva5kOBvuZ77tTGB
 github.com/k3s-io/klog v1.0.0-k3s2/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
 github.com/k3s-io/klog/v2 v2.30.0-k3s1 h1:ia3JK7rveULJj3VtxhYEW80g4h2YncRzsZeAWP8DbpA=
 github.com/k3s-io/klog/v2 v2.30.0-k3s1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
+github.com/k3s-io/kube-router v1.3.3-0.20220405142336-8ea9a06dc0e3 h1:Fm8ZV0dgaoCGshnwWhRgnyWVfFtdVYar+sYU5Ne67fk=
+github.com/k3s-io/kube-router v1.3.3-0.20220405142336-8ea9a06dc0e3/go.mod h1:sxZiFDEBgbjXM3SHhxJpV3701TuWIXN+JIQd5k9bDls=
 github.com/k3s-io/kubernetes v1.23.5-k3s1 h1:97ixDegoESbNaFo+fL33XU01OUo3PZF4zwYuY6gj8RQ=
 github.com/k3s-io/kubernetes v1.23.5-k3s1/go.mod h1:avI3LUTUYZugxwh52KMVM7v9ZjB5gYJ6D3FIoZ1SHUo=
 github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.23.5-k3s1 h1:fL3tdeKVqFfh6foAT/A7F4zjoI9rxFpw8ksRlHhGcGM=

@@ -15,8 +15,12 @@ import (
 	"github.com/cloudnativelabs/kube-router/pkg/healthcheck"
 	"github.com/cloudnativelabs/kube-router/pkg/options"
+	"github.com/cloudnativelabs/kube-router/pkg/utils"
+	"github.com/coreos/go-iptables/iptables"
 	"github.com/k3s-io/k3s/pkg/daemons/config"
+	"github.com/k3s-io/k3s/pkg/util"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
+	v1core "k8s.io/api/core/v1"
 	"k8s.io/client-go/informers"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/tools/clientcmd"
@@ -50,7 +54,9 @@ func Run(ctx context.Context, nodeConfig *config.Node) error {
 	}

 	krConfig := options.NewKubeRouterConfig()
-	krConfig.ClusterIPCIDR = nodeConfig.AgentConfig.ServiceCIDR.String()
+	krConfig.ClusterIPCIDR = util.JoinIPNets(nodeConfig.AgentConfig.ServiceCIDRs)
+	krConfig.EnableIPv4 = true
+	krConfig.EnableIPv6 = nodeConfig.AgentConfig.EnableIPv6
 	krConfig.NodePortRange = strings.ReplaceAll(nodeConfig.AgentConfig.ServiceNodePortRange.String(), "-", ":")
 	krConfig.HostnameOverride = nodeConfig.AgentConfig.NodeName
 	krConfig.MetricsEnabled = false
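
kube-router's ClusterIPCIDR option is a single string, so the agent flattens the dual-stack service CIDR list into one value. Below is a minimal sketch of what a JoinIPNets-style helper plausibly does, assuming a comma-separated format (the convention kubelet and kube-proxy use for comparable flags); k3s's actual util.JoinIPNets may differ in detail.

package main

import (
	"fmt"
	"net"
	"strings"
)

// joinIPNets renders a CIDR list as one comma-separated string.
// Hypothetical stand-in for k3s's util.JoinIPNets.
func joinIPNets(nets []*net.IPNet) string {
	strs := make([]string, 0, len(nets))
	for _, n := range nets {
		strs = append(strs, n.String())
	}
	return strings.Join(strs, ",")
}

func main() {
	_, v4, _ := net.ParseCIDR("10.43.0.0/16")         // k3s default service CIDR
	_, v6, _ := net.ParseCIDR("2001:cafe:42:1::/112") // example IPv6 service CIDR
	fmt.Println(joinIPNets([]*net.IPNet{v4, v6}))
	// Output: 10.43.0.0/16,2001:cafe:42:1::/112
}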

@@ -71,6 +77,35 @@ func Run(ctx context.Context, nodeConfig *config.Node) error {
 	informerFactory.Start(stopCh)
 	informerFactory.WaitForCacheSync(stopCh)

+	iptablesCmdHandlers := make(map[v1core.IPFamily]utils.IPTablesHandler, 2)
+	ipSetHandlers := make(map[v1core.IPFamily]utils.IPSetHandler, 2)
+
+	iptHandler, err := iptables.NewWithProtocol(iptables.ProtocolIPv4)
+	if err != nil {
+		return errors.Wrap(err, "failed to create iptables handler")
+	}
+	iptablesCmdHandlers[v1core.IPv4Protocol] = iptHandler
+
+	ipset, err := utils.NewIPSet(false)
+	if err != nil {
+		return errors.Wrap(err, "failed to create ipset handler")
+	}
+	ipSetHandlers[v1core.IPv4Protocol] = ipset
+
+	if nodeConfig.AgentConfig.EnableIPv6 {
+		ipt6Handler, err := iptables.NewWithProtocol(iptables.ProtocolIPv6)
+		if err != nil {
+			return errors.Wrap(err, "failed to create iptables handler")
+		}
+		iptablesCmdHandlers[v1core.IPv6Protocol] = ipt6Handler
+
+		ipset, err := utils.NewIPSet(true)
+		if err != nil {
+			return errors.Wrap(err, "failed to create ipset handler")
+		}
+		ipSetHandlers[v1core.IPv6Protocol] = ipset
+	}
+
 	// Start kube-router healthcheck server. Netpol requires it
 	hc, err := healthcheck.NewHealthController(krConfig)
 	if err != nil {
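
Keying the handlers by v1core.IPFamily lets the policy controller program both stacks with one loop instead of branching on IP version at every call site; on a single-stack node the maps simply hold only the IPv4 entries. A self-contained sketch of that consumption pattern follows; the IPTablesHandler interface and the chain name are illustrative assumptions, not kube-router's actual definitions.

package main

import "fmt"

// IPFamily stands in for k8s.io/api/core/v1.IPFamily.
type IPFamily string

const (
	IPv4Protocol IPFamily = "IPv4"
	IPv6Protocol IPFamily = "IPv6"
)

// IPTablesHandler is an illustrative stand-in for kube-router's
// utils.IPTablesHandler; the real one wraps github.com/coreos/go-iptables.
type IPTablesHandler interface {
	AppendUnique(table, chain string, rulespec ...string) error
}

// printHandler logs the rule it would program instead of touching iptables.
type printHandler struct{ family IPFamily }

func (p printHandler) AppendUnique(table, chain string, rulespec ...string) error {
	fmt.Println(p.family, table, chain, rulespec)
	return nil
}

// syncAllowRules programs one allow rule per enabled address family,
// the same per-family loop shape the controller can use internally.
func syncAllowRules(handlers map[IPFamily]IPTablesHandler, cidrs map[IPFamily]string) error {
	for family, h := range handlers {
		if cidr, ok := cidrs[family]; ok {
			if err := h.AppendUnique("filter", "EXAMPLE-CHAIN", "-s", cidr, "-j", "ACCEPT"); err != nil {
				return err
			}
		}
	}
	return nil
}

func main() {
	handlers := map[IPFamily]IPTablesHandler{
		IPv4Protocol: printHandler{IPv4Protocol},
		IPv6Protocol: printHandler{IPv6Protocol},
	}
	cidrs := map[IPFamily]string{
		IPv4Protocol: "10.42.0.0/16",
		IPv6Protocol: "2001:cafe:42:0::/56",
	}
	if err := syncAllowRules(handlers, cidrs); err != nil {
		panic(err)
	}
}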

@@ -83,7 +118,8 @@ func Run(ctx context.Context, nodeConfig *config.Node) error {
 	wg.Add(1)
 	go hc.RunCheck(healthCh, stopCh, &wg)

-	npc, err := netpol.NewNetworkPolicyController(client, krConfig, podInformer, npInformer, nsInformer, &sync.Mutex{})
+	npc, err := netpol.NewNetworkPolicyController(client, krConfig, podInformer, npInformer, nsInformer, &sync.Mutex{},
+		iptablesCmdHandlers, ipSetHandlers)
 	if err != nil {
 		return err
 	}
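
Call-site note: the k3s fork's NewNetworkPolicyController takes two additional trailing arguments, the per-family iptables and ipset handler maps built above, so single-stack and dual-stack nodes share the same construction path and differ only in the contents of those maps.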

@@ -522,22 +522,11 @@ func validateNetworkConfiguration(serverConfig server.Config) error {
 	// Dual-stack operation requires fairly extensive manual configuration at the moment - do some
 	// preflight checks to make sure that the user isn't trying to use flannel/npc, or trying to
 	// enable dual-stack DNS (which we don't currently support since it's not easy to template)
-	dualCluster, err := utilsnet.IsDualStackCIDRs(serverConfig.ControlConfig.ClusterIPRanges)
-	if err != nil {
-		return errors.Wrap(err, "failed to validate cluster-cidr")
-	}
-	dualService, err := utilsnet.IsDualStackCIDRs(serverConfig.ControlConfig.ServiceIPRanges)
-	if err != nil {
-		return errors.Wrap(err, "failed to validate service-cidr")
-	}
 	dualDNS, err := utilsnet.IsDualStackIPs(serverConfig.ControlConfig.ClusterDNSs)
 	if err != nil {
 		return errors.Wrap(err, "failed to validate cluster-dns")
 	}

-	if (serverConfig.ControlConfig.DisableNPC == false) && (dualCluster || dualService) {
-		return errors.New("network policy enforcement is not compatible with dual-stack operation; server must be restarted with --disable-network-policy")
-	}
 	if dualDNS == true {
 		return errors.New("dual-stack cluster-dns is not supported")
 	}
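
With netpol now dual-stack capable, the cluster-cidr and service-cidr guards above are dropped and only the dual-stack cluster-dns check survives. For context, here is a standalone illustration of the k8s.io/utils/net helper that the remaining check relies on; the CIDR values are k3s-style examples, not taken from this diff, and running it requires the k8s.io/utils module.

package main

import (
	"fmt"
	"net"

	utilsnet "k8s.io/utils/net"
)

func main() {
	// Example dual-stack cluster CIDRs (10.42.0.0/16 is the k3s default).
	var cidrs []*net.IPNet
	for _, s := range []string{"10.42.0.0/16", "2001:cafe:42:0::/56"} {
		_, n, err := net.ParseCIDR(s)
		if err != nil {
			panic(err)
		}
		cidrs = append(cidrs, n)
	}

	// IsDualStackCIDRs reports whether the list spans both IPv4 and IPv6.
	dual, err := utilsnet.IsDualStackCIDRs(cidrs)
	if err != nil {
		panic(err)
	}
	fmt.Println("dual-stack:", dual) // dual-stack: true
}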