Refactor egress-selector pods mode to watch pods

Watching pods appears to be the most reliable way to ensure that the
proxy routes and authorizes connections.

Signed-off-by: Brad Davidson <brad.davidson@rancher.com>
pull/5658/head v1.24.1-rc4+k3s1
Brad Davidson 2022-06-07 03:43:07 -07:00 committed by Brad Davidson
parent c00f953ef9
commit d3242bea3c
4 changed files with 291 additions and 194 deletions
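As context for the diff below: the new approach keys everything off a node-scoped pod watch. The following is a minimal client-go sketch of that pattern, not part of the commit; the KUBECONFIG/NODE_NAME environment variables and the direct Watch call are illustrative assumptions, while the k3s code below wraps the same ListWatch in an informer-backed watcher.

package main

import (
	"context"
	"fmt"
	"os"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a client from a kubeconfig path (illustrative; k3s uses its own agent kubeconfig).
	restConfig, err := clientcmd.BuildConfigFromFlags("", os.Getenv("KUBECONFIG"))
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(restConfig)

	// Watch only the pods scheduled to this node, using the same
	// spec.nodeName field selector as the tunnel authorizer.
	fieldSelector := fields.Set{"spec.nodeName": os.Getenv("NODE_NAME")}.String()
	w, err := client.CoreV1().Pods(metav1.NamespaceAll).Watch(context.Background(), metav1.ListOptions{FieldSelector: fieldSelector})
	if err != nil {
		panic(err)
	}
	defer w.Stop()

	// Each event carries the pod's current IPs and spec, which an
	// authorizer can use to add or remove entries from its allow list.
	for ev := range w.ResultChan() {
		if pod, ok := ev.Object.(*v1.Pod); ok {
			fmt.Println(ev.Type, pod.Namespace+"/"+pod.Name, pod.Status.PodIPs)
		}
	}
}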

View File

@ -15,7 +15,6 @@ import (
daemonconfig "github.com/k3s-io/k3s/pkg/daemons/config"
"github.com/k3s-io/k3s/pkg/util"
"github.com/k3s-io/k3s/pkg/version"
"github.com/pkg/errors"
"github.com/rancher/remotedialer"
"github.com/sirupsen/logrus"
"github.com/yl2chen/cidranger"
@ -29,14 +28,27 @@ import (
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/clientcmd"
toolswatch "k8s.io/client-go/tools/watch"
"k8s.io/kubectl/pkg/util/podutils"
)
var (
ports = map[string]bool{
"10250": true,
"10010": true,
}
)
type agentTunnel struct {
client kubernetes.Interface
cidrs cidranger.Ranger
ports map[string]bool
mode string
}
// explicit interface check
var _ cidranger.RangerEntry = &podEntry{}
type podEntry struct {
cidr net.IPNet
hostNet bool
}
func (p *podEntry) Network() net.IPNet {
return p.cidr
}
func Setup(ctx context.Context, config *daemonconfig.Node, proxy proxy.Proxy) error {
restConfig, err := clientcmd.BuildConfigFromFlags("", config.AgentConfig.KubeConfigK3sController)
@ -62,14 +74,25 @@ func Setup(ctx context.Context, config *daemonconfig.Node, proxy proxy.Proxy) er
tunnel := &agentTunnel{
client: client,
cidrs: cidranger.NewPCTrieRanger(),
ports: map[string]bool{},
mode: config.EgressSelectorMode,
}
if tunnel.mode == daemonconfig.EgressSelectorModeCluster {
for _, cidr := range config.AgentConfig.ClusterCIDRs {
logrus.Infof("Tunnel authorizer added Cluster CIDR %s", cidr)
tunnel.cidrs.Insert(cidranger.NewBasicRangerEntry(*cidr))
apiServerReady := make(chan struct{})
go func() {
if err := util.WaitForAPIServerReady(ctx, config.AgentConfig.KubeConfigKubelet, util.DefaultAPIServerReadyTimeout); err != nil {
logrus.Fatalf("Tunnel watches failed to wait for apiserver ready: %v", err)
}
close(apiServerReady)
}()
switch tunnel.mode {
case daemonconfig.EgressSelectorModeCluster:
// In Cluster mode, we allow the cluster CIDRs, and any connections to the node's IPs for pods using host network.
tunnel.clusterAuth(config)
case daemonconfig.EgressSelectorModePod:
// In Pod mode, we watch pods assigned to this node, and allow their addresses, as well as ports used by containers with host network.
go tunnel.watchPods(ctx, apiServerReady, config)
}
// The loadbalancer is only disabled when there is a local apiserver. Servers without a local
@ -93,79 +116,9 @@ func Setup(ctx context.Context, config *daemonconfig.Node, proxy proxy.Proxy) er
}
}
// Attempt to connect to supervisors, storing their cancellation function for later when we
// need to disconnect.
disconnect := map[string]context.CancelFunc{}
wg := &sync.WaitGroup{}
for _, address := range proxy.SupervisorAddresses() {
if _, ok := disconnect[address]; !ok {
disconnect[address] = tunnel.connect(ctx, wg, address, tlsConfig)
}
}
// Once the apiserver is up, go into a watch loop, adding and removing tunnels as endpoints come
// and go from the cluster.
go func() {
if err := util.WaitForAPIServerReady(ctx, config.AgentConfig.KubeConfigKubelet, util.DefaultAPIServerReadyTimeout); err != nil {
logrus.Warnf("Tunnel endpoint watch failed to wait for apiserver ready: %v", err)
}
endpoints := client.CoreV1().Endpoints(metav1.NamespaceDefault)
fieldSelector := fields.Set{metav1.ObjectNameField: "kubernetes"}.String()
lw := &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (object runtime.Object, e error) {
options.FieldSelector = fieldSelector
return endpoints.List(ctx, options)
},
WatchFunc: func(options metav1.ListOptions) (i watch.Interface, e error) {
options.FieldSelector = fieldSelector
return endpoints.Watch(ctx, options)
},
}
_, _, watch, done := toolswatch.NewIndexerInformerWatcher(lw, &v1.Endpoints{})
defer func() {
watch.Stop()
<-done
}()
for {
select {
case <-ctx.Done():
return
case ev, ok := <-watch.ResultChan():
endpoint, ok := ev.Object.(*v1.Endpoints)
if !ok {
logrus.Errorf("Tunnel watch failed: event object not of type v1.Endpoints")
continue
}
newAddresses := util.GetAddresses(endpoint)
if reflect.DeepEqual(newAddresses, proxy.SupervisorAddresses()) {
continue
}
proxy.Update(newAddresses)
validEndpoint := map[string]bool{}
for _, address := range proxy.SupervisorAddresses() {
validEndpoint[address] = true
if _, ok := disconnect[address]; !ok {
disconnect[address] = tunnel.connect(ctx, nil, address, tlsConfig)
}
}
for address, cancel := range disconnect {
if !validEndpoint[address] {
cancel()
delete(disconnect, address)
logrus.Infof("Stopped tunnel to %s", address)
}
}
}
}
}()
go tunnel.watchEndpoints(ctx, apiServerReady, wg, tlsConfig, proxy)
wait := make(chan int, 1)
go func() {
@ -183,11 +136,166 @@ func Setup(ctx context.Context, config *daemonconfig.Node, proxy proxy.Proxy) er
return nil
}
type agentTunnel struct {
sync.Mutex
client kubernetes.Interface
cidrs cidranger.Ranger
mode string
func (a *agentTunnel) clusterAuth(config *daemonconfig.Node) {
// In Cluster mode, we add static entries for the Node IPs and Cluster CIDRs
for _, ip := range config.AgentConfig.NodeIPs {
if cidr, err := util.IPToIPNet(ip); err == nil {
logrus.Infof("Tunnel authorizer adding Node IP %s", cidr)
a.cidrs.Insert(&podEntry{cidr: *cidr})
}
}
for _, cidr := range config.AgentConfig.ClusterCIDRs {
logrus.Infof("Tunnel authorizer adding Cluster CIDR %s", cidr)
a.cidrs.Insert(&podEntry{cidr: *cidr})
}
}
// watchPods watches for pods assigned to this node, adding their IPs to the CIDR list.
// If the pod uses host network, we instead add its container ports to the list of allowed ports.
func (a *agentTunnel) watchPods(ctx context.Context, apiServerReady <-chan struct{}, config *daemonconfig.Node) {
for _, ip := range config.AgentConfig.NodeIPs {
if cidr, err := util.IPToIPNet(ip); err == nil {
logrus.Infof("Tunnel authorizer adding Node IP %s", cidr)
a.cidrs.Insert(&podEntry{cidr: *cidr, hostNet: true})
}
}
<-apiServerReady
nodeName := os.Getenv("NODE_NAME")
pods := a.client.CoreV1().Pods(metav1.NamespaceNone)
fieldSelector := fields.Set{"spec.nodeName": nodeName}.String()
lw := &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (object runtime.Object, e error) {
options.FieldSelector = fieldSelector
return pods.List(ctx, options)
},
WatchFunc: func(options metav1.ListOptions) (i watch.Interface, e error) {
options.FieldSelector = fieldSelector
return pods.Watch(ctx, options)
},
}
logrus.Infof("Tunnel authorizer watching Pods")
_, _, watch, done := toolswatch.NewIndexerInformerWatcher(lw, &v1.Pod{})
defer func() {
watch.Stop()
<-done
}()
for {
select {
case <-ctx.Done():
return
case ev, ok := <-watch.ResultChan():
pod, ok := ev.Object.(*v1.Pod)
if !ok {
logrus.Errorf("Tunnel watch failed: event object not of type v1.Pod")
continue
}
ready := podutils.IsPodReady(pod)
if pod.Spec.HostNetwork {
for _, container := range pod.Spec.Containers {
for _, port := range container.Ports {
if port.Protocol == v1.ProtocolTCP {
containerPort := fmt.Sprint(port.ContainerPort)
if ready {
logrus.Debugf("Tunnel authorizer adding Node Port %s", containerPort)
a.ports[containerPort] = true
} else {
logrus.Debugf("Tunnel authorizer removing Node Port %s", containerPort)
delete(a.ports, containerPort)
}
}
}
}
} else {
for _, ip := range pod.Status.PodIPs {
if cidr, err := util.IPStringToIPNet(ip.IP); err == nil {
if ready {
logrus.Debugf("Tunnel authorizer adding Pod IP %s", cidr)
a.cidrs.Insert(&podEntry{cidr: *cidr})
} else {
logrus.Debugf("Tunnel authorizer removing Pod IP %s", cidr)
a.cidrs.Remove(*cidr)
}
}
}
}
}
}
}
// watchEndpoints attempts to create tunnels to all supervisor addresses. Once the
// apiserver is up, go into a watch loop, adding and removing tunnels as endpoints come
// and go from the cluster.
func (a *agentTunnel) watchEndpoints(ctx context.Context, apiServerReady <-chan struct{}, wg *sync.WaitGroup, tlsConfig *tls.Config, proxy proxy.Proxy) {
// Attempt to connect to supervisors, storing their cancellation function for later when we
// need to disconnect.
disconnect := map[string]context.CancelFunc{}
for _, address := range proxy.SupervisorAddresses() {
if _, ok := disconnect[address]; !ok {
disconnect[address] = a.connect(ctx, wg, address, tlsConfig)
}
}
<-apiServerReady
endpoints := a.client.CoreV1().Endpoints(metav1.NamespaceDefault)
fieldSelector := fields.Set{metav1.ObjectNameField: "kubernetes"}.String()
lw := &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (object runtime.Object, e error) {
options.FieldSelector = fieldSelector
return endpoints.List(ctx, options)
},
WatchFunc: func(options metav1.ListOptions) (i watch.Interface, e error) {
options.FieldSelector = fieldSelector
return endpoints.Watch(ctx, options)
},
}
_, _, watch, done := toolswatch.NewIndexerInformerWatcher(lw, &v1.Endpoints{})
defer func() {
watch.Stop()
<-done
}()
for {
select {
case <-ctx.Done():
return
case ev, ok := <-watch.ResultChan():
endpoint, ok := ev.Object.(*v1.Endpoints)
if !ok {
logrus.Errorf("Tunnel watch failed: event object not of type v1.Endpoints")
continue
}
newAddresses := util.GetAddresses(endpoint)
if reflect.DeepEqual(newAddresses, proxy.SupervisorAddresses()) {
continue
}
proxy.Update(newAddresses)
validEndpoint := map[string]bool{}
for _, address := range proxy.SupervisorAddresses() {
validEndpoint[address] = true
if _, ok := disconnect[address]; !ok {
disconnect[address] = a.connect(ctx, nil, address, tlsConfig)
}
}
for address, cancel := range disconnect {
if !validEndpoint[address] {
cancel()
delete(disconnect, address)
logrus.Infof("Stopped tunnel to %s", address)
}
}
}
}
}
// authorized determines whether or not a dial request is authorized.
@ -198,46 +306,24 @@ func (a *agentTunnel) authorized(ctx context.Context, proto, address string) boo
logrus.Debugf("Tunnel authorizer checking dial request for %s", address)
host, port, err := net.SplitHostPort(address)
if err == nil {
if proto == "tcp" && ports[port] && (host == "127.0.0.1" || host == "::1") {
if proto == "tcp" && daemonconfig.KubeletReservedPorts[port] && (host == "127.0.0.1" || host == "::1") {
return true
}
if ip := net.ParseIP(host); ip != nil {
// lazy populate pod cidrs from the node object
if a.mode == daemonconfig.EgressSelectorModePod && a.cidrs.Len() == 0 {
if err := a.updatePodCIDRs(ctx); err != nil {
logrus.Warnf("Tunnel authorizer failed to update CIDRs: %v", err)
}
}
if nets, err := a.cidrs.ContainingNetworks(ip); err == nil && len(nets) > 0 {
return true
if p, ok := nets[0].(*podEntry); ok {
if p.hostNet {
return proto == "tcp" && a.ports[port]
}
return true
}
logrus.Debugf("Tunnel authorizer CIDR lookup returned unknown type for address %s", ip)
}
}
}
return false
}
func (a *agentTunnel) updatePodCIDRs(ctx context.Context) error {
a.Lock()
defer a.Unlock()
// Return early if another goroutine updated the CIDRs while we were waiting to lock
if a.cidrs.Len() != 0 {
return nil
}
nodeName := os.Getenv("NODE_NAME")
logrus.Debugf("Tunnel authorizer getting Pod CIDRs for %s", nodeName)
node, err := a.client.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
if err != nil {
return errors.Wrap(err, "failed to get Pod CIDRs")
}
for _, cidr := range node.Spec.PodCIDRs {
if _, n, err := net.ParseCIDR(cidr); err == nil {
logrus.Infof("Tunnel authorizer added Pod CIDR %s", cidr)
a.cidrs.Insert(cidranger.NewBasicRangerEntry(*n))
}
}
return nil
}
// connect initiates a connection to the remotedialer server. Incoming dial requests from
// the server will be checked by the authorizer function prior to being fulfilled.
func (a *agentTunnel) connect(rootCtx context.Context, waitGroup *sync.WaitGroup, address string, tlsConfig *tls.Config) context.CancelFunc {
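Both the old and new agent authorizer above answer dial requests with a cidranger lookup. Below is a small standalone sketch of that lookup, separate from the diff; the CIDR and IP values are made-up examples.

package main

import (
	"fmt"
	"net"

	"github.com/yl2chen/cidranger"
)

func main() {
	ranger := cidranger.NewPCTrieRanger()

	// Insert a cluster CIDR and a single node IP as a /32, the way
	// clusterAuth and watchPods populate the trie.
	_, clusterCIDR, _ := net.ParseCIDR("10.42.0.0/16")
	_, nodeIP, _ := net.ParseCIDR("192.168.1.10/32")
	ranger.Insert(cidranger.NewBasicRangerEntry(*clusterCIDR))
	ranger.Insert(cidranger.NewBasicRangerEntry(*nodeIP))

	// A dial request is authorized when the destination IP falls inside
	// at least one inserted network.
	for _, addr := range []string{"10.42.3.7", "192.168.1.10", "8.8.8.8"} {
		nets, err := ranger.ContainingNetworks(net.ParseIP(addr))
		fmt.Println(addr, "allowed:", err == nil && len(nets) > 0)
	}

	// Entries are removed the same way when a pod goes away.
	ranger.Remove(*nodeIP)
}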

View File

@ -30,8 +30,17 @@ const (
EgressSelectorModeDisabled = "disabled"
EgressSelectorModePod = "pod"
CertificateRenewDays = 90
StreamServerPort = "10010"
KubeletPort = "10250"
)
// These ports can always be accessed via the tunnel server, at the loopback address.
// Other addresses and ports are only accessible via the tunnel on newer agents, when used by a pod.
var KubeletReservedPorts = map[string]bool{
StreamServerPort: true,
KubeletPort: true,
}
type Node struct {
ContainerRuntimeEndpoint string
NoFlannel bool

View File

@ -11,12 +11,18 @@ import (
"github.com/k3s-io/k3s/pkg/daemons/config"
"github.com/k3s-io/k3s/pkg/daemons/control/proxy"
"github.com/k3s-io/k3s/pkg/generated/clientset/versioned/scheme"
"github.com/k3s-io/k3s/pkg/nodeconfig"
"github.com/k3s-io/k3s/pkg/util"
"github.com/k3s-io/k3s/pkg/version"
"github.com/pkg/errors"
"github.com/rancher/remotedialer"
"github.com/sirupsen/logrus"
"github.com/yl2chen/cidranger"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apiserver/pkg/endpoints/handlers/responsewriters"
"k8s.io/apiserver/pkg/endpoints/request"
"k8s.io/client-go/kubernetes"
)
@ -67,9 +73,9 @@ type TunnelServer struct {
var _ cidranger.RangerEntry = &tunnelEntry{}
type tunnelEntry struct {
cidr net.IPNet
node string
kubelet bool
cidr net.IPNet
nodeName string
node bool
}
func (n *tunnelEntry) Network() net.IPNet {
@ -98,10 +104,9 @@ func (t *TunnelServer) watch(ctx context.Context) {
for {
if t.config.Runtime.Core != nil {
t.config.Runtime.Core.Core().V1().Node().OnChange(ctx, version.Program+"-tunnel-server", t.onChangeNode)
if t.config.EgressSelectorMode == config.EgressSelectorModeCluster {
// Cluster mode watches Endpoints to find which Node is hosting an Endpoint address, as the CNI
// may be using its own IPAM that does not respect the Node's PodCIDR.
t.config.Runtime.Core.Core().V1().Endpoints().OnChange(ctx, version.Program+"-tunnel-server", t.onChangeEndpoints)
switch t.config.EgressSelectorMode {
case config.EgressSelectorModeCluster, config.EgressSelectorModePod:
t.config.Runtime.Core.Core().V1().Pod().OnChange(ctx, version.Program+"-tunnel-server", t.onChangePod)
}
return
}
@ -110,41 +115,22 @@ func (t *TunnelServer) watch(ctx context.Context) {
}
}
// onChangeNode updates the node address/CIDR mappings by observing changes to nodes.
// Node addresses are updated in Agent, Cluster, and Pod mode.
// Pod CIDRs are updated only in Pod mode
// onChangeNode updates the node address mappings by observing changes to nodes.
func (t *TunnelServer) onChangeNode(nodeName string, node *v1.Node) (*v1.Node, error) {
if node != nil {
t.Lock()
defer t.Unlock()
logrus.Debugf("Tunnel server egress proxy updating node %s", nodeName)
_, t.egress[nodeName] = node.Labels[nodeconfig.ClusterEgressLabel]
// Add all node IP addresses
for _, addr := range node.Status.Addresses {
if addr.Type == v1.NodeInternalIP || addr.Type == v1.NodeExternalIP {
address := addr.Address
if strings.Contains(address, ":") {
address += "/128"
} else {
address += "/32"
}
if _, n, err := net.ParseCIDR(address); err == nil {
if n, err := util.IPStringToIPNet(addr.Address); err == nil {
if node.DeletionTimestamp != nil {
logrus.Debugf("Tunnel server egress proxy removing Node %s IP %v", nodeName, n)
t.cidrs.Remove(*n)
} else {
t.cidrs.Insert(&tunnelEntry{cidr: *n, node: nodeName, kubelet: true})
}
}
}
}
// Add all Node PodCIDRs, if in pod mode
if t.config.EgressSelectorMode == config.EgressSelectorModePod {
for _, cidr := range node.Spec.PodCIDRs {
if _, n, err := net.ParseCIDR(cidr); err == nil {
if node.DeletionTimestamp != nil {
t.cidrs.Remove(*n)
} else {
t.cidrs.Insert(&tunnelEntry{cidr: *n, node: nodeName})
logrus.Debugf("Tunnel server egress proxy updating Node %s IP %v", nodeName, n)
t.cidrs.Insert(&tunnelEntry{cidr: *n, nodeName: nodeName, node: true})
}
}
}
@ -153,45 +139,29 @@ func (t *TunnelServer) onChangeNode(nodeName string, node *v1.Node) (*v1.Node, e
return node, nil
}
// onChangeEndpoints updates the pod address mappings by observing changes to endpoints.
// Only Pod endpoints with a defined NodeName are used, and only in Cluster mode.
func (t *TunnelServer) onChangeEndpoints(endpointsName string, endpoints *v1.Endpoints) (*v1.Endpoints, error) {
if endpoints != nil {
// onChangePod updates the pod address mappings by observing changes to pods.
func (t *TunnelServer) onChangePod(podName string, pod *v1.Pod) (*v1.Pod, error) {
if pod != nil {
t.Lock()
defer t.Unlock()
logrus.Debugf("Tunnel server egress proxy updating endpoints %s", endpointsName)
// Add all Pod endpoints
for _, subset := range endpoints.Subsets {
for _, addr := range subset.Addresses {
if addr.NodeName != nil && addr.TargetRef != nil && addr.TargetRef.Kind == "Pod" {
nodeName := *addr.NodeName
address := addr.IP
if strings.Contains(address, ":") {
address += "/128"
// Add all pod IPs, unless the pod uses host network
if !pod.Spec.HostNetwork {
nodeName := pod.Spec.NodeName
for _, ip := range pod.Status.PodIPs {
if cidr, err := util.IPStringToIPNet(ip.IP); err == nil {
if pod.DeletionTimestamp != nil {
logrus.Debugf("Tunnel server egress proxy removing Node %s Pod IP %v", nodeName, cidr)
t.cidrs.Remove(*cidr)
} else {
address += "/32"
}
if _, n, err := net.ParseCIDR(address); err == nil {
t.cidrs.Insert(&tunnelEntry{cidr: *n, node: nodeName})
}
}
}
for _, addr := range subset.NotReadyAddresses {
if addr.TargetRef != nil && addr.TargetRef.Kind == "Pod" {
address := addr.IP
if strings.Contains(address, ":") {
address += "/128"
} else {
address += "/32"
}
if _, n, err := net.ParseCIDR(address); err == nil {
t.cidrs.Remove(*n)
logrus.Debugf("Tunnel server egress proxy updating Node %s Pod IP %s", nodeName, cidr)
t.cidrs.Insert(&tunnelEntry{cidr: *cidr, nodeName: nodeName})
}
}
}
}
}
return endpoints, nil
return pod, nil
}
// serveConnect attempts to handle the HTTP CONNECT request by dialing
@ -199,20 +169,29 @@ func (t *TunnelServer) onChangeEndpoints(endpointsName string, endpoints *v1.End
func (t *TunnelServer) serveConnect(resp http.ResponseWriter, req *http.Request) {
bconn, err := t.dialBackend(req.Host)
if err != nil {
http.Error(resp, fmt.Sprintf("no tunnels available: %v", err), http.StatusInternalServerError)
responsewriters.ErrorNegotiated(
apierrors.NewInternalError(errors.Wrap(err, "no tunnels available")),
scheme.Codecs.WithoutConversion(), schema.GroupVersion{}, resp, req,
)
return
}
hijacker, ok := resp.(http.Hijacker)
if !ok {
http.Error(resp, "hijacking not supported", http.StatusInternalServerError)
responsewriters.ErrorNegotiated(
apierrors.NewInternalError(errors.New("hijacking not supported")),
scheme.Codecs.WithoutConversion(), schema.GroupVersion{}, resp, req,
)
return
}
resp.WriteHeader(http.StatusOK)
rconn, _, err := hijacker.Hijack()
if err != nil {
http.Error(resp, err.Error(), http.StatusInternalServerError)
responsewriters.ErrorNegotiated(
apierrors.NewInternalError(err),
scheme.Codecs.WithoutConversion(), schema.GroupVersion{}, resp, req,
)
return
}
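The hunk above ends before the actual byte copying between the hijacked client connection and the backend connection. For context, a generic sketch of that bidirectional pipe using only the standard library; this is the usual CONNECT pattern, not the k3s proxy helper itself.

package proxyexample

import (
	"io"
	"net"
)

// pipe copies bytes in both directions between the hijacked client
// connection and the backend connection, closing both once either
// direction finishes.
func pipe(rconn, bconn net.Conn) {
	done := make(chan struct{}, 2)
	cp := func(dst, src net.Conn) {
		io.Copy(dst, src)
		done <- struct{}{}
	}
	go cp(bconn, rconn) // client -> backend
	go cp(rconn, bconn) // backend -> client
	<-done
	rconn.Close()
	bconn.Close()
}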
@ -231,46 +210,46 @@ func (t *TunnelServer) dialBackend(addr string) (net.Conn, error) {
}
loopback := t.config.Loopback()
var node string
var nodeName string
var toKubelet, useTunnel bool
if ip := net.ParseIP(host); ip != nil {
// Destination is an IP address, check to see if the target is a kubelet or pod address.
// Destination is an IP address, which could be either a pod, or node by IP.
// We can only use the tunnel for egress to pods if the agent supports it.
if nets, err := t.cidrs.ContainingNetworks(ip); err == nil && len(nets) > 0 {
if n, ok := nets[0].(*tunnelEntry); ok {
node = n.node
if n.kubelet {
nodeName = n.nodeName
if n.node && config.KubeletReservedPorts[port] {
toKubelet = true
useTunnel = true
} else {
useTunnel = t.egress[node]
useTunnel = t.egress[nodeName]
}
} else {
logrus.Debugf("Tunnel server egress proxy CIDR lookup returned unknown type for address %s", ip)
}
}
} else {
// Destination is a kubelet by name, it is safe to use the tunnel.
node = host
// Destination is a node by name, it is safe to use the tunnel.
nodeName = host
toKubelet = true
useTunnel = true
}
// Always dial kubelets via the loopback address.
// Always dial kubelet via the loopback address.
if toKubelet {
addr = fmt.Sprintf("%s:%s", loopback, port)
}
// If connecting to something hosted by the local node, don't tunnel
if node == t.config.ServerNodeName {
if nodeName == t.config.ServerNodeName {
useTunnel = false
}
if t.server.HasSession(node) {
if t.server.HasSession(nodeName) {
if useTunnel {
// Have a session and it is safe to use for this destination, do so.
logrus.Debugf("Tunnel server egress proxy dialing %s via session to %s", addr, node)
return t.server.Dial(node, 15*time.Second, "tcp", addr)
logrus.Debugf("Tunnel server egress proxy dialing %s via session to %s", addr, nodeName)
return t.server.Dial(nodeName, 15*time.Second, "tcp", addr)
}
// Have a session but the agent doesn't support tunneling to this destination or
// the destination is local; fall back to direct connection.
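To summarize the routing rules spelled out in the comments above, here is a condensed, illustrative sketch. The parameter names are assumptions; dialBackend itself is the authoritative logic.

package proxyexample

// routeDecision condenses how the tunnel server picks a path for a CONNECT
// target: whether to rewrite the address to the loopback reserved ports,
// and whether to dial through the agent tunnel.
func routeDecision(isNodeName, isNodeIP, isPodIP, reservedPort, agentEgress, isLocalNode bool) (toLoopback, useTunnel bool) {
	switch {
	case isNodeName:
		// Node addressed by name: always safe to tunnel to the kubelet.
		toLoopback, useTunnel = true, true
	case isNodeIP && reservedPort:
		// Node IP on a reserved port: treat it as a kubelet dial.
		toLoopback, useTunnel = true, true
	case isNodeIP || isPodIP:
		// Other node and pod addresses are only tunneled when the agent
		// advertises egress support via its node label.
		useTunnel = agentEgress
	}
	if isLocalNode {
		// Destinations owned by this server's own node are dialed directly.
		useTunnel = false
	}
	return toLoopback, useTunnel
}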

View File

@ -256,3 +256,26 @@ func IsIPv6OnlyCIDRs(cidrs []*net.IPNet) (bool, error) {
return !v4Found && v6Found, nil
}
// IPToIPNet converts an IP to an IPNet, using a fully filled mask appropriate for the address family.
func IPToIPNet(ip net.IP) (*net.IPNet, error) {
address := ip.String()
if strings.Contains(address, ":") {
address += "/128"
} else {
address += "/32"
}
_, cidr, err := net.ParseCIDR(address)
return cidr, err
}
// IPStringToIPNet converts an IP string to an IPNet, using a fully filled mask appropriate for the address family.
func IPStringToIPNet(address string) (*net.IPNet, error) {
if strings.Contains(address, ":") {
address += "/128"
} else {
address += "/32"
}
_, cidr, err := net.ParseCIDR(address)
return cidr, err
}
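A brief usage note for the helpers above: both produce a host-length mask, so a single address becomes an exact-match entry for the CIDR ranger. The snippet copies the helper inline so it runs on its own.

package main

import (
	"fmt"
	"net"
	"strings"
)

// ipStringToIPNet reproduces IPStringToIPNet above so this example is
// self-contained.
func ipStringToIPNet(address string) (*net.IPNet, error) {
	if strings.Contains(address, ":") {
		address += "/128"
	} else {
		address += "/32"
	}
	_, cidr, err := net.ParseCIDR(address)
	return cidr, err
}

func main() {
	for _, addr := range []string{"10.1.2.3", "2001:db8::1"} {
		cidr, err := ipStringToIPNet(addr)
		fmt.Println(addr, "->", cidr, err) // 10.1.2.3/32 <nil>, then 2001:db8::1/128 <nil>
	}
}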