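// Package tunnel maintains the agent side of the k3s websocket tunnel: it opens
// remotedialer connections to the supervisors and authorizes the dial requests
// that the servers send back through those tunnels.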
package tunnel

import (
	"context"
	"crypto/tls"
	"errors"
	"fmt"
	"net"
	"os"
	"strconv"
	"sync"
	"time"

	"github.com/gorilla/websocket"
	agentconfig "github.com/k3s-io/k3s/pkg/agent/config"
	"github.com/k3s-io/k3s/pkg/agent/loadbalancer"
	"github.com/k3s-io/k3s/pkg/agent/proxy"
	"github.com/k3s-io/k3s/pkg/clientaccess"
	daemonconfig "github.com/k3s-io/k3s/pkg/daemons/config"
	"github.com/k3s-io/k3s/pkg/util"
	"github.com/k3s-io/k3s/pkg/version"
	"github.com/rancher/remotedialer"
	"github.com/sirupsen/logrus"
	"github.com/yl2chen/cidranger"
	authorizationv1 "k8s.io/api/authorization/v1"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
	toolswatch "k8s.io/client-go/tools/watch"
	"k8s.io/kubernetes/pkg/cluster/ports"
)

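// endpointDebounceDelay is used both to debounce apiserver endpoint updates and
// to space out remotedialer reconnection attempts; defaultDialer makes the local
// connections dialed on behalf of the server.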
var (
	endpointDebounceDelay = time.Second
	defaultDialer         = net.Dialer{}
)

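// agentTunnel holds the state used to authorize and dial tunnel connections:
// the allowed CIDRs and host-network ports, the egress-selector mode, and the
// kubelet address and port that loopback kubelet traffic is redirected to.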
type agentTunnel struct {
	client      kubernetes.Interface
	cidrs       cidranger.Ranger
	ports       map[string]bool
	mode        string
	kubeletAddr string
	kubeletPort string
	startTime   time.Time
}

// explicit interface check
var _ cidranger.RangerEntry = &podEntry{}

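// podEntry is a cidranger entry for a pod or node CIDR; hostNet marks entries
// whose traffic is only allowed to ports published by host-network pods.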
type podEntry struct {
	cidr    net.IPNet
	hostNet bool
}

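// Network returns the entry's CIDR, satisfying the cidranger.RangerEntry interface.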
func (p *podEntry) Network() net.IPNet {
	return p.cidr
}

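// Setup starts the agent tunnel: it builds the tunnel authorizer, seeds the proxy
// with apiserver addresses, and starts watches that keep tunnel connections in sync
// with the cluster's apiserver endpoints. It blocks until at least one tunnel
// connection has been established or the context is cancelled.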
func Setup(ctx context.Context, config *daemonconfig.Node, proxy proxy.Proxy) error {
	client, err := util.GetClientSet(config.AgentConfig.KubeConfigK3sController)
	if err != nil {
		return err
	}

	nodeRestConfig, err := util.GetRESTConfig(config.AgentConfig.KubeConfigKubelet)
	if err != nil {
		return err
	}

	tlsConfig, err := rest.TLSConfigFor(nodeRestConfig)
	if err != nil {
		return err
	}

	tunnel := &agentTunnel{
		client:      client,
		cidrs:       cidranger.NewPCTrieRanger(),
		ports:       map[string]bool{},
		mode:        config.EgressSelectorMode,
		kubeletAddr: config.AgentConfig.ListenAddress,
		kubeletPort: fmt.Sprint(ports.KubeletPort),
		startTime:   time.Now().Truncate(time.Second),
	}

	apiServerReady := make(chan struct{})
	go func() {
		if err := util.WaitForAPIServerReady(ctx, config.AgentConfig.KubeConfigKubelet, util.DefaultAPIServerReadyTimeout); err != nil {
			logrus.Fatalf("Tunnel watches failed to wait for apiserver ready: %v", err)
		}
		if err := util.WaitForRBACReady(ctx, config.AgentConfig.KubeConfigK3sController, util.DefaultAPIServerReadyTimeout, authorizationv1.ResourceAttributes{
			Namespace: metav1.NamespaceDefault,
			Verb:      "list",
			Resource:  "endpoints",
		}, ""); err != nil {
			logrus.Fatalf("Tunnel watches failed to wait for RBAC: %v", err)
		}

		close(apiServerReady)
	}()

	// We don't need to run the tunnel authorizer if the container runtime endpoint is /dev/null,
	// signifying that this is an agentless server that will not register a node.
	if config.ContainerRuntimeEndpoint != "/dev/null" {
		// Allow the kubelet port, as published via our node object.
		go tunnel.setKubeletPort(ctx, apiServerReady)

		switch tunnel.mode {
		case daemonconfig.EgressSelectorModeCluster:
			// In Cluster mode, we allow the cluster CIDRs, and any connections to the node's IPs for pods using host network.
			tunnel.clusterAuth(config)
		case daemonconfig.EgressSelectorModePod:
			// In Pod mode, we watch pods assigned to this node, and allow their addresses, as well as ports used by containers with host network.
			go tunnel.watchPods(ctx, apiServerReady, config)
		}
	}

	// The loadbalancer is only disabled when there is a local apiserver. Servers without a local
	// apiserver load-balance to themselves initially, then switch over to an apiserver node as soon
	// as we get some addresses from the code below.
	var localSupervisorDefault bool
	if addresses := proxy.SupervisorAddresses(); len(addresses) > 0 {
		host, _, _ := net.SplitHostPort(addresses[0])
		if host == "127.0.0.1" || host == "::1" {
			localSupervisorDefault = true
		}
	}

	if proxy.IsSupervisorLBEnabled() && proxy.SupervisorURL() != "" {
		logrus.Info("Getting list of apiserver endpoints from server")
		// If not running an apiserver locally, try to get a list of apiservers from the server we're
		// connecting to. If that fails, fall back to querying the endpoints list from Kubernetes. This
		// fallback requires that the server we're joining be running an apiserver, but is the only safe
		// thing to do if its supervisor is down-level and can't provide us with an endpoint list.
		addresses := agentconfig.WaitForAPIServers(ctx, config, proxy)
		if len(addresses) > 0 {
			logrus.Infof("Got apiserver addresses from supervisor: %v", addresses)
			if localSupervisorDefault {
				proxy.SetSupervisorDefault(addresses[0])
			}
			proxy.Update(addresses)
		} else {
			if endpoint, err := client.CoreV1().Endpoints(metav1.NamespaceDefault).Get(ctx, "kubernetes", metav1.GetOptions{}); err != nil {
				logrus.Errorf("Failed to get apiserver addresses from kubernetes endpoints: %v", err)
			} else {
				addresses := util.GetAddresses(endpoint)
				logrus.Infof("Got apiserver addresses from kubernetes endpoints: %v", addresses)
				if len(addresses) > 0 {
					proxy.Update(addresses)
				}
			}
		}
	}

	wg := &sync.WaitGroup{}

	go tunnel.watchEndpoints(ctx, apiServerReady, wg, tlsConfig, config, proxy)

	wait := make(chan int, 1)
	go func() {
		wg.Wait()
		wait <- 0
	}()

	select {
	case <-ctx.Done():
		logrus.Error("Tunnel context canceled while waiting for connection")
		return ctx.Err()
	case <-wait:
	}

	return nil
}

// setKubeletPort retrieves the configured kubelet port from our node object
func (a *agentTunnel) setKubeletPort(ctx context.Context, apiServerReady <-chan struct{}) {
	<-apiServerReady

	wait.PollUntilContextTimeout(ctx, time.Second, util.DefaultAPIServerReadyTimeout, true, func(ctx context.Context) (bool, error) {
		var readyTime metav1.Time
		nodeName := os.Getenv("NODE_NAME")
		node, err := a.client.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
		if err != nil {
			logrus.Debugf("Tunnel authorizer failed to get Kubelet Port: %v", err)
			return false, nil
		}
		for _, cond := range node.Status.Conditions {
			if cond.Type == v1.NodeReady && cond.Status == v1.ConditionTrue {
				readyTime = cond.LastHeartbeatTime
			}
		}
		if readyTime.Time.Before(a.startTime) {
			logrus.Debugf("Waiting for Ready condition to be updated for Kubelet Port assignment")
			return false, nil
		}
		kubeletPort := strconv.FormatInt(int64(node.Status.DaemonEndpoints.KubeletEndpoint.Port), 10)
		if kubeletPort == "0" {
			logrus.Debugf("Waiting for Kubelet Port to be set")
			return false, nil
		}
		a.kubeletPort = kubeletPort
		logrus.Infof("Tunnel authorizer set Kubelet Port %s", net.JoinHostPort(a.kubeletAddr, a.kubeletPort))
		return true, nil
	})
}

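// clusterAuth adds static CIDR entries for the node IPs and cluster CIDRs,
// used when the egress selector is running in Cluster mode.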
func (a *agentTunnel) clusterAuth(config *daemonconfig.Node) {
	// In Cluster mode, we add static entries for the Node IPs and Cluster CIDRs
	for _, ip := range config.AgentConfig.NodeIPs {
		if cidr, err := util.IPToIPNet(ip); err == nil {
			logrus.Infof("Tunnel authorizer adding Node IP %s", cidr)
			a.cidrs.Insert(&podEntry{cidr: *cidr})
		}
	}
	for _, cidr := range config.AgentConfig.ClusterCIDRs {
		logrus.Infof("Tunnel authorizer adding Cluster CIDR %s", cidr)
		a.cidrs.Insert(&podEntry{cidr: *cidr})
	}
}

// watchPods watches for pods assigned to this node, adding their IPs to the CIDR list.
// If the pod uses host network, we instead add the container ports to the list of
// allowed host-network ports.
func (a *agentTunnel) watchPods(ctx context.Context, apiServerReady <-chan struct{}, config *daemonconfig.Node) {
	for _, ip := range config.AgentConfig.NodeIPs {
		if cidr, err := util.IPToIPNet(ip); err == nil {
			logrus.Infof("Tunnel authorizer adding Node IP %s", cidr)
			a.cidrs.Insert(&podEntry{cidr: *cidr, hostNet: true})
		}
	}

	<-apiServerReady

	nodeName := os.Getenv("NODE_NAME")
	pods := a.client.CoreV1().Pods(metav1.NamespaceNone)
	fieldSelector := fields.Set{"spec.nodeName": nodeName}.String()
	lw := &cache.ListWatch{
		ListFunc: func(options metav1.ListOptions) (object runtime.Object, e error) {
			options.FieldSelector = fieldSelector
			return pods.List(ctx, options)
		},
		WatchFunc: func(options metav1.ListOptions) (i watch.Interface, e error) {
			options.FieldSelector = fieldSelector
			return pods.Watch(ctx, options)
		},
	}

	logrus.Infof("Tunnel authorizer watching Pods")
	_, _, watch, done := toolswatch.NewIndexerInformerWatcher(lw, &v1.Pod{})

	defer func() {
		watch.Stop()
		<-done
	}()

	for {
		select {
		case <-ctx.Done():
			return
		case ev, ok := <-watch.ResultChan():
			pod, ok := ev.Object.(*v1.Pod)
			if !ok {
				logrus.Errorf("Tunnel watch failed: event object not of type v1.Pod")
				continue
			}
			if pod.Spec.HostNetwork {
				for _, container := range pod.Spec.Containers {
					for _, port := range container.Ports {
						if port.Protocol == v1.ProtocolTCP {
							containerPort := fmt.Sprint(port.ContainerPort)
							if pod.DeletionTimestamp != nil {
								logrus.Debugf("Tunnel authorizer removing Node Port %s", containerPort)
								delete(a.ports, containerPort)
							} else {
								logrus.Debugf("Tunnel authorizer adding Node Port %s", containerPort)
								a.ports[containerPort] = true
							}
						}
					}
				}
			} else {
				for _, ip := range pod.Status.PodIPs {
					if cidr, err := util.IPStringToIPNet(ip.IP); err == nil {
						if pod.DeletionTimestamp != nil {
							logrus.Debugf("Tunnel authorizer removing Pod IP %s", cidr)
							a.cidrs.Remove(*cidr)
						} else {
							logrus.Debugf("Tunnel authorizer adding Pod IP %s", cidr)
							a.cidrs.Insert(&podEntry{cidr: *cidr})
						}
					}
				}
			}
		}
	}
}

// watchEndpoints attempts to create tunnels to all supervisor addresses. Once the
// apiserver is up, it goes into a watch loop, adding and removing tunnels as
// endpoints come and go from the cluster.
func (a *agentTunnel) watchEndpoints(ctx context.Context, apiServerReady <-chan struct{}, wg *sync.WaitGroup, tlsConfig *tls.Config, node *daemonconfig.Node, proxy proxy.Proxy) {
	syncProxyAddresses := a.getProxySyncer(ctx, wg, tlsConfig, proxy)
	refreshFromSupervisor := getAPIServersRequester(node, proxy, syncProxyAddresses)

	<-apiServerReady

	endpoints := a.client.CoreV1().Endpoints(metav1.NamespaceDefault)
	fieldSelector := fields.Set{metav1.ObjectNameField: "kubernetes"}.String()
	lw := &cache.ListWatch{
		ListFunc: func(options metav1.ListOptions) (object runtime.Object, e error) {
			// If we're being called to re-list, there was likely an interruption to the
			// apiserver connection and the listwatch is retrying its connection. This is
			// a good indication that we may need to refresh the apiserver addresses from
			// the supervisor.
			go refreshFromSupervisor(ctx)
			options.FieldSelector = fieldSelector
			return endpoints.List(ctx, options)
		},
		WatchFunc: func(options metav1.ListOptions) (i watch.Interface, e error) {
			options.FieldSelector = fieldSelector
			return endpoints.Watch(ctx, options)
		},
	}

	_, _, watch, done := toolswatch.NewIndexerInformerWatcher(lw, &v1.Endpoints{})

	defer func() {
		watch.Stop()
		<-done
	}()

	var cancelUpdate context.CancelFunc

	for {
		select {
		case <-ctx.Done():
			if cancelUpdate != nil {
				cancelUpdate()
			}
			return
		case ev, ok := <-watch.ResultChan():
			endpoint, ok := ev.Object.(*v1.Endpoints)
			if !ok {
				logrus.Errorf("Tunnel watch failed: event object not of type v1.Endpoints")
				continue
			}

			if cancelUpdate != nil {
				cancelUpdate()
			}

			var debounceCtx context.Context
			debounceCtx, cancelUpdate = context.WithCancel(ctx)

			// When joining the cluster, the apiserver adds, removes, and then re-adds itself to
			// the endpoint list several times. This causes a bit of thrashing if we react to
			// endpoint changes immediately. Instead, perform the endpoint update in a
			// goroutine that sleeps for a short period before checking for changes and updating
			// the proxy addresses. If another update occurs, the previous update operation
			// will be cancelled and a new one queued.
			go syncProxyAddresses(debounceCtx, util.GetAddresses(endpoint))
		}
	}
}

// authorized determines whether or not a dial request is authorized.
// Connections to the local kubelet ports are allowed.
// Connections to other IPs are allowed if they are contained in a CIDR managed by this node.
// All other requests are rejected.
func (a *agentTunnel) authorized(ctx context.Context, proto, address string) bool {
	logrus.Debugf("Tunnel authorizer checking dial request for %s", address)
	host, port, err := net.SplitHostPort(address)
	if err == nil {
		if a.isKubeletOrStreamPort(proto, host, port) {
			return true
		}
		if ip := net.ParseIP(host); ip != nil {
			if nets, err := a.cidrs.ContainingNetworks(ip); err == nil && len(nets) > 0 {
				if p, ok := nets[0].(*podEntry); ok {
					if p.hostNet {
						return proto == "tcp" && a.ports[port]
					}
					return true
				}
				logrus.Debugf("Tunnel authorizer CIDR lookup returned unknown type for address %s", ip)
			}
		}
	}
	return false
}

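// agentConnection pairs a tunnel's cancel function with a health check that
// reports the state of its remotedialer connection to the loadbalancer.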
type agentConnection struct {
	cancel      context.CancelFunc
	healthCheck loadbalancer.HealthCheckFunc
}

// connect initiates a connection to the remotedialer server. Incoming dial requests from
// the server will be checked by the authorizer function prior to being fulfilled.
func (a *agentTunnel) connect(rootCtx context.Context, waitGroup *sync.WaitGroup, address string, tlsConfig *tls.Config) agentConnection {
	var status loadbalancer.HealthCheckResult

	wsURL := fmt.Sprintf("wss://%s/v1-"+version.Program+"/connect", address)
	ws := &websocket.Dialer{
		TLSClientConfig: tlsConfig,
	}

	once := sync.Once{}
	if waitGroup != nil {
		waitGroup.Add(1)
	}

	ctx, cancel := context.WithCancel(rootCtx)
	auth := func(proto, address string) bool {
		return a.authorized(rootCtx, proto, address)
	}

	onConnect := func(_ context.Context, _ *remotedialer.Session) error {
		status = loadbalancer.HealthCheckResultOK
		logrus.WithField("url", wsURL).Info("Remotedialer connected to proxy")
		if waitGroup != nil {
			once.Do(waitGroup.Done)
		}
		return nil
	}

	// Start remotedialer connect loop in a goroutine to ensure a connection to the target server
	go func() {
		for {
			// ConnectToProxy blocks until error or context cancellation
			err := remotedialer.ConnectToProxyWithDialer(ctx, wsURL, nil, auth, ws, a.dialContext, onConnect)
			status = loadbalancer.HealthCheckResultFailed
			if err != nil && !errors.Is(err, context.Canceled) {
				logrus.WithField("url", wsURL).WithError(err).Error("Remotedialer proxy error; reconnecting...")
				// wait between reconnection attempts to avoid hammering the server
				time.Sleep(endpointDebounceDelay)
			}
			// If the context has been cancelled, exit the goroutine instead of retrying
			if ctx.Err() != nil {
				if waitGroup != nil {
					once.Do(waitGroup.Done)
				}
				return
			}
		}
	}()

	return agentConnection{
		cancel: cancel,
		healthCheck: func() loadbalancer.HealthCheckResult {
			return status
		},
	}
}

// isKubeletOrStreamPort returns true if the connection is to a reserved TCP port on a loopback address.
func (a *agentTunnel) isKubeletOrStreamPort(proto, host, port string) bool {
	return proto == "tcp" && (host == "127.0.0.1" || host == "::1") && (port == a.kubeletPort || port == daemonconfig.StreamServerPort)
}

// dialContext dials a local connection on behalf of the remote server. If the
// connection is to the kubelet port on the loopback address, the kubelet is dialed
// at its configured bind address. Otherwise, the connection is dialed normally.
func (a *agentTunnel) dialContext(ctx context.Context, network, address string) (net.Conn, error) {
	host, port, err := net.SplitHostPort(address)
	if err != nil {
		return nil, err
	}
	if a.isKubeletOrStreamPort(network, host, port) && port == a.kubeletPort {
		address = net.JoinHostPort(a.kubeletAddr, port)
	}
	return defaultDialer.DialContext(ctx, network, address)
}

// proxySyncer is a common signature for functions that sync the proxy address list with a context
type proxySyncer func(ctx context.Context, addresses []string)

// getProxySyncer returns a function that can be called to update the list of supervisors.
// This function is responsible for connecting to or disconnecting websocket tunnels,
// as well as updating the proxy loadbalancer server list.
func (a *agentTunnel) getProxySyncer(ctx context.Context, wg *sync.WaitGroup, tlsConfig *tls.Config, proxy proxy.Proxy) proxySyncer {
	disconnect := map[string]context.CancelFunc{}
	// Attempt to connect to supervisors, storing their cancellation function for later when we
	// need to disconnect.
	for _, address := range proxy.SupervisorAddresses() {
		if _, ok := disconnect[address]; !ok {
			conn := a.connect(ctx, wg, address, tlsConfig)
			disconnect[address] = conn.cancel
			proxy.SetHealthCheck(address, conn.healthCheck)
		}
	}

	// return a function that can be called to update the address list.
	// servers will be connected to or disconnected from as necessary,
	// and the proxy addresses updated.
	return func(debounceCtx context.Context, addresses []string) {
		select {
		case <-time.After(endpointDebounceDelay):
		case <-debounceCtx.Done():
			return
		}

		// Compare list of supervisor addresses before and after syncing apiserver
		// endpoints into the proxy to figure out which supervisors we need to connect to
		// or disconnect from. Note that the addresses we were passed will not match
		// the supervisor addresses if the supervisor and apiserver are on different ports -
		// they must be round-tripped through proxy.Update before comparing.
		curAddresses := sets.New(proxy.SupervisorAddresses()...)
		proxy.Update(addresses)
		newAddresses := sets.New(proxy.SupervisorAddresses()...)

		// add new servers
		for address := range newAddresses.Difference(curAddresses) {
			if _, ok := disconnect[address]; !ok {
				conn := a.connect(ctx, nil, address, tlsConfig)
				logrus.Infof("Started tunnel to %s", address)
				disconnect[address] = conn.cancel
				proxy.SetHealthCheck(address, conn.healthCheck)
			}
		}

		// remove old servers
		for address := range curAddresses.Difference(newAddresses) {
			if cancel, ok := disconnect[address]; ok {
				cancel()
				delete(disconnect, address)
				logrus.Infof("Stopped tunnel to %s", address)
			}
		}
	}
}

// getAPIServersRequester returns a function that can be called to update the
// proxy apiserver endpoints with addresses retrieved from the supervisor.
func getAPIServersRequester(node *daemonconfig.Node, proxy proxy.Proxy, syncProxyAddresses proxySyncer) func(ctx context.Context) {
	var info *clientaccess.Info
	return func(ctx context.Context) {
		if info == nil {
			var err error
			withCert := clientaccess.WithClientCertificate(node.AgentConfig.ClientKubeletCert, node.AgentConfig.ClientKubeletKey)
			info, err = clientaccess.ParseAndValidateToken(proxy.SupervisorURL(), node.Token, withCert)
			if err != nil {
				logrus.Warnf("Failed to validate server token: %v", err)
				return
			}
		}

		if addresses, err := agentconfig.GetAPIServers(ctx, info); err != nil {
			logrus.Warnf("Failed to get apiserver addresses from supervisor: %v", err)
		} else {
			syncProxyAddresses(ctx, addresses)
		}
	}
}