mirror of https://github.com/k3s-io/k3s

Remove control-plane egress context and fix agent mode.

The control-plane egress context handles requests bound for destinations outside the cluster and should not be sent to the proxy. In agent mode we don't watch pods, and simply direct-dial any request for a non-node address, which is the original behavior.

Signed-off-by: Brad Davidson <brad.davidson@rancher.com>

pull/5680/head
parent 15b8fb962a
commit 7a323400e9
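For readers coming to this commit cold: the EgressSelectorMode values referenced in the hunks below select how apiserver egress traffic is routed in k3s. A rough sketch follows; the string literals and comments are assumptions drawn from the k3s CLI documentation (--egress-selector-mode), not part of this diff:

// Sketch only: egress-selector-mode values referenced by this commit.
const (
	// disabled: the apiserver dials endpoints directly instead of using the tunnel.
	EgressSelectorModeDisabled = "disabled"
	// agent: only traffic to node addresses is routed through the agent tunnel.
	EgressSelectorModeAgent = "agent"
	// cluster / pod: pod and cluster addresses are also dialed via the tunnel,
	// so agents watch pods to let the tunnel authorizer admit pod IPs and ports.
	EgressSelectorModeCluster = "cluster"
	EgressSelectorModePod     = "pod"
)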
@@ -28,7 +28,6 @@ import (
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/tools/clientcmd"
 	toolswatch "k8s.io/client-go/tools/watch"
-	"k8s.io/kubectl/pkg/util/podutils"
 )
 
 type agentTunnel struct {
@@ -194,18 +193,17 @@ func (a *agentTunnel) watchPods(ctx context.Context, apiServerReady <-chan struc
 				logrus.Errorf("Tunnel watch failed: event object not of type v1.Pod")
 				continue
 			}
-			ready := podutils.IsPodReady(pod)
 			if pod.Spec.HostNetwork {
 				for _, container := range pod.Spec.Containers {
 					for _, port := range container.Ports {
 						if port.Protocol == v1.ProtocolTCP {
 							containerPort := fmt.Sprint(port.ContainerPort)
-							if ready {
-								logrus.Debugf("Tunnel authorizer adding Node Port %s", containerPort)
-								a.ports[containerPort] = true
-							} else {
+							if pod.DeletionTimestamp != nil {
 								logrus.Debugf("Tunnel authorizer removing Node Port %s", containerPort)
 								delete(a.ports, containerPort)
+							} else {
+								logrus.Debugf("Tunnel authorizer adding Node Port %s", containerPort)
+								a.ports[containerPort] = true
 							}
 						}
 					}
@@ -213,12 +211,12 @@ func (a *agentTunnel) watchPods(ctx context.Context, apiServerReady <-chan struc
 			} else {
 				for _, ip := range pod.Status.PodIPs {
 					if cidr, err := util.IPStringToIPNet(ip.IP); err == nil {
-						if ready {
-							logrus.Debugf("Tunnel authorizer adding Pod IP %s", cidr)
-							a.cidrs.Insert(&podEntry{cidr: *cidr})
-						} else {
+						if pod.DeletionTimestamp != nil {
 							logrus.Debugf("Tunnel authorizer removing Pod IP %s", cidr)
 							a.cidrs.Remove(*cidr)
+						} else {
+							logrus.Debugf("Tunnel authorizer adding Pod IP %s", cidr)
+							a.cidrs.Insert(&podEntry{cidr: *cidr})
 						}
 					}
 				}
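The two watchPods hunks above flip the trigger for authorizer updates: entries were previously added while a pod was ready and removed when it was not, so a readiness flap revoked tunnel authorization for a still-running pod. Now entries are removed only when the pod carries a deletion timestamp, and added otherwise. A self-contained sketch of the host-network branch; authorizePod is a hypothetical extraction, not a function in this commit:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// authorizePod keys the host-network port map on deletion rather than
// readiness: an unready-but-running pod keeps its authorization, and a
// terminating pod loses it.
func authorizePod(pod *v1.Pod, ports map[string]bool) {
	for _, container := range pod.Spec.Containers {
		for _, port := range container.Ports {
			if port.Protocol != v1.ProtocolTCP {
				continue
			}
			containerPort := fmt.Sprint(port.ContainerPort)
			if pod.DeletionTimestamp != nil {
				delete(ports, containerPort) // pod going away: revoke
			} else {
				ports[containerPort] = true // pod present: authorize
			}
		}
	}
}

func main() {
	ports := map[string]bool{}
	pod := &v1.Pod{}
	pod.Spec.HostNetwork = true
	pod.Spec.Containers = []v1.Container{{
		Ports: []v1.ContainerPort{{ContainerPort: 6443, Protocol: v1.ProtocolTCP}},
	}}
	authorizePod(pod, ports)
	fmt.Println(ports) // map[6443:true]
}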
@@ -723,33 +723,26 @@ func genEncryptionConfigAndState(controlConfig *config.Control) error {
 }
 
 func genEgressSelectorConfig(controlConfig *config.Control) error {
-	direct := apiserver.Connection{
-		ProxyProtocol: apiserver.ProtocolDirect,
-	}
+	var clusterConn apiserver.Connection
 
-	proxy := apiserver.Connection{
-		ProxyProtocol: apiserver.ProtocolHTTPConnect,
-		Transport: &apiserver.Transport{
-			TCP: &apiserver.TCPTransport{
-				URL: fmt.Sprintf("https://%s:%d", controlConfig.Loopback(), controlConfig.SupervisorPort),
-				TLSConfig: &apiserver.TLSConfig{
-					CABundle:   controlConfig.Runtime.ServerCA,
-					ClientKey:  controlConfig.Runtime.ClientKubeAPIKey,
-					ClientCert: controlConfig.Runtime.ClientKubeAPICert,
+	if controlConfig.EgressSelectorMode == config.EgressSelectorModeDisabled {
+		clusterConn = apiserver.Connection{
+			ProxyProtocol: apiserver.ProtocolDirect,
+		}
+	} else {
+		clusterConn = apiserver.Connection{
+			ProxyProtocol: apiserver.ProtocolHTTPConnect,
+			Transport: &apiserver.Transport{
+				TCP: &apiserver.TCPTransport{
+					URL: fmt.Sprintf("https://%s:%d", controlConfig.Loopback(), controlConfig.SupervisorPort),
+					TLSConfig: &apiserver.TLSConfig{
+						CABundle:   controlConfig.Runtime.ServerCA,
+						ClientKey:  controlConfig.Runtime.ClientKubeAPIKey,
+						ClientCert: controlConfig.Runtime.ClientKubeAPICert,
+					},
 				},
 			},
-		},
-	}
-
-	clusterConn := direct
-	controlConn := direct
-
-	switch controlConfig.EgressSelectorMode {
-	case config.EgressSelectorModeAgent:
-		controlConn = proxy
-	case config.EgressSelectorModeCluster, config.EgressSelectorModePod:
-		clusterConn = proxy
-		controlConn = proxy
-	}
+		}
+	}
 
 	egressConfig := apiserver.EgressSelectorConfiguration{
@@ -762,10 +755,6 @@ func genEgressSelectorConfig(controlConfig *config.Control) error {
 				Name:       "cluster",
 				Connection: clusterConn,
 			},
-			{
-				Name:       "controlplane",
-				Connection: controlConn,
-			},
 		},
 	}
 
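After these two hunks, genEgressSelectorConfig emits a single "cluster" egress selection: direct when the mode is disabled, HTTPConnect to the supervisor otherwise. With no "controlplane" selection in the file, the apiserver defaults to a direct dial for that context, which is the stated intent of the commit. A sketch of the disabled-mode shape, assuming the upstream k8s.io/apiserver config types; TypeMeta and the marshaling step are illustrative, not a claim about how k3s writes the file:

package main

import (
	"fmt"

	apiserver "k8s.io/apiserver/pkg/apis/apiserver"
	"sigs.k8s.io/yaml"
)

func main() {
	// Disabled mode: the lone "cluster" context dials direct.
	egressConfig := apiserver.EgressSelectorConfiguration{
		EgressSelections: []apiserver.EgressSelection{{
			Name: "cluster",
			Connection: apiserver.Connection{
				ProxyProtocol: apiserver.ProtocolDirect,
			},
		}},
	}
	b, err := yaml.Marshal(egressConfig)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(b)) // shows the shape of the generated config
}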
@@ -245,19 +245,14 @@ func (t *TunnelServer) dialBackend(addr string) (net.Conn, error) {
 		useTunnel = false
 	}
 
-	if t.server.HasSession(nodeName) {
-		if useTunnel {
-			// Have a session and it is safe to use for this destination, do so.
-			logrus.Debugf("Tunnel server egress proxy dialing %s via session to %s", addr, nodeName)
-			return t.server.Dial(nodeName, 15*time.Second, "tcp", addr)
-		}
-		// Have a session but the agent doesn't support tunneling to this destination or
-		// the destination is local; fall back to direct connection.
-		logrus.Debugf("Tunnel server egress proxy dialing %s directly", addr)
-		return net.Dial("tcp", addr)
-	}
+	if useTunnel && t.server.HasSession(nodeName) {
+		// Have a session and it is safe to use for this destination, do so.
+		logrus.Debugf("Tunnel server egress proxy dialing %s via session to %s", addr, nodeName)
+		return t.server.Dial(nodeName, 15*time.Second, "tcp", addr)
+	}
 
-	// don't provide a proxy connection for anything else
-	logrus.Debugf("Tunnel server egress proxy rejecting connection to %s", addr)
-	return nil, fmt.Errorf("no sessions available for host %s", host)
+	// Don't have a session, the agent doesn't support tunneling to this destination, or
+	// the destination is local; fall back to direct connection.
+	logrus.Debugf("Tunnel server egress proxy dialing %s directly", addr)
+	return net.Dial("tcp", addr)
 }
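The dialBackend hunk collapses the routing decision: tunnel only when the destination is tunnelable for the current mode and a session to the node exists; every other case, including agent mode's non-node addresses, now falls back to a direct dial instead of returning "no sessions available". A condensed sketch; hasSession and dialViaSession are hypothetical stand-ins for the remotedialer server calls:

package main

import (
	"net"
	"time"
)

// Hypothetical stand-ins for the remotedialer session lookup and dial.
func hasSession(nodeName string) bool { return false }

func dialViaSession(nodeName, addr string, timeout time.Duration) (net.Conn, error) {
	return nil, nil // placeholder; the real call multiplexes over the agent tunnel
}

// dialBackend, condensed to the post-change policy: the old rejection
// branch is gone, so anything not tunneled is dialed directly.
func dialBackend(addr, nodeName string, useTunnel bool) (net.Conn, error) {
	if useTunnel && hasSession(nodeName) {
		return dialViaSession(nodeName, addr, 15*time.Second)
	}
	return net.Dial("tcp", addr)
}

func main() {
	if conn, err := dialBackend("10.0.0.5:10250", "node1", false); err == nil {
		conn.Close()
	}
}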