mirror of https://github.com/k3s-io/k3s
Add support for dual-stack Pod/Service CIDRs and node IP addresses (#3212)
* Add support for dual-stack cluster/service CIDRs and node addresses

Signed-off-by: Brad Davidson <brad.davidson@rancher.com>

Refs: pull/3229/head, v1.21.0+k3s1
parent ac507e530d
commit 2705431d96

go.mod
@@ -122,5 +122,6 @@ require (
 	k8s.io/klog v1.0.0
 	k8s.io/kubectl v0.21.0
 	k8s.io/kubernetes v1.21.0
+	k8s.io/utils v0.0.0-20201110183641-67b214c5f920
 	sigs.k8s.io/yaml v1.2.0
 )
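Note: the hunks below repeatedly call helpers from k3s's pkg/util package (util.JoinIPs, util.JoinIPNets, util.GetFirst4, util.GetFirst4Net, util.GetFirst4String). Their implementation is not part of this excerpt; what follows is only a minimal sketch of what those helpers are assumed to do, inferred from how the callers use them.

package util

import (
	"errors"
	"net"
	"strings"
)

// JoinIPs renders a list of IPs as a single comma-separated string,
// e.g. for the node-IP request header and the node address annotations.
func JoinIPs(elems []net.IP) string {
	var strs []string
	for _, elem := range elems {
		strs = append(strs, elem.String())
	}
	return strings.Join(strs, ",")
}

// JoinIPNets renders a list of CIDRs as a comma-separated string,
// e.g. for the kube-proxy and controller-manager cluster-cidr arguments.
func JoinIPNets(elems []*net.IPNet) string {
	var strs []string
	for _, elem := range elems {
		strs = append(strs, elem.String())
	}
	return strings.Join(strs, ",")
}

// GetFirst4 returns the first IPv4 address in the list, for fields that
// legacy single-stack clients still expect to hold one IPv4 value.
func GetFirst4(elems []net.IP) (net.IP, error) {
	for _, elem := range elems {
		if elem != nil && elem.To4() != nil {
			return elem, nil
		}
	}
	return nil, errors.New("no IPv4 address found")
}

// GetFirst4Net and GetFirst4String are assumed to behave analogously for
// []*net.IPNet values and comma-separated string slices, respectively.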
@@ -27,6 +27,7 @@ import (
 	"github.com/rancher/k3s/pkg/clientaccess"
 	"github.com/rancher/k3s/pkg/daemons/config"
 	"github.com/rancher/k3s/pkg/daemons/control/deps"
+	"github.com/rancher/k3s/pkg/util"
 	"github.com/rancher/k3s/pkg/version"
 	"github.com/sirupsen/logrus"
 	"k8s.io/apimachinery/pkg/util/json"
@@ -41,7 +42,7 @@ func Get(ctx context.Context, agent cmds.Agent, proxy proxy.Proxy) *config.Node
 	for {
 		agentConfig, err := get(ctx, &agent, proxy)
 		if err != nil {
-			logrus.Errorf("Failed to retrieve agent config: %v", err)
+			logrus.Errorf("Failed to configure agent: %v", err)
 			select {
 			case <-time.After(5 * time.Second):
 				continue
@@ -64,7 +65,7 @@ func Request(path string, info *clientaccess.Info, requester HTTPRequester) ([]b
 	return requester(u.String(), clientaccess.GetHTTPClient(info.CACerts), info.Username, info.Password)
 }
 
-func getNodeNamedCrt(nodeName, nodeIP, nodePasswordFile string) HTTPRequester {
+func getNodeNamedCrt(nodeName string, nodeIPs []sysnet.IP, nodePasswordFile string) HTTPRequester {
 	return func(u string, client *http.Client, username, password string) ([]byte, error) {
 		req, err := http.NewRequest(http.MethodGet, u, nil)
 		if err != nil {
@@ -81,7 +82,7 @@ func getNodeNamedCrt(nodeName, nodeIP, nodePasswordFile string) HTTPRequester {
 			return nil, err
 		}
 		req.Header.Set(version.Program+"-Node-Password", nodePassword)
-		req.Header.Set(version.Program+"-Node-IP", nodeIP)
+		req.Header.Set(version.Program+"-Node-IP", util.JoinIPs(nodeIPs))
 
 		resp, err := client.Do(req)
 		if err != nil {
@@ -144,8 +145,8 @@ func upgradeOldNodePasswordPath(oldNodePasswordFile, newNodePasswordFile string)
 	}
 }
 
-func getServingCert(nodeName, nodeIP, servingCertFile, servingKeyFile, nodePasswordFile string, info *clientaccess.Info) (*tls.Certificate, error) {
-	servingCert, err := Request("/v1-"+version.Program+"/serving-kubelet.crt", info, getNodeNamedCrt(nodeName, nodeIP, nodePasswordFile))
+func getServingCert(nodeName string, nodeIPs []sysnet.IP, servingCertFile, servingKeyFile, nodePasswordFile string, info *clientaccess.Info) (*tls.Certificate, error) {
+	servingCert, err := Request("/v1-"+version.Program+"/serving-kubelet.crt", info, getNodeNamedCrt(nodeName, nodeIPs, nodePasswordFile))
 	if err != nil {
 		return nil, err
 	}
@@ -207,9 +208,9 @@ func splitCertKeyPEM(bytes []byte) (certPem []byte, keyPem []byte) {
 	return
 }
 
-func getNodeNamedHostFile(filename, keyFile, nodeName, nodeIP, nodePasswordFile string, info *clientaccess.Info) error {
+func getNodeNamedHostFile(filename, keyFile, nodeName string, nodeIPs []sysnet.IP, nodePasswordFile string, info *clientaccess.Info) error {
 	basename := filepath.Base(filename)
-	fileBytes, err := Request("/v1-"+version.Program+"/"+basename, info, getNodeNamedCrt(nodeName, nodeIP, nodePasswordFile))
+	fileBytes, err := Request("/v1-"+version.Program+"/"+basename, info, getNodeNamedCrt(nodeName, nodeIPs, nodePasswordFile))
 	if err != nil {
 		return err
 	}
@@ -224,21 +225,31 @@ func getNodeNamedHostFile(filename, keyFile, nodeName, nodeIP, nodePasswordFile
 	return nil
 }
 
-func getHostnameAndIP(info cmds.Agent) (string, string, error) {
-	ip := info.NodeIP
-	if ip == "" {
+func getHostnameAndIPs(info cmds.Agent) (string, []sysnet.IP, error) {
+	ips := []sysnet.IP{}
+	if len(info.NodeIP) == 0 {
 		hostIP, err := net.ChooseHostInterface()
 		if err != nil {
-			return "", "", err
+			return "", nil, err
+		}
+		ips = append(ips, hostIP)
+	} else {
+		for _, hostIP := range info.NodeIP {
+			for _, v := range strings.Split(hostIP, ",") {
+				ip := sysnet.ParseIP(v)
+				if ip == nil {
+					return "", nil, fmt.Errorf("invalid node-ip %s", v)
+				}
+				ips = append(ips, ip)
+			}
 		}
-		ip = hostIP.String()
 	}
 
 	name := info.NodeName
 	if name == "" {
 		hostname, err := os.Hostname()
 		if err != nil {
-			return "", "", err
+			return "", nil, err
 		}
 		name = hostname
 	}
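To make the parsing loop above concrete, here is a standalone illustration of how repeated and comma-separated --node-ip values end up as a dual-stack address list; the addresses are made-up examples, not defaults.

package main

import (
	"fmt"
	"net"
	"strings"
)

func main() {
	// Simulates the cli.StringSlice produced by passing --node-ip twice,
	// the second time as a comma-separated IPv6/IPv4 pair.
	flagValues := []string{"10.0.10.5", "2001:db8::5,10.0.10.6"}

	var ips []net.IP
	for _, flagValue := range flagValues {
		for _, v := range strings.Split(flagValue, ",") {
			ip := net.ParseIP(v)
			if ip == nil {
				fmt.Printf("invalid node-ip %s\n", v)
				return
			}
			ips = append(ips, ip)
		}
	}
	fmt.Println(ips) // [10.0.10.5 2001:db8::5 10.0.10.6]
}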
@@ -247,7 +258,7 @@ func getHostnameAndIP(info cmds.Agent) (string, string, error) {
 	// https://github.com/kubernetes/kubernetes/issues/71140
 	name = strings.ToLower(name)
 
-	return name, ip, nil
+	return name, ips, nil
 }
 
 func isValidResolvConf(resolvConfFile string) bool {
@@ -305,7 +316,7 @@ func get(ctx context.Context, envInfo *cmds.Agent, proxy proxy.Proxy) (*config.N
 
 	controlConfig, err := getConfig(info)
 	if err != nil {
-		return nil, err
+		return nil, errors.Wrap(err, "failed to retrieve configuration from server")
 	}
 
 	// If the supervisor and externally-facing apiserver are not on the same port, tell the proxy where to find the apiserver.
@@ -349,7 +360,7 @@ func get(ctx context.Context, envInfo *cmds.Agent, proxy proxy.Proxy) (*config.N
 	newNodePasswordFile := filepath.Join(nodeConfigPath, "password")
 	upgradeOldNodePasswordPath(oldNodePasswordFile, newNodePasswordFile)
 
-	nodeName, nodeIP, err := getHostnameAndIP(*envInfo)
+	nodeName, nodeIPs, err := getHostnameAndIPs(*envInfo)
 	if err != nil {
 		return nil, err
 	}
@@ -364,14 +375,14 @@ func get(ctx context.Context, envInfo *cmds.Agent, proxy proxy.Proxy) (*config.N
 
 	os.Setenv("NODE_NAME", nodeName)
 
-	servingCert, err := getServingCert(nodeName, nodeIP, servingKubeletCert, servingKubeletKey, newNodePasswordFile, info)
+	servingCert, err := getServingCert(nodeName, nodeIPs, servingKubeletCert, servingKubeletKey, newNodePasswordFile, info)
 	if err != nil {
 		return nil, err
 	}
 
 	clientKubeletCert := filepath.Join(envInfo.DataDir, "agent", "client-kubelet.crt")
 	clientKubeletKey := filepath.Join(envInfo.DataDir, "agent", "client-kubelet.key")
-	if err := getNodeNamedHostFile(clientKubeletCert, clientKubeletKey, nodeName, nodeIP, newNodePasswordFile, info); err != nil {
+	if err := getNodeNamedHostFile(clientKubeletCert, clientKubeletKey, nodeName, nodeIPs, newNodePasswordFile, info); err != nil {
 		return nil, err
 	}
 
@@ -411,10 +422,8 @@ func get(ctx context.Context, envInfo *cmds.Agent, proxy proxy.Proxy) (*config.N
 	}
 	nodeConfig.FlannelIface = flannelIface
 	nodeConfig.Images = filepath.Join(envInfo.DataDir, "agent", "images")
-	nodeConfig.AgentConfig.NodeIP = nodeIP
 	nodeConfig.AgentConfig.NodeName = nodeName
 	nodeConfig.AgentConfig.NodeConfigPath = nodeConfigPath
-	nodeConfig.AgentConfig.NodeExternalIP = envInfo.NodeExternalIP
 	nodeConfig.AgentConfig.ServingKubeletCert = servingKubeletCert
 	nodeConfig.AgentConfig.ServingKubeletKey = servingKubeletKey
 	nodeConfig.AgentConfig.ClusterDNS = controlConfig.ClusterDNS
@@ -458,6 +467,32 @@ func get(ctx context.Context, envInfo *cmds.Agent, proxy proxy.Proxy) (*config.N
 	nodeConfig.Containerd.Template = filepath.Join(envInfo.DataDir, "agent", "etc", "containerd", "config.toml.tmpl")
 	nodeConfig.Certificate = servingCert
 
+	nodeConfig.AgentConfig.NodeIPs = nodeIPs
+	nodeIP, err := util.GetFirst4(nodeIPs)
+	if err != nil {
+		return nil, errors.Wrap(err, "cannot configure IPv4 node-ip")
+	}
+	nodeConfig.AgentConfig.NodeIP = nodeIP.String()
+
+	for _, externalIP := range envInfo.NodeExternalIP {
+		for _, v := range strings.Split(externalIP, ",") {
+			ip := sysnet.ParseIP(v)
+			if ip == nil {
+				return nil, fmt.Errorf("invalid node-external-ip %s", v)
+			}
+			nodeConfig.AgentConfig.NodeExternalIPs = append(nodeConfig.AgentConfig.NodeExternalIPs, ip)
+		}
+	}
+
+	// if configured, set NodeExternalIP to the first IPv4 address, for legacy clients
+	if len(nodeConfig.AgentConfig.NodeExternalIPs) > 0 {
+		nodeExternalIP, err := util.GetFirst4(nodeConfig.AgentConfig.NodeExternalIPs)
+		if err != nil {
+			return nil, errors.Wrap(err, "cannot configure IPv4 node-external-ip")
+		}
+		nodeConfig.AgentConfig.NodeExternalIP = nodeExternalIP.String()
+	}
+
 	if nodeConfig.FlannelBackend == config.FlannelBackendNone {
 		nodeConfig.NoFlannel = true
 	} else {
@@ -488,27 +523,35 @@ func get(ctx context.Context, envInfo *cmds.Agent, proxy proxy.Proxy) (*config.N
 	}
 
 	if controlConfig.ClusterIPRange != nil {
-		nodeConfig.AgentConfig.ClusterCIDR = *controlConfig.ClusterIPRange
+		nodeConfig.AgentConfig.ClusterCIDR = controlConfig.ClusterIPRange
+		nodeConfig.AgentConfig.ClusterCIDRs = []*sysnet.IPNet{controlConfig.ClusterIPRange}
+	}
+
+	if len(controlConfig.ClusterIPRanges) > 0 {
+		nodeConfig.AgentConfig.ClusterCIDRs = controlConfig.ClusterIPRanges
 	}
 
 	if controlConfig.ServiceIPRange != nil {
-		nodeConfig.AgentConfig.ServiceCIDR = *controlConfig.ServiceIPRange
+		nodeConfig.AgentConfig.ServiceCIDR = controlConfig.ServiceIPRange
+		nodeConfig.AgentConfig.ServiceCIDRs = []*sysnet.IPNet{controlConfig.ServiceIPRange}
+	}
+
+	if len(controlConfig.ServiceIPRanges) > 0 {
+		nodeConfig.AgentConfig.ServiceCIDRs = controlConfig.ServiceIPRanges
 	}
 
 	if controlConfig.ServiceNodePortRange != nil {
 		nodeConfig.AgentConfig.ServiceNodePortRange = *controlConfig.ServiceNodePortRange
 	}
 
-	// Old versions of the server do not send enough information to correctly start the NPC. Users
-	// need to upgrade the server to at least the same version as the agent, or disable the NPC
-	// cluster-wide.
-	if controlConfig.DisableNPC == false && (controlConfig.ServiceIPRange == nil || controlConfig.ServiceNodePortRange == nil) {
-		return nil, fmt.Errorf("incompatible down-level server detected; servers must be upgraded to at least %s, or restarted with --disable-network-policy", version.Version)
+	if len(controlConfig.ClusterDNSs) == 0 {
+		nodeConfig.AgentConfig.ClusterDNSs = []sysnet.IP{controlConfig.ClusterDNS}
+	} else {
+		nodeConfig.AgentConfig.ClusterDNSs = controlConfig.ClusterDNSs
 	}
 
 	nodeConfig.AgentConfig.ExtraKubeletArgs = envInfo.ExtraKubeletArgs
 	nodeConfig.AgentConfig.ExtraKubeProxyArgs = envInfo.ExtraKubeProxyArgs
 
 	nodeConfig.AgentConfig.NodeTaints = envInfo.Taints
 	nodeConfig.AgentConfig.NodeLabels = envInfo.Labels
 	nodeConfig.AgentConfig.PrivateRegistry = envInfo.PrivateRegistry
@@ -520,6 +563,10 @@ func get(ctx context.Context, envInfo *cmds.Agent, proxy proxy.Proxy) (*config.N
 	nodeConfig.AgentConfig.PodManifests = filepath.Join(envInfo.DataDir, "agent", DefaultPodManifestPath)
 	nodeConfig.AgentConfig.ProtectKernelDefaults = envInfo.ProtectKernelDefaults
 
+	if err := validateNetworkConfig(nodeConfig); err != nil {
+		return nil, err
+	}
+
 	return nodeConfig, nil
 }
 
@@ -532,3 +579,15 @@ func getConfig(info *clientaccess.Info) (*config.Control, error) {
 	controlControl := &config.Control{}
 	return controlControl, json.Unmarshal(data, controlControl)
 }
+
+// validateNetworkConfig ensures that the network configuration values provided by the server make sense.
+func validateNetworkConfig(nodeConfig *config.Node) error {
+	// Old versions of the server do not send enough information to correctly start the NPC. Users
+	// need to upgrade the server to at least the same version as the agent, or disable the NPC
+	// cluster-wide.
+	if nodeConfig.AgentConfig.DisableNPC == false && (nodeConfig.AgentConfig.ServiceCIDR == nil || nodeConfig.AgentConfig.ServiceNodePortRange.Size == 0) {
+		return fmt.Errorf("incompatible down-level server detected; servers must be upgraded to at least %s, or restarted with --disable-network-policy", version.Version)
+	}
+
+	return nil
+}
@@ -592,7 +592,7 @@ func NewNetworkPolicyController(clientset kubernetes.Interface,
 	// be up to date with all of the policy changes from any enqueued request after that
 	npc.fullSyncRequestChan = make(chan struct{}, 1)
 
-	npc.serviceClusterIPRange = config.AgentConfig.ServiceCIDR
+	npc.serviceClusterIPRange = *config.AgentConfig.ServiceCIDR
 	npc.serviceNodePortRange = strings.ReplaceAll(config.AgentConfig.ServiceNodePortRange.String(), "-", ":")
 	npc.syncPeriod = defaultSyncPeriod
 
@@ -253,15 +253,17 @@ func testForMissingOrUnwanted(t *testing.T, targetMsg string, got []podInfo, wan
 	}
 }
 
-func newMinimalNodeConfig(clusterIPCIDR string, nodePortRange string, hostNameOverride string, externalIPs []string) *config.Node {
+func newMinimalNodeConfig(serviceIPCIDR string, nodePortRange string, hostNameOverride string, externalIPs []string) *config.Node {
 	nodeConfig := &config.Node{AgentConfig: config.Agent{}}
 
-	if clusterIPCIDR != "" {
-		_, cidr, err := net.ParseCIDR(clusterIPCIDR)
+	if serviceIPCIDR != "" {
+		_, cidr, err := net.ParseCIDR(serviceIPCIDR)
 		if err != nil {
 			panic("failed to get parse --service-cluster-ip-range parameter: " + err.Error())
 		}
-		nodeConfig.AgentConfig.ClusterCIDR = *cidr
+		nodeConfig.AgentConfig.ServiceCIDR = cidr
+	} else {
+		nodeConfig.AgentConfig.ServiceCIDR = &net.IPNet{}
 	}
 	if nodePortRange != "" {
 		portRange, err := utilnet.ParsePortRange(nodePortRange)
@@ -2,7 +2,6 @@ package agent
 
 import (
 	"context"
-	"errors"
 	"fmt"
 	"io/ioutil"
 	"net/url"
@@ -14,6 +13,7 @@ import (
 	"github.com/containerd/cgroups"
 	cgroupsv2 "github.com/containerd/cgroups/v2"
 	systemd "github.com/coreos/go-systemd/daemon"
+	"github.com/pkg/errors"
 	"github.com/rancher/k3s/pkg/agent/config"
 	"github.com/rancher/k3s/pkg/agent/containerd"
 	"github.com/rancher/k3s/pkg/agent/flannel"
@@ -23,11 +23,12 @@ import (
 	"github.com/rancher/k3s/pkg/agent/tunnel"
 	"github.com/rancher/k3s/pkg/cli/cmds"
 	"github.com/rancher/k3s/pkg/clientaccess"
+	cp "github.com/rancher/k3s/pkg/cloudprovider"
 	"github.com/rancher/k3s/pkg/daemons/agent"
 	daemonconfig "github.com/rancher/k3s/pkg/daemons/config"
 	"github.com/rancher/k3s/pkg/nodeconfig"
 	"github.com/rancher/k3s/pkg/rootless"
-	"github.com/rancher/k3s/pkg/version"
+	"github.com/rancher/k3s/pkg/util"
 	"github.com/sirupsen/logrus"
 	"k8s.io/apimachinery/pkg/api/equality"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -36,12 +37,7 @@ import (
 	v1 "k8s.io/client-go/kubernetes/typed/core/v1"
 	"k8s.io/client-go/tools/clientcmd"
 	"k8s.io/controller-manager/app"
-)
-
-var (
-	InternalIPLabel = version.Program + ".io/internal-ip"
-	ExternalIPLabel = version.Program + ".io/external-ip"
-	HostnameLabel   = version.Program + ".io/hostname"
+	utilsnet "k8s.io/utils/net"
 )
 
 const (
@@ -76,6 +72,21 @@ func setupCriCtlConfig(cfg cmds.Agent, nodeConfig *daemonconfig.Node) error {
 func run(ctx context.Context, cfg cmds.Agent, proxy proxy.Proxy) error {
 	nodeConfig := config.Get(ctx, cfg, proxy)
 
+	dualCluster, err := utilsnet.IsDualStackCIDRs(nodeConfig.AgentConfig.ClusterCIDRs)
+	if err != nil {
+		return errors.Wrap(err, "failed to validate cluster-cidr")
+	}
+	dualService, err := utilsnet.IsDualStackCIDRs(nodeConfig.AgentConfig.ServiceCIDRs)
+	if err != nil {
+		return errors.Wrap(err, "failed to validate service-cidr")
+	}
+	dualNode, err := utilsnet.IsDualStackIPs(nodeConfig.AgentConfig.NodeIPs)
+	if err != nil {
+		return errors.Wrap(err, "failed to validate node-ip")
+	}
+
+	syssetup.Configure(dualCluster || dualService || dualNode)
+
 	if err := setupCriCtlConfig(cfg, nodeConfig); err != nil {
 		return err
 	}
@@ -135,7 +146,6 @@ func Run(ctx context.Context, cfg cmds.Agent) error {
 	if err := validate(); err != nil {
 		return err
 	}
-	syssetup.Configure()
 
 	if cfg.Rootless && !cfg.RootlessAlreadyUnshared {
 		if err := rootless.Rootless(cfg.DataDir); err != nil {
@@ -231,22 +241,30 @@ func configureNode(ctx context.Context, agentConfig *daemonconfig.Agent, nodes v
 			continue
 		}
 
-		newLabels, updateMutables := updateMutableLabels(agentConfig, node.Labels)
+		updateNode := false
+		if labels, changed := updateMutableLabels(agentConfig, node.Labels); changed {
+			node.Labels = labels
+			updateNode = true
+		}
 
-		updateAddresses := !agentConfig.DisableCCM
-		if updateAddresses {
-			newLabels, updateAddresses = updateAddressLabels(agentConfig, newLabels)
+		if !agentConfig.DisableCCM {
+			if annotations, changed := updateAddressAnnotations(agentConfig, node.Annotations); changed {
+				node.Annotations = annotations
+				updateNode = true
+			}
+			if labels, changed := updateLegacyAddressLabels(agentConfig, node.Labels); changed {
+				node.Labels = labels
+				updateNode = true
+			}
 		}
 
 		// inject node config
-		updateNode, err := nodeconfig.SetNodeConfigAnnotations(node)
-		if err != nil {
+		if changed, err := nodeconfig.SetNodeConfigAnnotations(node); err != nil {
 			return err
-		}
-		if updateAddresses || updateMutables {
-			node.Labels = newLabels
+		} else if changed {
 			updateNode = true
 		}
 
 		if updateNode {
 			if _, err := nodes.Update(ctx, node, metav1.UpdateOptions{}); err != nil {
 				logrus.Infof("Failed to update node %s: %v", agentConfig.NodeName, err)
@@ -286,19 +304,37 @@ func updateMutableLabels(agentConfig *daemonconfig.Agent, nodeLabels map[string]
 	return result, !equality.Semantic.DeepEqual(nodeLabels, result)
 }
 
-func updateAddressLabels(agentConfig *daemonconfig.Agent, nodeLabels map[string]string) (map[string]string, bool) {
-	result := map[string]string{
-		InternalIPLabel: agentConfig.NodeIP,
-		HostnameLabel:   agentConfig.NodeName,
-	}
-
-	if agentConfig.NodeExternalIP != "" {
-		result[ExternalIPLabel] = agentConfig.NodeExternalIP
-	}
-
-	result = labels.Merge(nodeLabels, result)
-	return result, !equality.Semantic.DeepEqual(nodeLabels, result)
+func updateLegacyAddressLabels(agentConfig *daemonconfig.Agent, nodeLabels map[string]string) (map[string]string, bool) {
+	ls := labels.Set(nodeLabels)
+	if ls.Has(cp.InternalIPKey) || ls.Has(cp.HostnameKey) {
+		result := map[string]string{
+			cp.InternalIPKey: agentConfig.NodeIP,
+			cp.HostnameKey:   agentConfig.NodeName,
+		}
+
+		if agentConfig.NodeExternalIP != "" {
+			result[cp.ExternalIPKey] = agentConfig.NodeExternalIP
+		}
+
+		result = labels.Merge(nodeLabels, result)
+		return result, !equality.Semantic.DeepEqual(nodeLabels, result)
+	}
+	return nil, false
+}
+
+func updateAddressAnnotations(agentConfig *daemonconfig.Agent, nodeAnnotations map[string]string) (map[string]string, bool) {
+	result := map[string]string{
+		cp.InternalIPKey: util.JoinIPs(agentConfig.NodeIPs),
+		cp.HostnameKey:   agentConfig.NodeName,
+	}
+
+	if agentConfig.NodeExternalIP != "" {
+		result[cp.ExternalIPKey] = util.JoinIPs(agentConfig.NodeExternalIPs)
+	}
+
+	result = labels.Merge(nodeAnnotations, result)
+	return result, !equality.Semantic.DeepEqual(nodeAnnotations, result)
 }
 
 // setupTunnelAndRunAgent should start the setup tunnel before starting kubelet and kubeproxy
 // there are special case for etcd agents, it will wait until it can find the apiaddress from
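With this change the agent publishes dual-stack node addresses as comma-joined annotation values (the legacy single-value labels are only kept up to date if they already exist on the node), and the cloud provider hunk further below splits them back apart on ",". A rough illustration of the annotation payload this produces, using example addresses; the k3s.io/* keys follow the version.Program+".io/..." pattern from the cloudprovider package.

package main

import "fmt"

func main() {
	// Example addresses only; util.JoinIPs is what joins them in the real code.
	annotations := map[string]string{
		"k3s.io/internal-ip": "10.0.10.5,2001:db8::5", // from agentConfig.NodeIPs
		"k3s.io/external-ip": "203.0.113.7",           // from agentConfig.NodeExternalIPs
		"k3s.io/hostname":    "node-1",                // from agentConfig.NodeName
	}
	for k, v := range annotations {
		fmt.Printf("%s: %s\n", k, v)
	}
}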
@@ -27,19 +27,24 @@ func enableSystemControl(file string) {
 	}
 }
 
-func Configure() {
+func Configure(enableIPv6 bool) {
 	loadKernelModule("overlay")
 	loadKernelModule("nf_conntrack")
 	loadKernelModule("br_netfilter")
 	loadKernelModule("iptable_nat")
+	if enableIPv6 {
+		loadKernelModule("ip6table_nat")
+	}
 
 	// Kernel is inconsistent about how devconf is configured for
 	// new network namespaces between ipv4 and ipv6. Make sure to
-	// enable forwarding on all and default for both ipv4 and ipv8.
+	// enable forwarding on all and default for both ipv4 and ipv6.
 	enableSystemControl("/proc/sys/net/ipv4/conf/all/forwarding")
 	enableSystemControl("/proc/sys/net/ipv4/conf/default/forwarding")
-	enableSystemControl("/proc/sys/net/ipv6/conf/all/forwarding")
-	enableSystemControl("/proc/sys/net/ipv6/conf/default/forwarding")
-	enableSystemControl("/proc/sys/net/bridge/bridge-nf-call-iptables")
-	enableSystemControl("/proc/sys/net/bridge/bridge-nf-call-ip6tables")
+	enableSystemControl("/proc/sys/net/bridge/bridge-nf-call-iptables")
+	if enableIPv6 {
+		enableSystemControl("/proc/sys/net/ipv6/conf/all/forwarding")
+		enableSystemControl("/proc/sys/net/ipv6/conf/default/forwarding")
+		enableSystemControl("/proc/sys/net/bridge/bridge-nf-call-ip6tables")
+	}
 }
@@ -50,8 +50,8 @@ func Run(ctx *cli.Context) error {
 		return fmt.Errorf("--server is required")
 	}
 
-	if cmds.AgentConfig.FlannelIface != "" && cmds.AgentConfig.NodeIP == "" {
-		cmds.AgentConfig.NodeIP = netutil.GetIPFromInterface(cmds.AgentConfig.FlannelIface)
+	if cmds.AgentConfig.FlannelIface != "" && len(cmds.AgentConfig.NodeIP) == 0 {
+		cmds.AgentConfig.NodeIP.Set(netutil.GetIPFromInterface(cmds.AgentConfig.FlannelIface))
 	}
 
 	logrus.Info("Starting " + version.Program + " agent " + ctx.App.Version)
@@ -20,8 +20,8 @@ type Agent struct {
 	LBServerPort   int
 	ResolvConf     string
 	DataDir        string
-	NodeIP         string
-	NodeExternalIP string
+	NodeIP         cli.StringSlice
+	NodeExternalIP cli.StringSlice
 	NodeName       string
 	PauseImage     string
 	Snapshotter    string
@@ -52,15 +52,15 @@ type AgentShared struct {
 var (
 	appName     = filepath.Base(os.Args[0])
 	AgentConfig Agent
-	NodeIPFlag  = cli.StringFlag{
+	NodeIPFlag  = cli.StringSliceFlag{
 		Name:        "node-ip,i",
-		Usage:       "(agent/networking) IP address to advertise for node",
-		Destination: &AgentConfig.NodeIP,
+		Usage:       "(agent/networking) IPv4/IPv6 addresses to advertise for node",
+		Value:       &AgentConfig.NodeIP,
 	}
-	NodeExternalIPFlag = cli.StringFlag{
+	NodeExternalIPFlag = cli.StringSliceFlag{
 		Name:        "node-external-ip",
-		Usage:       "(agent/networking) External IP address to advertise for node",
-		Destination: &AgentConfig.NodeExternalIP,
+		Usage:       "(agent/networking) IPv4/IPv6 external IP addresses to advertise for node",
+		Value:       &AgentConfig.NodeExternalIP,
 	}
 	NodeNameFlag = cli.StringFlag{
 		Name: "node-name",
@@ -13,15 +13,15 @@ const (
 )
 
 type Server struct {
-	ClusterCIDR          string
+	ClusterCIDR          cli.StringSlice
 	AgentToken           string
 	AgentTokenFile       string
 	Token                string
 	TokenFile            string
 	ClusterSecret        string
-	ServiceCIDR          string
+	ServiceCIDR          cli.StringSlice
 	ServiceNodePortRange string
-	ClusterDNS           string
+	ClusterDNS           cli.StringSlice
 	ClusterDomain        string
 	// The port which kubectl clients can access k8s
 	HTTPSPort int
@@ -108,7 +108,7 @@ func NewServerCommand(action func(*cli.Context) error) cli.Command {
 		},
 		cli.StringFlag{
 			Name:        "advertise-address",
-			Usage:       "(listener) IP address that apiserver uses to advertise to members of the cluster (default: node-external-ip/node-ip)",
+			Usage:       "(listener) IPv4 address that apiserver uses to advertise to members of the cluster (default: node-external-ip/node-ip)",
 			Destination: &ServerConfig.AdvertiseIP,
 		},
 		cli.IntFlag{
@@ -118,7 +118,7 @@ func NewServerCommand(action func(*cli.Context) error) cli.Command {
 		},
 		cli.StringSliceFlag{
 			Name:  "tls-san",
-			Usage: "(listener) Add additional hostname or IP as a Subject Alternative Name in the TLS cert",
+			Usage: "(listener) Add additional hostnames or IPv4/IPv6 addresses as Subject Alternative Names on the server TLS cert",
 			Value: &ServerConfig.TLSSan,
 		},
 		cli.StringFlag{
@@ -126,17 +126,15 @@ func NewServerCommand(action func(*cli.Context) error) cli.Command {
 			Usage:       "(data) Folder to hold state default /var/lib/rancher/" + version.Program + " or ${HOME}/.rancher/" + version.Program + " if not root",
 			Destination: &ServerConfig.DataDir,
 		},
-		cli.StringFlag{
+		cli.StringSliceFlag{
 			Name:        "cluster-cidr",
-			Usage:       "(networking) Network CIDR to use for pod IPs",
-			Destination: &ServerConfig.ClusterCIDR,
-			Value:       "10.42.0.0/16",
+			Usage:       "(networking) IPv4/IPv6 network CIDRs to use for pod IPs (default: 10.42.0.0/16)",
+			Value:       &ServerConfig.ClusterCIDR,
 		},
-		cli.StringFlag{
+		cli.StringSliceFlag{
 			Name:        "service-cidr",
-			Usage:       "(networking) Network CIDR to use for services IPs",
-			Destination: &ServerConfig.ServiceCIDR,
-			Value:       "10.43.0.0/16",
+			Usage:       "(networking) IPv4/IPv6 network CIDRs to use for service IPs (default: 10.43.0.0/16)",
+			Value:       &ServerConfig.ServiceCIDR,
 		},
 		cli.StringFlag{
 			Name: "service-node-port-range",
@@ -144,11 +142,10 @@ func NewServerCommand(action func(*cli.Context) error) cli.Command {
 			Destination: &ServerConfig.ServiceNodePortRange,
 			Value:       "30000-32767",
 		},
-		cli.StringFlag{
+		cli.StringSliceFlag{
 			Name:        "cluster-dns",
-			Usage:       "(networking) Cluster IP for coredns service. Should be in your service-cidr range (default: 10.43.0.10)",
-			Destination: &ServerConfig.ClusterDNS,
-			Value:       "",
+			Usage:       "(networking) IPv4 Cluster IP for coredns service. Should be in your service-cidr range (default: 10.43.0.10)",
+			Value:       &ServerConfig.ClusterDNS,
 		},
 		cli.StringFlag{
 			Name: "cluster-domain",
@@ -20,6 +20,7 @@ import (
 	"github.com/rancher/k3s/pkg/rootless"
 	"github.com/rancher/k3s/pkg/server"
 	"github.com/rancher/k3s/pkg/token"
+	"github.com/rancher/k3s/pkg/util"
 	"github.com/rancher/k3s/pkg/version"
 	"github.com/rancher/wrangler/pkg/signals"
 	"github.com/sirupsen/logrus"
@@ -27,6 +28,7 @@ import (
 	utilnet "k8s.io/apimachinery/pkg/util/net"
 	kubeapiserverflag "k8s.io/component-base/cli/flag"
 	"k8s.io/kubernetes/pkg/controlplane"
+	utilsnet "k8s.io/utils/net"
 
 	_ "github.com/go-sql-driver/mysql" // ensure we have mysql
 	_ "github.com/lib/pq"              // ensure we have postgres
@@ -140,7 +142,7 @@ func run(app *cli.Context, cfg *cmds.Server, leaderControllers server.CustomCont
 	serverConfig.ControlConfig.EtcdS3Folder = cfg.EtcdS3Folder
 
 	if cfg.ClusterResetRestorePath != "" && !cfg.ClusterReset {
-		return errors.New("invalid flag use. --cluster-reset required with --cluster-reset-restore-path")
+		return errors.New("invalid flag use; --cluster-reset required with --cluster-reset-restore-path")
 	}
 
 	// make sure components are disabled so we only perform a restore
@@ -161,7 +163,7 @@ func run(app *cli.Context, cfg *cmds.Server, leaderControllers server.CustomCont
 	}
 
 	if serverConfig.ControlConfig.DisableETCD && serverConfig.ControlConfig.JoinURL == "" {
-		return errors.New("invalid flag use. --server required with --disable-etcd")
+		return errors.New("invalid flag use; --server is required with --disable-etcd")
 	}
 
 	if serverConfig.ControlConfig.DisableAPIServer {
@@ -174,50 +176,116 @@ func run(app *cli.Context, cfg *cmds.Server, leaderControllers server.CustomCont
 		}
 	}
 
-	if cmds.AgentConfig.FlannelIface != "" && cmds.AgentConfig.NodeIP == "" {
-		cmds.AgentConfig.NodeIP = netutil.GetIPFromInterface(cmds.AgentConfig.FlannelIface)
+	if cmds.AgentConfig.FlannelIface != "" && len(cmds.AgentConfig.NodeIP) == 0 {
+		cmds.AgentConfig.NodeIP.Set(netutil.GetIPFromInterface(cmds.AgentConfig.FlannelIface))
 	}
-	if serverConfig.ControlConfig.PrivateIP == "" && cmds.AgentConfig.NodeIP != "" {
-		serverConfig.ControlConfig.PrivateIP = cmds.AgentConfig.NodeIP
+
+	if serverConfig.ControlConfig.PrivateIP == "" && len(cmds.AgentConfig.NodeIP) != 0 {
+		// ignoring the error here is fine since etcd will fall back to the interface's IPv4 address
+		serverConfig.ControlConfig.PrivateIP, _ = util.GetFirst4String(cmds.AgentConfig.NodeIP)
 	}
-	if serverConfig.ControlConfig.AdvertiseIP == "" && cmds.AgentConfig.NodeExternalIP != "" {
-		serverConfig.ControlConfig.AdvertiseIP = cmds.AgentConfig.NodeExternalIP
+
+	// if not set, try setting advertise-ip from agent node-external-ip
+	if serverConfig.ControlConfig.AdvertiseIP == "" && len(cmds.AgentConfig.NodeExternalIP) != 0 {
+		serverConfig.ControlConfig.AdvertiseIP, _ = util.GetFirst4String(cmds.AgentConfig.NodeExternalIP)
 	}
-	if serverConfig.ControlConfig.AdvertiseIP == "" && cmds.AgentConfig.NodeIP != "" {
-		serverConfig.ControlConfig.AdvertiseIP = cmds.AgentConfig.NodeIP
+
+	// if not set, try setting advertise-ip from agent node-ip
+	if serverConfig.ControlConfig.AdvertiseIP == "" && len(cmds.AgentConfig.NodeIP) != 0 {
+		serverConfig.ControlConfig.AdvertiseIP, _ = util.GetFirst4String(cmds.AgentConfig.NodeIP)
 	}
+
+	// if we ended up with any advertise-ips, ensure they're added to the SAN list;
+	// note that kube-apiserver does not support dual-stack advertise-ip as of 1.21.0:
+	// https://github.com/kubernetes/kubeadm/issues/1612#issuecomment-772583989
 	if serverConfig.ControlConfig.AdvertiseIP != "" {
 		serverConfig.ControlConfig.SANs = append(serverConfig.ControlConfig.SANs, serverConfig.ControlConfig.AdvertiseIP)
 	}
 
-	_, serverConfig.ControlConfig.ClusterIPRange, err = net.ParseCIDR(cfg.ClusterCIDR)
-	if err != nil {
-		return errors.Wrapf(err, "Invalid CIDR %s: %v", cfg.ClusterCIDR, err)
+	// configure ClusterIPRanges
+	if len(cmds.ServerConfig.ClusterCIDR) == 0 {
+		cmds.ServerConfig.ClusterCIDR.Set("10.42.0.0/16")
 	}
-	_, serverConfig.ControlConfig.ServiceIPRange, err = net.ParseCIDR(cfg.ServiceCIDR)
-	if err != nil {
-		return errors.Wrapf(err, "Invalid CIDR %s: %v", cfg.ServiceCIDR, err)
+	for _, cidr := range cmds.ServerConfig.ClusterCIDR {
+		for _, v := range strings.Split(cidr, ",") {
+			_, parsed, err := net.ParseCIDR(v)
+			if err != nil {
+				return errors.Wrapf(err, "invalid cluster-cidr %s", v)
+			}
+			serverConfig.ControlConfig.ClusterIPRanges = append(serverConfig.ControlConfig.ClusterIPRanges, parsed)
+		}
 	}
+
+	// set ClusterIPRange to the first IPv4 block, for legacy clients
+	clusterIPRange, err := util.GetFirst4Net(serverConfig.ControlConfig.ClusterIPRanges)
+	if err != nil {
+		return errors.Wrap(err, "cannot configure IPv4 cluster-cidr")
+	}
+	serverConfig.ControlConfig.ClusterIPRange = clusterIPRange
+
+	// configure ServiceIPRanges
+	if len(cmds.ServerConfig.ServiceCIDR) == 0 {
+		cmds.ServerConfig.ServiceCIDR.Set("10.43.0.0/16")
+	}
+	for _, cidr := range cmds.ServerConfig.ServiceCIDR {
+		for _, v := range strings.Split(cidr, ",") {
+			_, parsed, err := net.ParseCIDR(v)
+			if err != nil {
+				return errors.Wrapf(err, "invalid service-cidr %s", v)
+			}
+			serverConfig.ControlConfig.ServiceIPRanges = append(serverConfig.ControlConfig.ServiceIPRanges, parsed)
+		}
+	}
+
+	// set ServiceIPRange to the first IPv4 block, for legacy clients
+	serviceIPRange, err := util.GetFirst4Net(serverConfig.ControlConfig.ServiceIPRanges)
+	if err != nil {
+		return errors.Wrap(err, "cannot configure IPv4 service-cidr")
+	}
+	serverConfig.ControlConfig.ServiceIPRange = serviceIPRange
+
 	serverConfig.ControlConfig.ServiceNodePortRange, err = utilnet.ParsePortRange(cfg.ServiceNodePortRange)
 	if err != nil {
-		return errors.Wrapf(err, "Invalid port range %s: %v", cfg.ServiceNodePortRange, err)
+		return errors.Wrapf(err, "invalid port range %s", cfg.ServiceNodePortRange)
 	}
 
+	// the apiserver service does not yet support dual-stack operation
 	_, apiServerServiceIP, err := controlplane.ServiceIPRange(*serverConfig.ControlConfig.ServiceIPRange)
 	if err != nil {
 		return err
 	}
 	serverConfig.ControlConfig.SANs = append(serverConfig.ControlConfig.SANs, apiServerServiceIP.String())
 
-	// If cluster-dns CLI arg is not set, we set ClusterDNS address to be ServiceCIDR network + 10,
+	// If cluster-dns CLI arg is not set, we set ClusterDNS address to be the first IPv4 ServiceCIDR network + 10,
 	// i.e. when you set service-cidr to 192.168.0.0/16 and don't provide cluster-dns, it will be set to 192.168.0.10
-	if cfg.ClusterDNS == "" {
-		serverConfig.ControlConfig.ClusterDNS = make(net.IP, 4)
-		copy(serverConfig.ControlConfig.ClusterDNS, serverConfig.ControlConfig.ServiceIPRange.IP.To4())
-		serverConfig.ControlConfig.ClusterDNS[3] = 10
+	// If there are no IPv4 ServiceCIDRs, an error will be raised.
+	if len(cmds.ServerConfig.ClusterDNS) == 0 {
+		clusterDNS, err := utilsnet.GetIndexedIP(serverConfig.ControlConfig.ServiceIPRange, 10)
+		if err != nil {
+			return errors.Wrap(err, "cannot configure default cluster-dns address")
+		}
+		serverConfig.ControlConfig.ClusterDNS = clusterDNS
+		serverConfig.ControlConfig.ClusterDNSs = []net.IP{serverConfig.ControlConfig.ClusterDNS}
 	} else {
-		serverConfig.ControlConfig.ClusterDNS = net.ParseIP(cfg.ClusterDNS)
+		for _, ip := range cmds.ServerConfig.ClusterDNS {
+			for _, v := range strings.Split(ip, ",") {
+				parsed := net.ParseIP(v)
+				if parsed == nil {
+					return fmt.Errorf("invalid cluster-dns address %s", v)
+				}
+				serverConfig.ControlConfig.ClusterDNSs = append(serverConfig.ControlConfig.ClusterDNSs, parsed)
+			}
+		}
+		// Set ClusterDNS to the first IPv4 address, for legacy clients
+		clusterDNS, err := util.GetFirst4(serverConfig.ControlConfig.ClusterDNSs)
+		if err != nil {
+			return errors.Wrap(err, "cannot configure IPv4 cluster-dns address")
+		}
+		serverConfig.ControlConfig.ClusterDNS = clusterDNS
+	}
+
+	if err := validateNetworkConfiguration(serverConfig); err != nil {
+		return err
 	}
 
 	if cfg.DefaultLocalStoragePath == "" {
@@ -257,7 +325,7 @@ func run(app *cli.Context, cfg *cmds.Server, leaderControllers server.CustomCont
 	tlsMinVersionArg := getArgValueFromList("tls-min-version", cfg.ExtraAPIArgs)
 	serverConfig.ControlConfig.TLSMinVersion, err = kubeapiserverflag.TLSVersion(tlsMinVersionArg)
 	if err != nil {
-		return errors.Wrap(err, "Invalid tls-min-version")
+		return errors.Wrap(err, "invalid tls-min-version")
 	}
 
 	serverConfig.StartupHooks = append(serverConfig.StartupHooks, cfg.StartupHooks...)
@@ -285,7 +353,7 @@ func run(app *cli.Context, cfg *cmds.Server, leaderControllers server.CustomCont
 	}
 	serverConfig.ControlConfig.TLSCipherSuites, err = kubeapiserverflag.TLSCipherSuites(tlsCipherSuites)
 	if err != nil {
-		return errors.Wrap(err, "Invalid tls-cipher-suites")
+		return errors.Wrap(err, "invalid tls-cipher-suites")
 	}
 
 	logrus.Info("Starting " + version.Program + " " + app.App.Version)
@@ -353,6 +421,34 @@ func run(app *cli.Context, cfg *cmds.Server, leaderControllers server.CustomCont
 	return agent.Run(ctx, agentConfig)
 }
 
+// validateNetworkConfig ensures that the network configuration values make sense.
+func validateNetworkConfiguration(serverConfig server.Config) error {
+	// Dual-stack operation requires fairly extensive manual configuration at the moment - do some
+	// preflight checks to make sure that the user isn't trying to use flannel/npc, or trying to
+	// enable dual-stack DNS (which we don't currently support since it's not easy to template)
+	dualCluster, err := utilsnet.IsDualStackCIDRs(serverConfig.ControlConfig.ClusterIPRanges)
+	if err != nil {
+		return errors.Wrap(err, "failed to validate cluster-cidr")
+	}
+	dualService, err := utilsnet.IsDualStackCIDRs(serverConfig.ControlConfig.ServiceIPRanges)
+	if err != nil {
+		return errors.Wrap(err, "failed to validate service-cidr")
+	}
+	dualDNS, err := utilsnet.IsDualStackIPs(serverConfig.ControlConfig.ClusterDNSs)
+	if err != nil {
+		return errors.Wrap(err, "failed to validate cluster-dns")
+	}
+
+	if (serverConfig.ControlConfig.FlannelBackend != "none" || serverConfig.ControlConfig.DisableNPC == false) && (dualCluster || dualService) {
+		return errors.New("flannel CNI and network policy enforcement are not compatible with dual-stack operation; server must be restarted with --flannel-backend=none --disable-network-policy and an alternative CNI plugin deployed")
+	}
+	if dualDNS == true {
+		return errors.New("dual-stack cluster-dns is not supported")
+	}
+
+	return nil
+}
+
 func knownIPs(ips []string) []string {
 	ips = append(ips, "127.0.0.1")
 	ip, err := utilnet.ChooseHostInterface()
@@ -3,6 +3,7 @@ package cloudprovider
 import (
 	"context"
 	"fmt"
+	"strings"
 
 	"github.com/pkg/errors"
 	"github.com/rancher/k3s/pkg/version"
@@ -13,9 +14,9 @@ import (
 )
 
 var (
-	InternalIPLabel = version.Program + ".io/internal-ip"
-	ExternalIPLabel = version.Program + ".io/external-ip"
-	HostnameLabel   = version.Program + ".io/hostname"
+	InternalIPKey = version.Program + ".io/internal-ip"
+	ExternalIPKey = version.Program + ".io/external-ip"
+	HostnameKey   = version.Program + ".io/hostname"
 )
 
 func (k *k3s) AddSSHKeyToAllInstances(ctx context.Context, user string, keyData []byte) error {
@@ -69,22 +70,32 @@ func (k *k3s) NodeAddresses(ctx context.Context, name types.NodeName) ([]corev1.
 		return nil, fmt.Errorf("Failed to find node %s: %v", name, err)
 	}
 	// check internal address
-	if node.Labels[InternalIPLabel] != "" {
-		addresses = append(addresses, corev1.NodeAddress{Type: corev1.NodeInternalIP, Address: node.Labels[InternalIPLabel]})
+	if address := node.Annotations[InternalIPKey]; address != "" {
+		for _, v := range strings.Split(address, ",") {
+			addresses = append(addresses, corev1.NodeAddress{Type: corev1.NodeInternalIP, Address: v})
+		}
+	} else if address = node.Labels[InternalIPKey]; address != "" {
+		addresses = append(addresses, corev1.NodeAddress{Type: corev1.NodeInternalIP, Address: address})
 	} else {
-		logrus.Infof("Couldn't find node internal ip label on node %s", name)
+		logrus.Infof("Couldn't find node internal ip annotation or label on node %s", name)
 	}
 
 	// check external address
-	if node.Labels[ExternalIPLabel] != "" {
-		addresses = append(addresses, corev1.NodeAddress{Type: corev1.NodeExternalIP, Address: node.Labels[ExternalIPLabel]})
+	if address := node.Annotations[ExternalIPKey]; address != "" {
+		for _, v := range strings.Split(address, ",") {
+			addresses = append(addresses, corev1.NodeAddress{Type: corev1.NodeExternalIP, Address: v})
+		}
+	} else if address = node.Labels[ExternalIPKey]; address != "" {
+		addresses = append(addresses, corev1.NodeAddress{Type: corev1.NodeExternalIP, Address: address})
 	}
 
 	// check hostname
-	if node.Labels[HostnameLabel] != "" {
-		addresses = append(addresses, corev1.NodeAddress{Type: corev1.NodeHostName, Address: node.Labels[HostnameLabel]})
+	if address := node.Annotations[HostnameKey]; address != "" {
+		addresses = append(addresses, corev1.NodeAddress{Type: corev1.NodeHostName, Address: address})
+	} else if address = node.Labels[HostnameKey]; address != "" {
+		addresses = append(addresses, corev1.NodeAddress{Type: corev1.NodeHostName, Address: address})
 	} else {
-		logrus.Infof("Couldn't find node hostname label on node %s", name)
+		logrus.Infof("Couldn't find node hostname annotation or label on node %s", name)
 	}
 
 	return addresses, nil
@@ -13,6 +13,7 @@ import (
 	"github.com/opencontainers/runc/libcontainer/system"
 	"github.com/rancher/k3s/pkg/daemons/config"
 	"github.com/rancher/k3s/pkg/daemons/executor"
+	"github.com/rancher/k3s/pkg/util"
 	"github.com/rancher/k3s/pkg/version"
 	"github.com/sirupsen/logrus"
 	"golang.org/x/sys/unix"
@@ -47,7 +48,7 @@ func startKubeProxy(cfg *config.Agent) error {
 		"proxy-mode":           "iptables",
 		"healthz-bind-address": "127.0.0.1",
 		"kubeconfig":           cfg.KubeConfigKubeProxy,
-		"cluster-cidr":         cfg.ClusterCIDR.String(),
+		"cluster-cidr":         util.JoinIPNets(cfg.ClusterCIDRs),
 	}
 	if cfg.NodeName != "" {
 		argsMap["hostname-override"] = cfg.NodeName
@@ -94,7 +95,7 @@ func startKubelet(cfg *config.Agent) error {
 		argsMap["network-plugin"] = "cni"
 	}
 	if len(cfg.ClusterDNS) > 0 {
-		argsMap["cluster-dns"] = cfg.ClusterDNS.String()
+		argsMap["cluster-dns"] = util.JoinIPs(cfg.ClusterDNSs)
 	}
 	if cfg.ResolvConf != "" {
 		argsMap["resolv-conf"] = cfg.ResolvConf
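Because the kubelet, kube-proxy, and controller-manager accept comma-separated values for these flags when dual-stack support is available, joining the lists with util.JoinIPNets/util.JoinIPs is enough to pass both address families through. An illustrative rendering of the resulting arguments, with example CIDRs rather than defaults:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Example values only; mirrors the comma-joined form produced above.
	clusterCIDRs := []string{"10.42.0.0/16", "2001:db8:42::/56"}
	clusterDNS := []string{"10.43.0.10"}

	fmt.Println("--cluster-cidr=" + strings.Join(clusterCIDRs, ","))
	fmt.Println("--cluster-dns=" + strings.Join(clusterDNS, ","))
	// Output:
	// --cluster-cidr=10.42.0.0/16,2001:db8:42::/56
	// --cluster-dns=10.43.0.10
}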
|
|
|
@ -58,10 +58,13 @@ type Agent struct {
|
||||||
NodeConfigPath string
|
NodeConfigPath string
|
||||||
ServingKubeletCert string
|
ServingKubeletCert string
|
||||||
ServingKubeletKey string
|
ServingKubeletKey string
|
||||||
ServiceCIDR net.IPNet
|
ServiceCIDR *net.IPNet
|
||||||
|
ServiceCIDRs []*net.IPNet
|
||||||
ServiceNodePortRange utilnet.PortRange
|
ServiceNodePortRange utilnet.PortRange
|
||||||
ClusterCIDR net.IPNet
|
ClusterCIDR *net.IPNet
|
||||||
|
ClusterCIDRs []*net.IPNet
|
||||||
ClusterDNS net.IP
|
ClusterDNS net.IP
|
||||||
|
ClusterDNSs []net.IP
|
||||||
ClusterDomain string
|
ClusterDomain string
|
||||||
ResolvConf string
|
ResolvConf string
|
||||||
RootDir string
|
RootDir string
|
||||||
|
@@ -69,7 +72,9 @@ type Agent struct {
     KubeConfigKubeProxy string
     KubeConfigK3sController string
     NodeIP string
+    NodeIPs []net.IP
     NodeExternalIP string
+    NodeExternalIPs []net.IP
     RuntimeSocket string
     ListenAddress string
     ClientCA string
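The Agent config now carries both the original single-value fields and new plural fields for dual-stack operation. A rough sketch, under assumed flag semantics (comma-separated CLI values; names illustrative, not the exact k3s wiring), of how such slices could be populated:

    package main

    import (
        "fmt"
        "net"
        "strings"
    )

    // parseCIDRs splits a comma-separated flag value into the []*net.IPNet
    // shape used by the new ClusterCIDRs/ServiceCIDRs fields.
    func parseCIDRs(arg string) ([]*net.IPNet, error) {
        var nets []*net.IPNet
        for _, s := range strings.Split(arg, ",") {
            _, n, err := net.ParseCIDR(strings.TrimSpace(s))
            if err != nil {
                return nil, err
            }
            nets = append(nets, n)
        }
        return nets, nil
    }

    func main() {
        cidrs, err := parseCIDRs("10.42.0.0/16,2001:db8:42::/56") // hypothetical flag value
        if err != nil {
            panic(err)
        }
        fmt.Println(len(cidrs), cidrs[0], cidrs[1]) // 2 10.42.0.0/16 2001:db8:42::/56
    }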
@@ -106,9 +111,12 @@ type Control struct {
     AgentToken string `json:"-"`
     Token string `json:"-"`
     ClusterIPRange *net.IPNet
+    ClusterIPRanges []*net.IPNet
     ServiceIPRange *net.IPNet
+    ServiceIPRanges []*net.IPNet
     ServiceNodePortRange *utilnet.PortRange
     ClusterDNS net.IP
+    ClusterDNSs []net.IP
     ClusterDomain string
     NoCoreDNS bool
     KubeConfigOutput string
@@ -16,6 +16,7 @@ import (
     "github.com/rancher/k3s/pkg/daemons/config"
     "github.com/rancher/k3s/pkg/daemons/control/deps"
     "github.com/rancher/k3s/pkg/daemons/executor"
+    util2 "github.com/rancher/k3s/pkg/util"
     "github.com/rancher/k3s/pkg/version"
     "github.com/rancher/wrangler-api/pkg/generated/controllers/rbac"
     "github.com/sirupsen/logrus"
@@ -100,7 +101,7 @@ func controllerManager(cfg *config.Control, runtime *config.ControlRuntime) erro
         "kubeconfig": runtime.KubeConfigController,
         "service-account-private-key-file": runtime.ServiceKey,
         "allocate-node-cidrs": "true",
-        "cluster-cidr": cfg.ClusterIPRange.String(),
+        "cluster-cidr": util2.JoinIPNets(cfg.ClusterIPRanges),
         "root-ca-file": runtime.ServerCA,
         "port": "10252",
         "profiling": "false",
@@ -155,7 +156,7 @@ func apiServer(ctx context.Context, cfg *config.Control, runtime *config.Control
     argsMap["allow-privileged"] = "true"
     argsMap["authorization-mode"] = strings.Join([]string{modes.ModeNode, modes.ModeRBAC}, ",")
     argsMap["service-account-signing-key-file"] = runtime.ServiceKey
-    argsMap["service-cluster-ip-range"] = cfg.ServiceIPRange.String()
+    argsMap["service-cluster-ip-range"] = util2.JoinIPNets(cfg.ServiceIPRanges)
     argsMap["service-node-port-range"] = cfg.ServiceNodePortRange.String()
     argsMap["advertise-port"] = strconv.Itoa(cfg.AdvertisePort)
     if cfg.AdvertiseIP != "" {
@@ -360,7 +361,7 @@ func cloudControllerManager(ctx context.Context, cfg *config.Control, runtime *c
 
     ccmOptions.KubeCloudShared.AllocateNodeCIDRs = true
     ccmOptions.KubeCloudShared.CloudProvider.Name = version.Program
-    ccmOptions.KubeCloudShared.ClusterCIDR = cfg.ClusterIPRange.String()
+    ccmOptions.KubeCloudShared.ClusterCIDR = util2.JoinIPNets(cfg.ClusterIPRanges)
     ccmOptions.KubeCloudShared.ConfigureCloudRoutes = false
     ccmOptions.Kubeconfig = runtime.KubeConfigCloudController
     ccmOptions.NodeStatusUpdateFrequency = metav1.Duration{Duration: 1 * time.Minute}
@@ -4,6 +4,7 @@ import (
     "context"
     "crypto"
     "crypto/x509"
+    "fmt"
     "io/ioutil"
     "net"
     "net/http"
@@ -153,8 +154,16 @@ func servingKubeletCert(server *config.Control, keyFile string, auth nodePassBoo
     }
 
     ips := []net.IP{net.ParseIP("127.0.0.1")}
 
     if nodeIP := req.Header.Get(version.Program + "-Node-IP"); nodeIP != "" {
-        ips = append(ips, net.ParseIP(nodeIP))
+        for _, v := range strings.Split(nodeIP, ",") {
+            ip := net.ParseIP(v)
+            if ip == nil {
+                sendError(fmt.Errorf("invalid IP address %s", ip), resp)
+                return
+            }
+            ips = append(ips, ip)
+        }
     }
 
     cert, err := certutil.NewSignedCert(certutil.Config{
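The serving-certificate handler above now treats the Node-IP request header as a comma-separated list and rejects any element that fails to parse, so each node IP can be included in the kubelet serving certificate. A standalone sketch of that validation step (header values invented):

    package main

    import (
        "fmt"
        "net"
        "strings"
    )

    // parseNodeIPs applies the same rule as the hunk above: every
    // comma-separated element must be a valid IP address.
    func parseNodeIPs(header string) ([]net.IP, error) {
        var ips []net.IP
        for _, v := range strings.Split(header, ",") {
            ip := net.ParseIP(v)
            if ip == nil {
                return nil, fmt.Errorf("invalid IP address %s", v)
            }
            ips = append(ips, ip)
        }
        return ips, nil
    }

    func main() {
        ips, err := parseNodeIPs("10.0.0.5,2001:db8::5")
        fmt.Println(ips, err) // [10.0.0.5 2001:db8::5] <nil>

        _, err = parseNodeIPs("10.0.0.5,not-an-ip")
        fmt.Println(err) // invalid IP address not-an-ip
    }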
@@ -435,8 +435,8 @@ func setNoProxyEnv(config *config.Control) error {
     envList = append(envList,
         ".svc",
         "."+config.ClusterDomain,
-        config.ClusterIPRange.String(),
-        config.ServiceIPRange.String(),
+        util.JoinIPNets(config.ClusterIPRanges),
+        util.JoinIPNets(config.ServiceIPRanges),
     )
     os.Unsetenv("no_proxy")
     return os.Setenv("NO_PROXY", strings.Join(envList, ","))
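With both address families configured, the NO_PROXY value assembled above becomes a single comma-separated string containing the joined cluster and service CIDRs. A rough sketch of the resulting value, using invented CIDRs and a typical cluster domain:

    package main

    import (
        "fmt"
        "os"
        "strings"
    )

    func main() {
        // Illustrative entries only; the real list also includes any
        // user-supplied NO_PROXY values.
        envList := []string{
            ".svc",
            ".cluster.local",
            "10.42.0.0/16,2001:db8:42::/56",  // joined cluster CIDRs
            "10.43.0.0/16,2001:db8:43::/112", // joined service CIDRs
        }
        os.Setenv("NO_PROXY", strings.Join(envList, ","))
        fmt.Println(os.Getenv("NO_PROXY"))
        // .svc,.cluster.local,10.42.0.0/16,2001:db8:42::/56,10.43.0.0/16,2001:db8:43::/112
    }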
@@ -0,0 +1,65 @@
+package util
+
+import (
+    "errors"
+    "net"
+    "strings"
+)
+
+// JoinIPs stringifies and joins a list of IP addresses with commas.
+func JoinIPs(elems []net.IP) string {
+    var strs []string
+    for _, elem := range elems {
+        strs = append(strs, elem.String())
+    }
+    return strings.Join(strs, ",")
+}
+
+// JoinIPNets stringifies and joins a list of IP networks with commas.
+func JoinIPNets(elems []*net.IPNet) string {
+    var strs []string
+    for _, elem := range elems {
+        strs = append(strs, elem.String())
+    }
+    return strings.Join(strs, ",")
+}
+
+// GetFirst4Net returns the first IPv4 network from the list of IP networks.
+// If no IPv4 addresses are found, an error is raised.
+func GetFirst4Net(elems []*net.IPNet) (*net.IPNet, error) {
+    for _, elem := range elems {
+        if elem == nil || elem.IP.To4() == nil {
+            continue
+        }
+        return elem, nil
+    }
+    return nil, errors.New("no IPv4 CIDRs found")
+}
+
+// GetFirst4 returns the first IPv4 address from the list of IP addresses.
+// If no IPv4 addresses are found, an error is raised.
+func GetFirst4(elems []net.IP) (net.IP, error) {
+    for _, elem := range elems {
+        if elem == nil || elem.To4() == nil {
+            continue
+        }
+        return elem, nil
+    }
+    return nil, errors.New("no IPv4 address found")
+}
+
+// GetFirst4String returns the first IPv4 address from a list of IP address strings.
+// If no IPv4 addresses are found, an error is raised.
+func GetFirst4String(elems []string) (string, error) {
+    ips := []net.IP{}
+    for _, elem := range elems {
+        for _, v := range strings.Split(elem, ",") {
+            ips = append(ips, net.ParseIP(v))
+        }
+    }
+    ip, err := GetFirst4(ips)
+    if err != nil {
+        return "", err
+    }
+    return ip.String(), nil
+}
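A short usage sketch of the helpers defined above: a dual-stack CIDR list joins into one flag value, while GetFirst4Net picks the IPv4 entry for components that remain single-stack (addresses invented; the two helpers are copied verbatim so the snippet compiles on its own):

    package main

    import (
        "errors"
        "fmt"
        "net"
        "strings"
    )

    func JoinIPNets(elems []*net.IPNet) string {
        var strs []string
        for _, elem := range elems {
            strs = append(strs, elem.String())
        }
        return strings.Join(strs, ",")
    }

    func GetFirst4Net(elems []*net.IPNet) (*net.IPNet, error) {
        for _, elem := range elems {
            if elem == nil || elem.IP.To4() == nil {
                continue
            }
            return elem, nil
        }
        return nil, errors.New("no IPv4 CIDRs found")
    }

    func main() {
        _, v6, _ := net.ParseCIDR("2001:db8:42::/56")
        _, v4, _ := net.ParseCIDR("10.42.0.0/16")
        cidrs := []*net.IPNet{v6, v4}

        fmt.Println(JoinIPNets(cidrs)) // 2001:db8:42::/56,10.42.0.0/16

        first4, err := GetFirst4Net(cidrs)
        fmt.Println(first4, err) // 10.42.0.0/16 <nil>
    }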
@@ -3053,6 +3053,7 @@ k8s.io/metrics/pkg/client/external_metrics
 # k8s.io/mount-utils v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.21.0-k3s1
 k8s.io/mount-utils
 # k8s.io/utils v0.0.0-20201110183641-67b214c5f920
+## explicit
 k8s.io/utils/buffer
 k8s.io/utils/clock
 k8s.io/utils/exec