
Fix CloudDualStackNodeIPs feature-gate inconsistency

Enable the feature-gate for both the kubelet and the cloud-controller-manager. Enabling it on only one side breaks RKE2, where the feature-gates are not shared because the two components run in separate processes.

Signed-off-by: Brad Davidson <brad.davidson@rancher.com>
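
The gate has to be set explicitly on each component because every process parses its own --feature-gates flag; nothing propagates it from the server to the kubelet. As a rough sketch of the merge behaviour a helper like util.AddFeatureGate is assumed to provide (the helper body below is illustrative, not the k3s implementation):

package main

import (
	"fmt"
	"strings"
)

// addFeatureGate appends gate to an existing comma-separated
// --feature-gates value, leaving gates that are already set alone.
// Illustrative only; assumed to mirror util.AddFeatureGate.
func addFeatureGate(current, gate string) string {
	if current == "" {
		return gate
	}
	return strings.Join([]string{current, gate}, ",")
}

func main() {
	args := map[string]string{"feature-gates": "GracefulNodeShutdown=true"}
	args["feature-gates"] = addFeatureGate(args["feature-gates"], "CloudDualStackNodeIPs=true")
	fmt.Println(args["feature-gates"]) // GracefulNodeShutdown=true,CloudDualStackNodeIPs=true
}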
pull/8673/head
Brad Davidson authored and committed 1 year ago
commit b8dc95539b
  1. pkg/daemons/agent/agent_linux.go (17 changes)
  2. pkg/daemons/control/server.go (1 change)
  3. tests/integration/kubeflags/kubeflags_test.go (9 changes)

pkg/daemons/agent/agent_linux.go (17 changes)

@@ -123,12 +123,21 @@ func kubeletArgs(cfg *config.Agent) map[string]string {
 	if cfg.NodeName != "" {
 		argsMap["hostname-override"] = cfg.NodeName
 	}
+	if nodeIPs := util.JoinIPs(cfg.NodeIPs); nodeIPs != "" {
 		// If the embedded CCM is disabled, don't assume that dual-stack node IPs are safe.
 		// When using an external CCM, the user wants dual-stack node IPs, they will need to set the node-ip kubelet arg directly.
 		// This should be fine since most cloud providers have their own way of finding node IPs that doesn't depend on the kubelet
 		// setting them.
 		if cfg.DisableCCM {
 			dualStack, err := utilsnet.IsDualStackIPs(cfg.NodeIPs)
-			if err == nil && !dualStack {
-				argsMap["node-ip"] = cfg.NodeIP
+			if err == nil && dualStack {
+				argsMap["feature-gates"] = util.AddFeatureGate(argsMap["feature-gates"], "CloudDualStackNodeIPs=true")
 			}
 		} else {
-			if nodeIPs := util.JoinIPs(cfg.NodeIPs); nodeIPs != "" {
-				argsMap["node-ip"] = util.JoinIPs(cfg.NodeIPs)
-			}
+			// Cluster is using the embedded CCM, we know that the feature-gate will be enabled there as well.
+			argsMap["feature-gates"] = util.AddFeatureGate(argsMap["feature-gates"], "CloudDualStackNodeIPs=true")
 		}
+		argsMap["node-ip"] = nodeIPs
+	}
 	kubeletRoot, runtimeRoot, controllers := cgroups.CheckCgroups()
 	if !controllers["cpu"] {
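
On the kubelet side the gate is only added when the node IPs are actually dual-stack (or when the embedded CCM is enabled). The dual-stack check relies on k8s.io/utils/net; a minimal standalone sketch of how IsDualStackIPs behaves, using made-up sample addresses:

package main

import (
	"fmt"
	"net"

	utilsnet "k8s.io/utils/net"
)

func main() {
	// One IPv4 and one IPv6 address: IsDualStackIPs reports true.
	ips := []net.IP{net.ParseIP("10.0.0.2"), net.ParseIP("fd00::2")}
	dualStack, err := utilsnet.IsDualStackIPs(ips)
	fmt.Println(dualStack, err) // true <nil>

	// A single-family list reports false.
	single, _ := utilsnet.IsDualStackIPs([]net.IP{net.ParseIP("10.0.0.2")})
	fmt.Println(single) // false
}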

pkg/daemons/control/server.go (1 change)

@@ -313,6 +313,7 @@ func cloudControllerManager(ctx context.Context, cfg *config.Control) error {
 		"authentication-kubeconfig": runtime.KubeConfigCloudController,
 		"node-status-update-frequency": "1m0s",
 		"bind-address": cfg.Loopback(false),
+		"feature-gates": "CloudDualStackNodeIPs=true",
 	}
 	if cfg.NoLeaderElect {
 		argsMap["leader-elect"] = "false"
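
For the cloud-controller-manager the gate is simply baked into the default argument map above. Conceptually those map entries are later rendered as --key=value flags on the command line; a simplified sketch of that rendering (not the actual k3s argument-merging code, which also folds in user-supplied extra args):

package main

import (
	"fmt"
	"sort"
)

// flagsFromMap renders an args map as sorted --key=value flags,
// roughly how the defaults above end up on the command line.
func flagsFromMap(argsMap map[string]string) []string {
	flags := make([]string, 0, len(argsMap))
	for k, v := range argsMap {
		flags = append(flags, fmt.Sprintf("--%s=%s", k, v))
	}
	sort.Strings(flags)
	return flags
}

func main() {
	argsMap := map[string]string{
		"node-status-update-frequency": "1m0s",
		"feature-gates":                "CloudDualStackNodeIPs=true",
	}
	fmt.Println(flagsFromMap(argsMap))
	// [--feature-gates=CloudDualStackNodeIPs=true --node-status-update-frequency=1m0s]
}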

tests/integration/kubeflags/kubeflags_test.go (9 changes)

@@ -148,11 +148,12 @@ var _ = Describe("create a new cluster with kube-* flags", Ordered, func() {
 	It("should find cloud-controller-manager starting with"+
 		"\"--cloud-node,--cloud-node-lifecycle,--secure-port=0\" flags ", func() {
 		Eventually(func() error {
-			match, err := testutil.SearchK3sLog(server, "Running cloud-controller-manager --allocate-node-cidrs=true"+
-				" --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig"+
-				" --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --bind-address=127.0.0.1 "+
+			match, err := testutil.SearchK3sLog(server, "Running cloud-controller-manager --allocate-node-cidrs=true "+
+				"--authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig "+
+				"--authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --bind-address=127.0.0.1 "+
 				"--cloud-config=/var/lib/rancher/k3s/server/etc/cloud-config.yaml --cloud-provider=k3s --cluster-cidr=10.42.0.0/16 "+
-				"--configure-cloud-routes=false --controllers=*,-route,-cloud-node,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig "+
+				"--configure-cloud-routes=false --controllers=*,-route,-cloud-node,-cloud-node-lifecycle "+
+				"--feature-gates=CloudDualStackNodeIPs=true --kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig "+
 				"--leader-elect-resource-name=k3s-cloud-controller-manager --node-status-update-frequency=1m0s --profiling=false --secure-port=0")
 			if err != nil {
 				return err
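
The updated assertion just expects --feature-gates=CloudDualStackNodeIPs=true to appear in the logged cloud-controller-manager command line. A trivial standalone check against an abbreviated version of that log line (the log text here is shortened for illustration):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Abbreviated example of the log line the integration test searches for.
	logLine := "Running cloud-controller-manager --allocate-node-cidrs=true " +
		"--feature-gates=CloudDualStackNodeIPs=true --secure-port=0"
	fmt.Println(strings.Contains(logLine, "--feature-gates=CloudDualStackNodeIPs=true")) // true
}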
