Merge pull request #63049 from andrewsykim/kcm-nodeipam
Automatic merge from submit-queue (batch tested with PRs 63049, 59731). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).
re-enable nodeipam in kube-controller-manager
**What this PR does / why we need it**:
Re-enables the nodeipam controller for external clouds. Also does a small refactor so that we no longer need to pass `allocateNodeCIDRs` into the controller.
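A hedged sketch of the resulting control flow, using simplified hypothetical names rather than the real kube-controller-manager types: the start function gates on `AllocateNodeCIDRs` up front and returns early, so the controller constructor no longer needs the boolean at all.

```go
package main

import (
	"fmt"
	"net"
	"strings"
)

// config stands in for the relevant ComponentConfig fields.
type config struct {
	AllocateNodeCIDRs bool
	ClusterCIDR       string
}

// nodeIPAMController stands in for the real controller; note it no longer
// carries an allocateNodeCIDRs field.
type nodeIPAMController struct {
	clusterCIDR *net.IPNet
}

// newNodeIPAMController mirrors the refactor: the caller has already decided
// that CIDR allocation is enabled, so no boolean is passed in.
func newNodeIPAMController(clusterCIDR *net.IPNet) (*nodeIPAMController, error) {
	if clusterCIDR == nil {
		return nil, fmt.Errorf("must specify a cluster CIDR when node CIDR allocation is enabled")
	}
	return &nodeIPAMController{clusterCIDR: clusterCIDR}, nil
}

// startNodeIPAMController returns (false, nil) when the controller should
// simply not run, instead of wrapping everything in one big if block.
func startNodeIPAMController(cfg config) (bool, error) {
	if !cfg.AllocateNodeCIDRs {
		return false, nil
	}

	var clusterCIDR *net.IPNet
	if len(strings.TrimSpace(cfg.ClusterCIDR)) != 0 {
		var err error
		_, clusterCIDR, err = net.ParseCIDR(cfg.ClusterCIDR)
		if err != nil {
			return false, fmt.Errorf("unsuccessful parsing of cluster CIDR %q: %v", cfg.ClusterCIDR, err)
		}
	}

	if _, err := newNodeIPAMController(clusterCIDR); err != nil {
		return false, err
	}
	return true, nil
}

func main() {
	started, err := startNodeIPAMController(config{AllocateNodeCIDRs: true, ClusterCIDR: "10.244.0.0/16"})
	fmt.Println(started, err) // true <nil>

	started, err = startNodeIPAMController(config{AllocateNodeCIDRs: false})
	fmt.Println(started, err) // false <nil>: the controller simply does not run
}
```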
In v1.10 we made a change (commit 9187b343e1 in https://github.com/kubernetes/kubernetes/pull/57492) where nodeipam would be disabled for any cluster that sets `--cloud-provider=external`. The original intention was that the nodeipam controller is cloud-specific for some clouds (only GCE at the moment), so it should be moved to the CCM (cloud controller manager). After some discussion with wg-cloud-provider, it makes more sense to re-enable the nodeipam controller in the KCM and have the GCE CCM enable its own cloud-specific IPAM controller as part of [Initialize()](https://github.com/kubernetes/kubernetes/blob/master/pkg/cloudprovider/cloud.go#L33-L35). This allows GCE to run nodeipam both in the KCM (by setting `--cloud-provider=gce` and `--allocate-node-cidrs`) and in the CCM (once implemented in `Initialize()`), without disabling nodeipam in the KCM for all external clouds and without having to reimplement nodeipam in the CCM.
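For the GCE side, the idea is that the CCM wires up its own cloud-specific IPAM loop when the cloud provider is initialized. Below is a minimal sketch of that pattern only; the interface and method signature here are hypothetical stand-ins, not the real `cloudprovider.Interface`, and the actual GCE IPAM controller is not part of this PR.

```go
package main

import (
	"fmt"
	"time"
)

// CloudProvider is a hypothetical stand-in for the cloud provider interface;
// the real cloudprovider.Interface has more methods and a different
// Initialize signature.
type CloudProvider interface {
	// Initialize is called once by the controller manager and may spawn
	// goroutines for cloud-specific housekeeping, such as an IPAM sync loop.
	Initialize(stop <-chan struct{})
	ProviderName() string
}

// gceLikeCloud sketches a provider that starts its own cloud-specific IPAM
// controller from Initialize(), instead of relying on a generic nodeipam
// loop inside the cloud-controller-manager itself.
type gceLikeCloud struct{}

func (c *gceLikeCloud) ProviderName() string { return "gce-like" }

func (c *gceLikeCloud) Initialize(stop <-chan struct{}) {
	go func() {
		ticker := time.NewTicker(1 * time.Second)
		defer ticker.Stop()
		for {
			select {
			case <-stop:
				return
			case <-ticker.C:
				// A real implementation would reconcile node PodCIDRs against
				// the cloud's own allocations here.
				fmt.Println("cloud-specific IPAM sync tick")
			}
		}
	}()
}

func main() {
	stop := make(chan struct{})
	var cloud CloudProvider = &gceLikeCloud{}
	cloud.Initialize(stop) // the CCM would do this during startup
	time.Sleep(2500 * time.Millisecond)
	close(stop)
	fmt.Println("stopped", cloud.ProviderName())
}
```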
**Which issue(s) this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close the issue(s) when PR gets merged)*:
Fixes #
**Special notes for your reviewer**:
**Release note**:
```release-note
Re-enable nodeipam controller for external clouds.
```
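For illustration only: with nodeipam back in the KCM, a cluster using an external cloud provider that still wants in-tree node CIDR allocation could run the kube-controller-manager with flags along these lines (the CIDR values are placeholders):

```
kube-controller-manager \
  --cloud-provider=external \
  --allocate-node-cidrs=true \
  --cluster-cidr=10.244.0.0/16 \
  --service-cluster-ip-range=10.96.0.0/12 \
  --node-cidr-mask-size=24
```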
commit 5a54555f59
```diff
@@ -337,9 +337,9 @@ func NewControllerInitializers(loopMode ControllerLoopMode) map[string]InitFunc
 	controllers["ttl"] = startTTLController
 	controllers["bootstrapsigner"] = startBootstrapSignerController
 	controllers["tokencleaner"] = startTokenCleanerController
+	controllers["nodeipam"] = startNodeIpamController
 	if loopMode == IncludeCloudLoops {
 		controllers["service"] = startServiceController
-		controllers["nodeipam"] = startNodeIpamController
 		controllers["route"] = startRouteController
 		// TODO: volume controller into the IncludeCloudLoops only set.
 		// TODO: Separate cluster in cloud check from node lifecycle controller.
```
```diff
@@ -79,20 +79,23 @@ func startServiceController(ctx ControllerContext) (bool, error) {
 func startNodeIpamController(ctx ControllerContext) (bool, error) {
 	var clusterCIDR *net.IPNet = nil
 	var serviceCIDR *net.IPNet = nil
-	if ctx.ComponentConfig.KubeCloudShared.AllocateNodeCIDRs {
-		var err error
-		if len(strings.TrimSpace(ctx.ComponentConfig.KubeCloudShared.ClusterCIDR)) != 0 {
-			_, clusterCIDR, err = net.ParseCIDR(ctx.ComponentConfig.KubeCloudShared.ClusterCIDR)
-			if err != nil {
-				glog.Warningf("Unsuccessful parsing of cluster CIDR %v: %v", ctx.ComponentConfig.KubeCloudShared.ClusterCIDR, err)
-			}
-		}
-
-		if len(strings.TrimSpace(ctx.ComponentConfig.NodeIpamController.ServiceCIDR)) != 0 {
-			_, serviceCIDR, err = net.ParseCIDR(ctx.ComponentConfig.NodeIpamController.ServiceCIDR)
-			if err != nil {
-				glog.Warningf("Unsuccessful parsing of service CIDR %v: %v", ctx.ComponentConfig.NodeIpamController.ServiceCIDR, err)
-			}
-		}
-	}
+	if !ctx.ComponentConfig.KubeCloudShared.AllocateNodeCIDRs {
+		return false, nil
+	}
+
+	var err error
+	if len(strings.TrimSpace(ctx.ComponentConfig.KubeCloudShared.ClusterCIDR)) != 0 {
+		_, clusterCIDR, err = net.ParseCIDR(ctx.ComponentConfig.KubeCloudShared.ClusterCIDR)
+		if err != nil {
+			glog.Warningf("Unsuccessful parsing of cluster CIDR %v: %v", ctx.ComponentConfig.KubeCloudShared.ClusterCIDR, err)
+		}
+	}
+
+	if len(strings.TrimSpace(ctx.ComponentConfig.NodeIpamController.ServiceCIDR)) != 0 {
+		_, serviceCIDR, err = net.ParseCIDR(ctx.ComponentConfig.NodeIpamController.ServiceCIDR)
+		if err != nil {
+			glog.Warningf("Unsuccessful parsing of service CIDR %v: %v", ctx.ComponentConfig.NodeIpamController.ServiceCIDR, err)
+		}
+	}
 
```
```diff
@@ -103,7 +106,6 @@ func startNodeIpamController(ctx ControllerContext) (bool, error) {
 		clusterCIDR,
 		serviceCIDR,
 		int(ctx.ComponentConfig.NodeIpamController.NodeCIDRMaskSize),
-		ctx.ComponentConfig.KubeCloudShared.AllocateNodeCIDRs,
 		ipam.CIDRAllocatorType(ctx.ComponentConfig.KubeCloudShared.CIDRAllocatorType),
 	)
 	if err != nil {
```
```diff
@@ -58,8 +58,7 @@ const (
 
 // Controller is the controller that manages node ipam state.
 type Controller struct {
-	allocateNodeCIDRs bool
-	allocatorType     ipam.CIDRAllocatorType
+	allocatorType ipam.CIDRAllocatorType
 
 	cloud       cloudprovider.Interface
 	clusterCIDR *net.IPNet
```
```diff
@@ -88,7 +87,6 @@ func NewNodeIpamController(
 	clusterCIDR *net.IPNet,
 	serviceCIDR *net.IPNet,
 	nodeCIDRMaskSize int,
-	allocateNodeCIDRs bool,
 	allocatorType ipam.CIDRAllocatorType) (*Controller, error) {
 
 	if kubeClient == nil {
```
```diff
@@ -108,54 +106,49 @@ func NewNodeIpamController(
 		metrics.RegisterMetricAndTrackRateLimiterUsage("node_ipam_controller", kubeClient.CoreV1().RESTClient().GetRateLimiter())
 	}
 
-	if allocateNodeCIDRs {
-		if clusterCIDR == nil {
-			glog.Fatal("Controller: Must specify clusterCIDR if allocateNodeCIDRs == true.")
-		}
-		mask := clusterCIDR.Mask
-		if maskSize, _ := mask.Size(); maskSize > nodeCIDRMaskSize {
-			glog.Fatal("Controller: Invalid clusterCIDR, mask size of clusterCIDR must be less than nodeCIDRMaskSize.")
-		}
+	if clusterCIDR == nil {
+		glog.Fatal("Controller: Must specify --cluster-cidr if --allocate-node-cidrs is set")
+	}
+	mask := clusterCIDR.Mask
+	if maskSize, _ := mask.Size(); maskSize > nodeCIDRMaskSize {
+		glog.Fatal("Controller: Invalid --cluster-cidr, mask size of cluster CIDR must be less than --node-cidr-mask-size")
 	}
 
 	ic := &Controller{
-		cloud:             cloud,
-		kubeClient:        kubeClient,
-		lookupIP:          net.LookupIP,
-		clusterCIDR:       clusterCIDR,
-		serviceCIDR:       serviceCIDR,
-		allocateNodeCIDRs: allocateNodeCIDRs,
-		allocatorType:     allocatorType,
+		cloud:         cloud,
+		kubeClient:    kubeClient,
+		lookupIP:      net.LookupIP,
+		clusterCIDR:   clusterCIDR,
+		serviceCIDR:   serviceCIDR,
+		allocatorType: allocatorType,
 	}
 
-	// TODO: Abstract this check into a generic controller manager should run method.
-	if ic.allocateNodeCIDRs {
-		if ic.allocatorType == ipam.IPAMFromClusterAllocatorType || ic.allocatorType == ipam.IPAMFromCloudAllocatorType {
-			cfg := &ipam.Config{
-				Resync:       ipamResyncInterval,
-				MaxBackoff:   ipamMaxBackoff,
-				InitialRetry: ipamInitialBackoff,
-			}
-			switch ic.allocatorType {
-			case ipam.IPAMFromClusterAllocatorType:
-				cfg.Mode = nodesync.SyncFromCluster
-			case ipam.IPAMFromCloudAllocatorType:
-				cfg.Mode = nodesync.SyncFromCloud
-			}
-			ipamc, err := ipam.NewController(cfg, kubeClient, cloud, clusterCIDR, serviceCIDR, nodeCIDRMaskSize)
-			if err != nil {
-				glog.Fatalf("Error creating ipam controller: %v", err)
-			}
-			if err := ipamc.Start(nodeInformer); err != nil {
-				glog.Fatalf("Error trying to Init(): %v", err)
-			}
-		} else {
-			var err error
-			ic.cidrAllocator, err = ipam.New(
-				kubeClient, cloud, nodeInformer, ic.allocatorType, ic.clusterCIDR, ic.serviceCIDR, nodeCIDRMaskSize)
-			if err != nil {
-				return nil, err
-			}
+	if ic.allocatorType == ipam.IPAMFromClusterAllocatorType || ic.allocatorType == ipam.IPAMFromCloudAllocatorType {
+		cfg := &ipam.Config{
+			Resync:       ipamResyncInterval,
+			MaxBackoff:   ipamMaxBackoff,
+			InitialRetry: ipamInitialBackoff,
+		}
+		switch ic.allocatorType {
+		case ipam.IPAMFromClusterAllocatorType:
+			cfg.Mode = nodesync.SyncFromCluster
+		case ipam.IPAMFromCloudAllocatorType:
+			cfg.Mode = nodesync.SyncFromCloud
+		}
+		ipamc, err := ipam.NewController(cfg, kubeClient, cloud, clusterCIDR, serviceCIDR, nodeCIDRMaskSize)
+		if err != nil {
+			glog.Fatalf("Error creating ipam controller: %v", err)
+		}
+		if err := ipamc.Start(nodeInformer); err != nil {
+			glog.Fatalf("Error trying to Init(): %v", err)
+		}
+	} else {
+		var err error
+		ic.cidrAllocator, err = ipam.New(
+			kubeClient, cloud, nodeInformer, ic.allocatorType, ic.clusterCIDR, ic.serviceCIDR, nodeCIDRMaskSize)
+		if err != nil {
+			return nil, err
 		}
 	}
 
```
```diff
@@ -176,11 +169,8 @@ func (nc *Controller) Run(stopCh <-chan struct{}) {
 		return
 	}
 
-	// TODO: Abstract this check into a generic controller manager should run method.
-	if nc.allocateNodeCIDRs {
-		if nc.allocatorType != ipam.IPAMFromClusterAllocatorType && nc.allocatorType != ipam.IPAMFromCloudAllocatorType {
-			go nc.cidrAllocator.Run(stopCh)
-		}
-	}
+	if nc.allocatorType != ipam.IPAMFromClusterAllocatorType && nc.allocatorType != ipam.IPAMFromCloudAllocatorType {
+		go nc.cidrAllocator.Run(stopCh)
+	}
 
 	<-stopCh
```
```diff
@@ -52,7 +52,7 @@ func setupAllocator(apiURL string, config *Config, clusterCIDR, serviceCIDR *net
 	sharedInformer := informers.NewSharedInformerFactory(clientSet, 1*time.Hour)
 	ipamController, err := nodeipam.NewNodeIpamController(
 		sharedInformer.Core().V1().Nodes(), config.Cloud, clientSet,
-		clusterCIDR, serviceCIDR, subnetMaskSize, true, config.AllocatorType,
+		clusterCIDR, serviceCIDR, subnetMaskSize, config.AllocatorType,
 	)
 	if err != nil {
 		return nil, shutdownFunc, err
```