mirror of https://github.com/k3s-io/k3s
Merge pull request #63049 from andrewsykim/kcm-nodeipam
Automatic merge from submit-queue (batch tested with PRs 63049, 59731). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).
re-enable nodeipam in kube-controller-manager
**What this PR does / why we need it**:
Re-enables the nodeipam controller for external clouds. Also includes a small refactor so that we no longer need to pass `allocateNodeCidr` into the controller.
In v1.10 we made a change (9187b343e1 in https://github.com/kubernetes/kubernetes/pull/57492) where nodeipam would be disabled for any cluster that sets `--cloud-provider=external`. The original intention was that the nodeipam controller is cloud-specific for some clouds (only GCE at the moment), so it should be moved to the CCM (cloud controller manager). After some discussions with wg-cloud-provider, it makes more sense to re-enable the nodeipam controller in the KCM and have the GCE CCM enable its own cloud-specific IPAM controller as part of [Initialize()](https://github.com/kubernetes/kubernetes/blob/master/pkg/cloudprovider/cloud.go#L33-L35). This allows GCE to run nodeipam both in the KCM (by setting `--cloud-provider=gce` and `--allocate-node-cidrs`) and in the CCM (once implemented in `Initialize()`), without disabling nodeipam in the KCM for all external clouds and without having to re-implement nodeipam in the CCM.
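For context, a minimal sketch of what that `Initialize()` hook could look like for a provider-run IPAM loop. Nothing below is part of this PR: `startCloudCIDRAllocator` is a hypothetical helper, and the `Initialize` signature shown is the one from this era of `pkg/cloudprovider` (it may differ on newer branches).

```go
// Sketch only: a cloud provider spawning its own cloud-specific IPAM
// loop from Initialize(), per the plan described above.
package gce

import (
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/pkg/controller"
)

type Cloud struct{ /* provider state elided */ }

// Initialize is called by the cloud-controller-manager before its control
// loops start; providers may spawn housekeeping goroutines here.
func (g *Cloud) Initialize(clientBuilder controller.ControllerClientBuilder) {
	client := clientBuilder.ClientOrDie("cloud-node-ipam-controller")
	go startCloudCIDRAllocator(client) // hypothetical cloud-specific IPAM loop
}

func startCloudCIDRAllocator(client clientset.Interface) {
	// Hypothetical: construct a cloud CIDR allocator (e.g. something like
	// ipam.NewCloudCIDRAllocator) and run it here, mirroring what nodeipam
	// does in the KCM when --cloud-provider=gce is set.
}
```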
**Which issue(s) this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close the issue(s) when PR gets merged)*:
Fixes #
**Special notes for your reviewer**:
**Release note**:
```release-note
Re-enable nodeipam controller for external clouds.
```
commit 5a54555f59
In `cmd/kube-controller-manager/app/controllermanager.go`, the `nodeipam` registration moves out of the `IncludeCloudLoops` block so it is always registered:

```diff
@@ -337,9 +337,9 @@ func NewControllerInitializers(loopMode ControllerLoopMode) map[string]InitFunc
 	controllers["ttl"] = startTTLController
 	controllers["bootstrapsigner"] = startBootstrapSignerController
 	controllers["tokencleaner"] = startTokenCleanerController
+	controllers["nodeipam"] = startNodeIpamController
 	if loopMode == IncludeCloudLoops {
 		controllers["service"] = startServiceController
-		controllers["nodeipam"] = startNodeIpamController
 		controllers["route"] = startRouteController
 		// TODO: volume controller into the IncludeCloudLoops only set.
 		// TODO: Separate cluster in cloud check from node lifecycle controller.
```
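Registering the controller unconditionally is harmless because every start function reports whether it actually ran. A self-contained sketch of that pattern, with simplified stand-ins for the real `InitFunc` and `ControllerContext` types:

```go
package main

import "fmt"

// Simplified stand-ins for the real types in cmd/kube-controller-manager/app;
// only the shape matters here.
type ControllerContext struct{ AllocateNodeCIDRs bool }

type InitFunc func(ctx ControllerContext) (enabled bool, err error)

func startNodeIpamController(ctx ControllerContext) (bool, error) {
	if !ctx.AllocateNodeCIDRs {
		return false, nil // registered but not enabled for this configuration
	}
	// ... the real function would construct and run the controller here ...
	return true, nil
}

func main() {
	controllers := map[string]InitFunc{}
	// After this PR, registration is unconditional; the start function
	// itself gates on --allocate-node-cidrs.
	controllers["nodeipam"] = startNodeIpamController

	enabled, err := controllers["nodeipam"](ControllerContext{AllocateNodeCIDRs: false})
	fmt.Println(enabled, err) // false <nil>: skipped cleanly, not an error
}
```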
In `cmd/kube-controller-manager/app/core.go`, `startNodeIpamController` now returns early instead of wrapping its body in an `if AllocateNodeCIDRs` block, and no longer passes the flag into the controller:

```diff
@@ -79,7 +79,11 @@ func startServiceController(ctx ControllerContext) (bool, error) {
 func startNodeIpamController(ctx ControllerContext) (bool, error) {
 	var clusterCIDR *net.IPNet = nil
 	var serviceCIDR *net.IPNet = nil
-	if ctx.ComponentConfig.KubeCloudShared.AllocateNodeCIDRs {
+
+	if !ctx.ComponentConfig.KubeCloudShared.AllocateNodeCIDRs {
+		return false, nil
+	}
+
 	var err error
 	if len(strings.TrimSpace(ctx.ComponentConfig.KubeCloudShared.ClusterCIDR)) != 0 {
 		_, clusterCIDR, err = net.ParseCIDR(ctx.ComponentConfig.KubeCloudShared.ClusterCIDR)
@@ -94,7 +98,6 @@ func startNodeIpamController(ctx ControllerContext) (bool, error) {
 			glog.Warningf("Unsuccessful parsing of service CIDR %v: %v", ctx.ComponentConfig.NodeIpamController.ServiceCIDR, err)
 		}
 	}
-	}
 
 	nodeIpamController, err := nodeipamcontroller.NewNodeIpamController(
 		ctx.InformerFactory.Core().V1().Nodes(),
@@ -103,7 +106,6 @@ func startNodeIpamController(ctx ControllerContext) (bool, error) {
 		clusterCIDR,
 		serviceCIDR,
 		int(ctx.ComponentConfig.NodeIpamController.NodeCIDRMaskSize),
-		ctx.ComponentConfig.KubeCloudShared.AllocateNodeCIDRs,
 		ipam.CIDRAllocatorType(ctx.ComponentConfig.KubeCloudShared.CIDRAllocatorType),
 	)
 	if err != nil {
```
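The CIDR parsing above can be exercised in isolation. A runnable sketch using the same `net.ParseCIDR` call (the example CIDR value is arbitrary):

```go
package main

import (
	"fmt"
	"net"
	"strings"
)

func main() {
	clusterCIDRFlag := "10.244.0.0/16" // stands in for --cluster-cidr

	var clusterCIDR *net.IPNet
	if len(strings.TrimSpace(clusterCIDRFlag)) != 0 {
		var err error
		// Same call as in the hunk above: the second return value is the
		// parsed network, which is what the controller actually uses.
		_, clusterCIDR, err = net.ParseCIDR(clusterCIDRFlag)
		if err != nil {
			fmt.Printf("Unsuccessful parsing of cluster CIDR %v: %v\n", clusterCIDRFlag, err)
		}
	}
	fmt.Println(clusterCIDR) // 10.244.0.0/16
}
```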
In `pkg/controller/nodeipam/node_ipam_controller.go`, the `allocateNodeCIDRs` field and constructor parameter are removed; callers are now expected to construct the controller only when CIDR allocation is enabled:

```diff
@@ -58,7 +58,6 @@ const (
 
 // Controller is the controller that manages node ipam state.
 type Controller struct {
-	allocateNodeCIDRs bool
 	allocatorType ipam.CIDRAllocatorType
 
 	cloud cloudprovider.Interface
@@ -88,7 +87,6 @@ func NewNodeIpamController(
 	clusterCIDR *net.IPNet,
 	serviceCIDR *net.IPNet,
 	nodeCIDRMaskSize int,
-	allocateNodeCIDRs bool,
 	allocatorType ipam.CIDRAllocatorType) (*Controller, error) {
 
 	if kubeClient == nil {
@@ -108,14 +106,12 @@ func NewNodeIpamController(
 		metrics.RegisterMetricAndTrackRateLimiterUsage("node_ipam_controller", kubeClient.CoreV1().RESTClient().GetRateLimiter())
 	}
 
-	if allocateNodeCIDRs {
 	if clusterCIDR == nil {
-		glog.Fatal("Controller: Must specify clusterCIDR if allocateNodeCIDRs == true.")
+		glog.Fatal("Controller: Must specify --cluster-cidr if --allocate-node-cidrs is set")
 	}
 	mask := clusterCIDR.Mask
 	if maskSize, _ := mask.Size(); maskSize > nodeCIDRMaskSize {
-		glog.Fatal("Controller: Invalid clusterCIDR, mask size of clusterCIDR must be less than nodeCIDRMaskSize.")
+		glog.Fatal("Controller: Invalid --cluster-cidr, mask size of cluster CIDR must be less than --node-cidr-mask-size")
 	}
-	}
 
 	ic := &Controller{
@@ -124,12 +120,10 @@ func NewNodeIpamController(
 		lookupIP:      net.LookupIP,
 		clusterCIDR:   clusterCIDR,
 		serviceCIDR:   serviceCIDR,
-		allocateNodeCIDRs: allocateNodeCIDRs,
 		allocatorType: allocatorType,
 	}
 
 	// TODO: Abstract this check into a generic controller manager should run method.
-	if ic.allocateNodeCIDRs {
 	if ic.allocatorType == ipam.IPAMFromClusterAllocatorType || ic.allocatorType == ipam.IPAMFromCloudAllocatorType {
 		cfg := &ipam.Config{
 			Resync: ipamResyncInterval,
@@ -157,7 +151,6 @@
 			return nil, err
 		}
 	}
-	}
 
 	ic.nodeLister = nodeInformer.Lister()
 	ic.nodeInformerSynced = nodeInformer.Informer().HasSynced
@@ -176,12 +169,9 @@ func (nc *Controller) Run(stopCh <-chan struct{}) {
 		return
 	}
 
 	// TODO: Abstract this check into a generic controller manager should run method.
-	if nc.allocateNodeCIDRs {
 	if nc.allocatorType != ipam.IPAMFromClusterAllocatorType && nc.allocatorType != ipam.IPAMFromCloudAllocatorType {
 		go nc.cidrAllocator.Run(stopCh)
 	}
-	}
 
 	<-stopCh
 }
```
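To make the guard's arithmetic concrete, a runnable sketch of the same mask-size check (example values are arbitrary): the cluster CIDR's mask must be shorter than the per-node mask, otherwise there are no subnets left to carve out for nodes.

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	_, clusterCIDR, _ := net.ParseCIDR("10.244.0.0/16") // --cluster-cidr
	nodeCIDRMaskSize := 24                              // --node-cidr-mask-size

	// Same check as in NewNodeIpamController above.
	maskSize, _ := clusterCIDR.Mask.Size()
	if maskSize > nodeCIDRMaskSize {
		fmt.Println("invalid: cluster CIDR mask must be shorter than the node CIDR mask")
		return
	}
	// A /16 cluster CIDR with /24 node CIDRs leaves 8 subnet bits: 256 nodes.
	bits := uint(nodeCIDRMaskSize - maskSize)
	fmt.Printf("%d subnet bits -> up to %d node CIDRs\n", bits, 1<<bits)
}
```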
And the ipamperf integration test's call site drops the boolean argument accordingly:

```diff
@@ -52,7 +52,7 @@ func setupAllocator(apiURL string, config *Config, clusterCIDR, serviceCIDR *net
 	sharedInformer := informers.NewSharedInformerFactory(clientSet, 1*time.Hour)
 	ipamController, err := nodeipam.NewNodeIpamController(
 		sharedInformer.Core().V1().Nodes(), config.Cloud, clientSet,
-		clusterCIDR, serviceCIDR, subnetMaskSize, true, config.AllocatorType,
+		clusterCIDR, serviceCIDR, subnetMaskSize, config.AllocatorType,
 	)
 	if err != nil {
 		return nil, shutdownFunc, err
```