mirror of https://github.com/k3s-io/k3s
make podcidr mask size configurable
parent cf7a3475f3
commit 17d5a302bb
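This commit threads a new NodeCIDRMaskSize setting (default 24, exposed on the controller manager as --node-cidr-mask-size) through NewNodeController and into NewCIDRRangeAllocator, replacing the previously hard-coded /24 pod CIDR per node. As a rough illustration (example values, not part of the commit), the mask size determines how many per-node pod ranges fit inside --cluster-cidr and how many addresses each node gets:

package main

import (
	"fmt"
	"net"
)

func main() {
	// Illustration only: count how many node pod CIDRs of a given mask size
	// fit inside a cluster CIDR. Values are examples, not taken from the commit.
	_, clusterCIDR, _ := net.ParseCIDR("10.244.0.0/16")
	nodeCIDRMaskSize := 24

	clusterMaskSize, _ := clusterCIDR.Mask.Size()
	// Each extra mask bit doubles the number of subnets: 2^(node - cluster).
	subnets := 1 << uint(nodeCIDRMaskSize-clusterMaskSize)
	podsPerNode := 1 << uint(32-nodeCIDRMaskSize)

	fmt.Printf("%v with /%d node CIDRs -> %d node ranges, %d addresses each\n",
		clusterCIDR, nodeCIDRMaskSize, subnets, podsPerNode)
	// Output: 10.244.0.0/16 with /24 node CIDRs -> 256 node ranges, 256 addresses each
}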
@@ -484,7 +484,7 @@ start_kube_controller_manager() {
     params="${params} --cluster-cidr=${CLUSTER_IP_RANGE}"
   fi
   if [ -n "${SERVICE_IP_RANGE:-}" ]; then
-    params="${params} --service-cidr=${SERVICE_IP_RANGE}"
+    params="${params} --service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE}"
   fi
   if [ "${ALLOCATE_NODE_CIDRS:-}" = "true" ]; then
     params="${params} --allocate-node-cidrs=${ALLOCATE_NODE_CIDRS}"
@@ -1,7 +1,7 @@
 {% set cluster_name = "" -%}
 {% set cluster_cidr = "" -%}
-{% set service_cidr = "" -%}
 {% set allocate_node_cidrs = "" -%}
+{% set service_cluster_ip_range = "" %}
 {% set terminated_pod_gc = "" -%}
@@ -12,7 +12,7 @@
 {% set cluster_cidr = "--cluster-cidr=" + pillar['cluster_cidr'] -%}
 {% endif -%}
 {% if pillar['service_cluster_ip_range'] is defined and pillar['service_cluster_ip_range'] != "" -%}
-{% set service_cidr = "--service_cidr=" + pillar['service_cluster_ip_range'] -%}
+{% set service_cluster_ip_range = "--service_cluster_ip_range=" + pillar['service_cluster_ip_range'] -%}
 {% endif -%}
 # When we're using flannel it is responsible for cidr allocation.
 # This is expected to be a short-term compromise.
@@ -63,7 +63,7 @@
 {% set log_level = pillar['controller_manager_test_log_level'] -%}
 {% endif -%}

-{% set params = "--master=127.0.0.1:8080" + " " + cluster_name + " " + cluster_cidr + " " + allocate_node_cidrs + " " + service_cidr + " " + terminated_pod_gc + " " + cloud_provider + " " + cloud_config + " " + service_account_key + " " + log_level + " " + root_ca_file -%}
+{% set params = "--master=127.0.0.1:8080" + " " + cluster_name + " " + cluster_cidr + " " + allocate_node_cidrs + " " + service_cluster_ip_range + " " + terminated_pod_gc + " " + cloud_provider + " " + cloud_config + " " + service_account_key + " " + log_level + " " + root_ca_file -%}

 # test_args has to be kept at the end, so they'll overwrite any prior configuration
@@ -206,7 +206,7 @@ func startComponents(firstManifestURL, secondManifestURL string) (string, string
 	go podInformer.Run(wait.NeverStop)

 	nodeController := nodecontroller.NewNodeController(nil, clientset, 5*time.Minute, flowcontrol.NewFakeAlwaysRateLimiter(), flowcontrol.NewFakeAlwaysRateLimiter(),
-		40*time.Second, 60*time.Second, 5*time.Second, nil, nil, false)
+		40*time.Second, 60*time.Second, 5*time.Second, nil, nil, 0, false)
 	nodeController.Run(5 * time.Second)
 	cadvisorInterface := new(cadvisortest.Fake)
@@ -231,7 +231,7 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
 	nodeController := nodecontroller.NewNodeController(cloud, clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "node-controller")),
 		s.PodEvictionTimeout.Duration, flowcontrol.NewTokenBucketRateLimiter(s.DeletingPodsQps, int(s.DeletingPodsBurst)),
 		flowcontrol.NewTokenBucketRateLimiter(s.DeletingPodsQps, int(s.DeletingPodsBurst)),
-		s.NodeMonitorGracePeriod.Duration, s.NodeStartupGracePeriod.Duration, s.NodeMonitorPeriod.Duration, clusterCIDR, serviceCIDR, s.AllocateNodeCIDRs)
+		s.NodeMonitorGracePeriod.Duration, s.NodeStartupGracePeriod.Duration, s.NodeMonitorPeriod.Duration, clusterCIDR, serviceCIDR, int(s.NodeCIDRMaskSize), s.AllocateNodeCIDRs)
 	nodeController.Run(s.NodeSyncPeriod.Duration)
 	time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
@@ -70,6 +70,7 @@ func NewCMServer() *CMServer {
 		NodeStartupGracePeriod: unversioned.Duration{Duration: 60 * time.Second},
 		NodeMonitorPeriod: unversioned.Duration{Duration: 5 * time.Second},
 		ClusterName: "kubernetes",
+		NodeCIDRMaskSize: 24,
 		TerminatedPodGCThreshold: 12500,
 		VolumeConfiguration: componentconfig.VolumeConfiguration{
 			EnableHostPathProvisioning: false,
@@ -142,6 +143,7 @@ func (s *CMServer) AddFlags(fs *pflag.FlagSet) {
 	fs.StringVar(&s.ClusterName, "cluster-name", s.ClusterName, "The instance prefix for the cluster")
 	fs.StringVar(&s.ClusterCIDR, "cluster-cidr", s.ClusterCIDR, "CIDR Range for Pods in cluster.")
 	fs.StringVar(&s.ServiceCIDR, "service-cluster-ip-range", s.ServiceCIDR, "CIDR Range for Services in cluster.")
+	fs.Int32Var(&s.NodeCIDRMaskSize, "node-cidr-mask-size", s.NodeCIDRMaskSize, "Mask size for node cidr in cluster.")
 	fs.BoolVar(&s.AllocateNodeCIDRs, "allocate-node-cidrs", false, "Should CIDRs for Pods be allocated and set on the cloud provider.")
 	fs.StringVar(&s.Master, "master", s.Master, "The address of the Kubernetes API server (overrides any value in kubeconfig)")
 	fs.StringVar(&s.Kubeconfig, "kubeconfig", s.Kubeconfig, "Path to kubeconfig file with authorization and master location information.")
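With the flag registered, the controller manager can be started with, for example, --allocate-node-cidrs=true, --cluster-cidr=10.244.0.0/16 and --node-cidr-mask-size=25 to hand each node a /25 pod range instead of the previously fixed /24 (the concrete values here are illustrative, not taken from the commit).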
@@ -157,7 +157,7 @@ func (s *CMServer) Run(_ []string) error {
 	nodeController := nodecontroller.NewNodeController(cloud, clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "node-controller")),
 		s.PodEvictionTimeout.Duration, flowcontrol.NewTokenBucketRateLimiter(s.DeletingPodsQps, int(s.DeletingPodsBurst)),
 		flowcontrol.NewTokenBucketRateLimiter(s.DeletingPodsQps, int(s.DeletingPodsBurst)),
-		s.NodeMonitorGracePeriod.Duration, s.NodeStartupGracePeriod.Duration, s.NodeMonitorPeriod.Duration, clusterCIDR, serviceCIDR, s.AllocateNodeCIDRs)
+		s.NodeMonitorGracePeriod.Duration, s.NodeStartupGracePeriod.Duration, s.NodeMonitorPeriod.Duration, clusterCIDR, serviceCIDR, int(s.NodeCIDRMaskSize), s.AllocateNodeCIDRs)
 	nodeController.Run(s.NodeSyncPeriod.Duration)

 	nodeStatusUpdaterController := node.NewStatusUpdater(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "node-status-controller")), s.NodeMonitorPeriod.Duration, time.Now)
@@ -34,7 +34,7 @@ cluster/photon-controller/util.sh: node_name=${1}
 cluster/rackspace/util.sh: local node_ip=$(nova show --minimal ${NODE_NAMES[$i]} \
 cluster/saltbase/salt/kube-admission-controls/init.sls:{% if 'LimitRanger' in pillar.get('admission_control', '') %}
 cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest:{% set params = address + " " + etcd_servers + " " + etcd_servers_overrides + " " + cloud_provider + " " + cloud_config + " " + runtime_config + " " + admission_control + " " + service_cluster_ip_range + " " + client_ca_file + basic_auth_file + " " + min_request_timeout -%}
-cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest:{% set params = "--master=127.0.0.1:8080" + " " + cluster_name + " " + cluster_cidr + " " + allocate_node_cidrs + " " + terminated_pod_gc + " " + cloud_provider + " " + cloud_config + " " + service_account_key + " " + log_level + " " + root_ca_file -%}
+cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest:{% set params = "--master=127.0.0.1:8080" + " " + cluster_name + " " + cluster_cidr + " " + allocate_node_cidrs + " " + service_cluster_ip_range + " " + terminated_pod_gc + " " + cloud_provider + " " + cloud_config + " " + service_account_key + " " + log_level + " " + root_ca_file -%}
 cluster/saltbase/salt/kube-proxy/kube-proxy.manifest: {% set api_servers_with_port = api_servers + ":6443" -%}
 cluster/saltbase/salt/kube-proxy/kube-proxy.manifest: {% set api_servers_with_port = api_servers -%}
 cluster/saltbase/salt/kube-proxy/kube-proxy.manifest: {% set cluster_cidr=" --cluster-cidr=" + pillar['cluster_cidr'] %}
@@ -287,6 +287,7 @@ network-plugin
 network-plugin-dir
 no-headers
 no-suggestions
+node-cidr-mask-size
 node-instance-group
 node-ip
 node-labels
@@ -524,6 +524,8 @@ type KubeControllerManagerConfiguration struct {
 	ClusterCIDR string `json:"clusterCIDR"`
 	// serviceCIDR is CIDR Range for Services in cluster.
 	ServiceCIDR string `json:"serviceCIDR"`
+	// NodeCIDRMaskSize is the mask size for node cidr in cluster.
+	NodeCIDRMaskSize int32 `json:"nodeCIDRMaskSize"`
 	// allocateNodeCIDRs enables CIDRs for Pods to be allocated and set on the
 	// cloud provider.
 	AllocateNodeCIDRs bool `json:"allocateNodeCIDRs"`
@@ -110,7 +110,11 @@ func (r *rangeAllocator) Occupy(cidr *net.IPNet) (err error) {
 	cidrMask := cidr.Mask
 	maskSize, _ := cidrMask.Size()

-	if r.clusterCIDR.Contains(cidr.IP.Mask(r.clusterCIDR.Mask)) && r.clusterMaskSize < maskSize {
+	if !r.clusterCIDR.Contains(cidr.IP.Mask(r.clusterCIDR.Mask)) && !cidr.Contains(r.clusterCIDR.IP.Mask(cidr.Mask)) {
+		return fmt.Errorf("cidr %v is out the range of cluster cidr %v", cidr, r.clusterCIDR)
+	}
+
+	if r.clusterMaskSize < maskSize {
 		subNetMask := net.CIDRMask(r.subNetMaskSize, 32)
 		begin, err = r.getIndexForCIDR(&net.IPNet{
 			IP: cidr.IP.To4().Mask(subNetMask),
@@ -127,7 +131,6 @@
 			IP: net.IP(ip).To4().Mask(subNetMask),
 			Mask: subNetMask,
 		})
-
 		if err != nil {
 			return err
 		}
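The Occupy change above first rejects CIDRs that lie entirely outside the cluster CIDR (neither range contains the other) and only then maps the range onto allocator indices via the sub-net mask. The following self-contained sketch mirrors that idea with only the standard library; indexForCIDR is a simplified, hypothetical stand-in for the allocator's getIndexForCIDR, and the addresses are illustrative:

package main

import (
	"encoding/binary"
	"fmt"
	"net"
)

// indexForCIDR is a simplified stand-in for the allocator's getIndexForCIDR:
// it returns the position of a node-sized block within the cluster CIDR.
// IPv4 only, matching the allocator's 32-bit mask arithmetic.
func indexForCIDR(clusterCIDR, cidr *net.IPNet, nodeMaskSize int) (int, error) {
	// Mirror the new out-of-range check: neither side contains the other.
	if !clusterCIDR.Contains(cidr.IP.Mask(clusterCIDR.Mask)) && !cidr.Contains(clusterCIDR.IP.Mask(cidr.Mask)) {
		return 0, fmt.Errorf("cidr %v is out the range of cluster cidr %v", cidr, clusterCIDR)
	}
	ip := binary.BigEndian.Uint32(cidr.IP.To4())
	base := binary.BigEndian.Uint32(clusterCIDR.IP.To4())
	// Offset in addresses divided by the size of one node CIDR.
	return int((ip - base) >> uint(32-nodeMaskSize)), nil
}

func main() {
	_, cluster, _ := net.ParseCIDR("10.244.0.0/16")
	_, node, _ := net.ParseCIDR("10.244.42.0/24")
	idx, err := indexForCIDR(cluster, node, 24)
	fmt.Println(idx, err) // 42 <nil>
}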
@@ -336,6 +336,7 @@ func TestOccupy(t *testing.T) {
 		}
 		if err != nil && !tc.expectErr {
 			t.Errorf("unexpected error: %v", err)
 			continue
 		}

 		expectedUsed := big.Int{}
@@ -148,6 +148,7 @@ func NewNodeController(
 	nodeMonitorPeriod time.Duration,
 	clusterCIDR *net.IPNet,
 	serviceCIDR *net.IPNet,
+	nodeCIDRMaskSize int,
 	allocateNodeCIDRs bool) *NodeController {
 	eventBroadcaster := record.NewBroadcaster()
 	recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "controllermanager"})
@@ -168,11 +169,8 @@
 			glog.Fatal("NodeController: Must specify clusterCIDR if allocateNodeCIDRs == true.")
 		}
 		mask := clusterCIDR.Mask
-		// TODO(mqliang): Make pod CIDR mask size configurable.
-		// For now, we assume podCIDR mask size is 24, so make sure the
-		// clusterCIDR mask size is larger than 24.
-		if maskSize, _ := mask.Size(); maskSize > 24 {
-			glog.Fatal("NodeController: Invalid clusterCIDR, mask size must be less than 24.")
+		if maskSize, _ := mask.Size(); maskSize > nodeCIDRMaskSize {
+			glog.Fatal("NodeController: Invalid clusterCIDR, mask size of clusterCIDR must be less than nodeCIDRMaskSize.")
 		}
 	}
 	evictorLock := sync.Mutex{}
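With the hard-coded 24 removed, NewNodeController now only requires that the cluster CIDR's mask be no larger than the configured node CIDR mask size, since every node range has to fit inside the cluster range. A minimal sketch of that check, with example values not taken from the commit:

package main

import (
	"fmt"
	"net"
)

// validateMasks mirrors the check added in NewNodeController: the cluster CIDR
// must be wide enough to hold at least one node CIDR of the configured size.
func validateMasks(clusterCIDR *net.IPNet, nodeCIDRMaskSize int) error {
	if maskSize, _ := clusterCIDR.Mask.Size(); maskSize > nodeCIDRMaskSize {
		return fmt.Errorf("invalid clusterCIDR %v: mask size %d must not exceed node CIDR mask size %d",
			clusterCIDR, maskSize, nodeCIDRMaskSize)
	}
	return nil
}

func main() {
	_, cluster, _ := net.ParseCIDR("10.244.0.0/16")
	fmt.Println(validateMasks(cluster, 24)) // <nil>: a /16 can be split into /24 node ranges
	fmt.Println(validateMasks(cluster, 14)) // error: a /16 cannot hold /14 node ranges
}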
@@ -258,8 +256,7 @@
 	)

 	if allocateNodeCIDRs {
-		// TODO(mqliang): make pod CIDR mask size configurable, for now set it to 24.
-		nc.cidrAllocator = NewCIDRRangeAllocator(clusterCIDR, 24)
+		nc.cidrAllocator = NewCIDRRangeAllocator(clusterCIDR, nodeCIDRMaskSize)
 	}

 	return nc
@@ -267,8 +264,9 @@

 // Run starts an asynchronous loop that monitors the status of cluster nodes.
 func (nc *NodeController) Run(period time.Duration) {
-
-	nc.filterOutServiceRange()
+	if nc.allocateNodeCIDRs {
+		nc.filterOutServiceRange()
+	}

 	go nc.nodeController.Run(wait.NeverStop)
 	go nc.podController.Run(wait.NeverStop)
@@ -341,7 +339,7 @@
 }

 func (nc *NodeController) filterOutServiceRange() {
-	if !nc.clusterCIDR.Contains(nc.serviceCIDR.IP.Mask(nc.clusterCIDR.Mask)) {
+	if !nc.clusterCIDR.Contains(nc.serviceCIDR.IP.Mask(nc.clusterCIDR.Mask)) && !nc.serviceCIDR.Contains(nc.clusterCIDR.IP.Mask(nc.serviceCIDR.Mask)) {
 		return
 	}

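The updated filterOutServiceRange condition also covers the case where the service CIDR is the wider of the two ranges: the early return now fires only when neither CIDR contains the other, i.e. when the ranges are completely disjoint. A small illustrative sketch of that symmetric overlap test (example addresses; presumably the controller uses it to keep the service range out of the node CIDR pool):

package main

import (
	"fmt"
	"net"
)

// cidrsOverlap reports whether two CIDRs share any addresses, checking
// containment in both directions as the updated filterOutServiceRange does.
func cidrsOverlap(a, b *net.IPNet) bool {
	return a.Contains(b.IP.Mask(a.Mask)) || b.Contains(a.IP.Mask(b.Mask))
}

func main() {
	_, clusterCIDR, _ := net.ParseCIDR("10.244.0.0/14")
	_, serviceCIDR, _ := net.ParseCIDR("10.244.0.0/16") // inside the cluster range
	_, elsewhere, _ := net.ParseCIDR("192.168.0.0/16")  // disjoint

	fmt.Println(cidrsOverlap(clusterCIDR, serviceCIDR)) // true: service range overlaps the pod range
	fmt.Println(cidrsOverlap(clusterCIDR, elsewhere))   // false: nothing to filter out
}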
@@ -660,7 +660,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
 	for _, item := range table {
 		nodeController := NewNodeController(nil, item.fakeNodeHandler,
 			evictionTimeout, flowcontrol.NewFakeAlwaysRateLimiter(), flowcontrol.NewFakeAlwaysRateLimiter(), testNodeMonitorGracePeriod,
-			testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, false)
+			testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false)
 		nodeController.now = func() unversioned.Time { return fakeNow }
 		for _, ds := range item.daemonSets {
 			nodeController.daemonSetStore.Add(&ds)
@@ -731,7 +731,7 @@ func TestCloudProviderNoRateLimit(t *testing.T) {
 	nodeController := NewNodeController(nil, fnh, 10*time.Minute,
 		flowcontrol.NewFakeAlwaysRateLimiter(), flowcontrol.NewFakeAlwaysRateLimiter(),
 		testNodeMonitorGracePeriod, testNodeStartupGracePeriod,
-		testNodeMonitorPeriod, nil, nil, false)
+		testNodeMonitorPeriod, nil, nil, 0, false)
 	nodeController.cloud = &fakecloud.FakeCloud{}
 	nodeController.now = func() unversioned.Time { return unversioned.Date(2016, 1, 1, 12, 0, 0, 0, time.UTC) }
 	nodeController.nodeExistsInCloudProvider = func(nodeName string) (bool, error) {
@@ -963,7 +963,7 @@ func TestMonitorNodeStatusUpdateStatus(t *testing.T) {

 	for i, item := range table {
 		nodeController := NewNodeController(nil, item.fakeNodeHandler, 5*time.Minute, flowcontrol.NewFakeAlwaysRateLimiter(),
-			flowcontrol.NewFakeAlwaysRateLimiter(), testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, false)
+			flowcontrol.NewFakeAlwaysRateLimiter(), testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false)
 		nodeController.now = func() unversioned.Time { return fakeNow }
 		if err := nodeController.monitorNodeStatus(); err != nil {
 			t.Errorf("unexpected error: %v", err)
@@ -1113,7 +1113,7 @@ func TestMonitorNodeStatusMarkPodsNotReady(t *testing.T) {

 	for i, item := range table {
 		nodeController := NewNodeController(nil, item.fakeNodeHandler, 5*time.Minute, flowcontrol.NewFakeAlwaysRateLimiter(),
-			flowcontrol.NewFakeAlwaysRateLimiter(), testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, false)
+			flowcontrol.NewFakeAlwaysRateLimiter(), testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false)
 		nodeController.now = func() unversioned.Time { return fakeNow }
 		if err := nodeController.monitorNodeStatus(); err != nil {
 			t.Errorf("Case[%d] unexpected error: %v", i, err)
@@ -1195,7 +1195,7 @@ func TestNodeDeletion(t *testing.T) {
 	}

 	nodeController := NewNodeController(nil, fakeNodeHandler, 5*time.Minute, flowcontrol.NewFakeAlwaysRateLimiter(), flowcontrol.NewFakeAlwaysRateLimiter(),
-		testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, false)
+		testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false)
 	nodeController.now = func() unversioned.Time { return fakeNow }
 	if err := nodeController.monitorNodeStatus(); err != nil {
 		t.Errorf("unexpected error: %v", err)
@@ -1298,7 +1298,7 @@ func TestCheckPod(t *testing.T) {
 		},
 	}

-	nc := NewNodeController(nil, nil, 0, nil, nil, 0, 0, 0, nil, nil, false)
+	nc := NewNodeController(nil, nil, 0, nil, nil, 0, 0, 0, nil, nil, 0, false)
 	nc.nodeStore.Store = cache.NewStore(cache.MetaNamespaceKeyFunc)
 	nc.nodeStore.Store.Add(&api.Node{
 		ObjectMeta: api.ObjectMeta{
@@ -1375,7 +1375,7 @@ func TestCleanupOrphanedPods(t *testing.T) {
 		newPod("b", "bar"),
 		newPod("c", "gone"),
 	}
-	nc := NewNodeController(nil, nil, 0, nil, nil, 0, 0, 0, nil, nil, false)
+	nc := NewNodeController(nil, nil, 0, nil, nil, 0, 0, 0, nil, nil, 0, false)

 	nc.nodeStore.Store.Add(newNode("foo"))
 	nc.nodeStore.Store.Add(newNode("bar"))