mirror of https://github.com/k3s-io/k3s
parent ce95c4920d
commit 44b1e1ca0c
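In brief (reconstructed from the hunks below; the original commit message was not preserved in this extraction): the change renames the node capacity resource ResourceMaxPods ("maxpods") to ResourcePods ("pods") across the API helpers, the node controller, the kubelet (struct field, constructor parameter, and node status update), the scheduler's fit predicate, and the tests, which also switch from raw string keys to the typed api.Resource* constants. It additionally renames the kubelet flag max_pods to the dashed max-pods.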
@@ -227,7 +227,7 @@ func (s *KubeletServer) AddFlags(fs *pflag.FlagSet) {
 	fs.StringVar(&s.ContainerRuntime, "container_runtime", s.ContainerRuntime, "The container runtime to use. Possible values: 'docker', 'rkt'. Default: 'docker'.")
 	fs.StringVar(&s.DockerDaemonContainer, "docker-daemon-container", s.DockerDaemonContainer, "Optional resource-only container in which to place the Docker Daemon. Empty for no container (Default: /docker-daemon).")
 	fs.BoolVar(&s.ConfigureCBR0, "configure-cbr0", s.ConfigureCBR0, "If true, kubelet will configure cbr0 based on Node.Spec.PodCIDR.")
-	fs.IntVar(&s.MaxPods, "max_pods", 100, "Number of Pods that can run on this Kubelet.")
+	fs.IntVar(&s.MaxPods, "max-pods", 100, "Number of Pods that can run on this Kubelet.")
 
 	// Flags intended for testing, not recommended used in production environments.
 	fs.BoolVar(&s.ReallyCrashForTesting, "really-crash-for-testing", s.ReallyCrashForTesting, "If true, when panics occur crash. Intended for testing.")
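An aside on the flag rename above: pflag matches flag names literally, so max_pods and max-pods are distinct flags. A minimal standalone sketch of registering and parsing the dashed name (the package layout, flag-set name, and argument values are hypothetical scaffolding, not code from this commit):

    package main

    import (
        "fmt"

        "github.com/spf13/pflag"
    )

    func main() {
        // Register the flag under its dashed name, as the hunk above does.
        fs := pflag.NewFlagSet("kubelet", pflag.ExitOnError)
        maxPods := fs.Int("max-pods", 100, "Number of Pods that can run on this Kubelet.")

        // Hypothetical invocation; the real kubelet parses os.Args[1:].
        if err := fs.Parse([]string{"--max-pods=40"}); err != nil {
            panic(err)
        }
        fmt.Println(*maxPods) // prints 40
    }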
@@ -41,8 +41,8 @@ func (self *ResourceList) Memory() *resource.Quantity {
 	return &resource.Quantity{}
 }
 
-func (self *ResourceList) MaxPods() *resource.Quantity {
-	if val, ok := (*self)[ResourceMaxPods]; ok {
+func (self *ResourceList) Pods() *resource.Quantity {
+	if val, ok := (*self)[ResourcePods]; ok {
 		return &val
 	}
 	return &resource.Quantity{}
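The renamed accessor follows a lookup-with-zero-fallback pattern: index the map, and hand back a pointer to a zero Quantity when the key is absent. A self-contained sketch of the pattern (Quantity and ResourceList here are simplified stand-ins, not the real api/resource types):

    package main

    import "fmt"

    // Stand-in for resource.Quantity; the real type is much richer.
    type Quantity struct{ value int64 }

    func (q *Quantity) Value() int64 { return q.value }

    // Stand-in for api.ResourceList, keyed by resource name.
    type ResourceList map[string]Quantity

    // Pods mirrors the renamed accessor: look up the "pods" key, falling
    // back to a zero Quantity when capacity was never populated.
    func (rl ResourceList) Pods() *Quantity {
        if val, ok := rl["pods"]; ok {
            return &val // pointer to the local copy, as in the original
        }
        return &Quantity{}
    }

    func main() {
        capacity := ResourceList{"pods": {value: 40}}
        fmt.Println(capacity.Pods().Value())       // 40
        fmt.Println(ResourceList{}.Pods().Value()) // 0
    }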
@@ -1245,8 +1245,7 @@ const (
 	ResourceMemory ResourceName = "memory"
 	// Volume size, in bytes (e,g. 5Gi = 5GiB = 5 * 1024 * 1024 * 1024)
 	ResourceStorage ResourceName = "storage"
-	// Number of Pods that may be running on this Node.
-	ResourceMaxPods ResourceName = "maxpods"
+	// Number of Pods that may be running on this Node: see ResourcePods
 )
 
 // ResourceList is a set of (resource name, quantity) pairs.
@@ -672,7 +672,7 @@ func (nc *NodeController) getCloudNodesWithSpec() (*api.NodeList, error) {
 		if resources != nil {
 			node.Status.Capacity = resources.Capacity
 			if node.Status.Capacity != nil {
-				node.Status.Capacity[api.ResourceMaxPods] = *resource.NewQuantity(0, resource.DecimalSI)
+				node.Status.Capacity[api.ResourcePods] = *resource.NewQuantity(0, resource.DecimalSI)
 			}
 		}
 		instanceID, err := instances.ExternalID(node.Name)
@@ -142,7 +142,7 @@ func NewMainKubelet(
 	mounter mount.Interface,
 	dockerDaemonContainer string,
 	configureCBR0 bool,
-	maxPods int) (*Kubelet, error) {
+	pods int) (*Kubelet, error) {
 	if rootDirectory == "" {
 		return nil, fmt.Errorf("invalid root directory %q", rootDirectory)
 	}
@@ -249,7 +249,7 @@ func NewMainKubelet(
 		cgroupRoot: cgroupRoot,
 		mounter: mounter,
 		configureCBR0: configureCBR0,
-		maxPods: maxPods,
+		pods: pods,
 	}
 
 	if plug, err := network.InitNetworkPlugin(networkPlugins, networkPluginName, &networkHost{klet}); err != nil {
@@ -468,7 +468,7 @@ type Kubelet struct {
 	configureCBR0 bool
 
 	// Number of Pods which can be run by this Kubelet
-	maxPods int
+	pods int
 }
 
 // getRootDir returns the full path to the directory under which kubelet can
@@ -1747,8 +1747,8 @@ func (kl *Kubelet) tryUpdateNodeStatus() error {
 	node.Status.NodeInfo.MachineID = info.MachineID
 	node.Status.NodeInfo.SystemUUID = info.SystemUUID
 	node.Status.Capacity = CapacityFromMachineInfo(info)
-	node.Status.Capacity[api.ResourceMaxPods] = *resource.NewQuantity(
-		int64(kl.maxPods), resource.DecimalSI)
+	node.Status.Capacity[api.ResourcePods] = *resource.NewQuantity(
+		int64(kl.pods), resource.DecimalSI)
 	if node.Status.NodeInfo.BootID != "" &&
 		node.Status.NodeInfo.BootID != info.BootID {
 		// TODO: This requires a transaction, either both node status is updated
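Worth noting from the hunk above: CapacityFromMachineInfo(info) derives capacity from the machine info (CPU and memory), so the pods capacity is not part of it and must be written into the map separately afterwards — which is exactly the line this diff renames.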
@@ -3312,7 +3312,7 @@ func TestUpdateNewNodeStatus(t *testing.T) {
 			Capacity: api.ResourceList{
 				api.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
 				api.ResourceMemory: *resource.NewQuantity(1024, resource.BinarySI),
-				api.ResourceMaxPods: *resource.NewQuantity(0, resource.DecimalSI),
+				api.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
 			},
 		},
 	}
@@ -3362,7 +3362,7 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
 			Capacity: api.ResourceList{
 				api.ResourceCPU: *resource.NewMilliQuantity(3000, resource.DecimalSI),
 				api.ResourceMemory: *resource.NewQuantity(2048, resource.BinarySI),
-				api.ResourceMaxPods: *resource.NewQuantity(0, resource.DecimalSI),
+				api.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
 			},
 		},
 	},
@@ -3408,7 +3408,7 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
 			Capacity: api.ResourceList{
 				api.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
 				api.ResourceMemory: *resource.NewQuantity(1024, resource.BinarySI),
-				api.ResourceMaxPods: *resource.NewQuantity(0, resource.DecimalSI),
+				api.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
 			},
 		},
 	}
@@ -3493,7 +3493,7 @@ func TestUpdateNodeStatusWithoutContainerRuntime(t *testing.T) {
 			Capacity: api.ResourceList{
 				api.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
 				api.ResourceMemory: *resource.NewQuantity(1024, resource.BinarySI),
-				api.ResourceMaxPods: *resource.NewQuantity(0, resource.DecimalSI),
+				api.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
 			},
 		},
 	}
@@ -142,13 +142,13 @@ func (r *ResourceFit) PodFitsResources(pod *api.Pod, existingPods []*api.Pod, no
 		return false, err
 	}
 	if podRequest.milliCPU == 0 && podRequest.memory == 0 {
-		return int64(len(existingPods)) < info.Status.Capacity.MaxPods().Value(), nil
+		return int64(len(existingPods)) < info.Status.Capacity.Pods().Value(), nil
 	}
 	pods := []*api.Pod{}
 	copy(pods, existingPods)
 	pods = append(existingPods, pod)
 	_, exceeding := CheckPodsExceedingCapacity(pods, info.Status.Capacity)
-	if len(exceeding) > 0 || int64(len(pods)) > info.Status.Capacity.MaxPods().Value() {
+	if len(exceeding) > 0 || int64(len(pods)) > info.Status.Capacity.Pods().Value() {
 		return false, nil
 	}
 	return true, nil
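A side observation on the unchanged context here: pods := []*api.Pod{} creates a zero-length slice, so copy(pods, existingPods) copies nothing, and the next line appends to existingPods rather than to pods. The capacity check still works because only len(pods) matters afterwards, but the copy appears to be dead code and the append can alias the caller's slice; that behavior predates this diff and is untouched by it.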
@@ -44,12 +44,12 @@ func (nodes FakeNodeListInfo) GetNodeInfo(nodeName string) (*api.Node, error) {
 	return nil, fmt.Errorf("Unable to find node: %s", nodeName)
 }
 
-func makeResources(milliCPU int64, memory int64, maxPods int64) api.NodeResources {
+func makeResources(milliCPU int64, memory int64, pods int64) api.NodeResources {
 	return api.NodeResources{
 		Capacity: api.ResourceList{
-			"cpu": *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
-			"memory": *resource.NewQuantity(memory, resource.BinarySI),
-			"maxpods": *resource.NewQuantity(maxPods, resource.DecimalSI),
+			api.ResourceCPU: *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
+			api.ResourceMemory: *resource.NewQuantity(memory, resource.BinarySI),
+			api.ResourcePods: *resource.NewQuantity(pods, resource.DecimalSI),
 		},
 	}
 }
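A small design note on this hunk and the ones that follow: moving the fixtures from raw string keys ("cpu", "maxpods") to the typed constants (api.ResourceCPU, api.ResourcePods) keeps the canonical resource names in one place, so a rename like this one only touches the constant definition, and the compiler flags any stale key that a string literal would have hidden.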
@@ -60,8 +60,8 @@ func newResourcePod(usage ...resourceRequest) *api.Pod {
 		containers = append(containers, api.Container{
 			Resources: api.ResourceRequirements{
 				Limits: api.ResourceList{
-					"cpu": *resource.NewMilliQuantity(req.milliCPU, resource.DecimalSI),
-					"memory": *resource.NewQuantity(req.memory, resource.BinarySI),
+					api.ResourceCPU: *resource.NewMilliQuantity(req.milliCPU, resource.DecimalSI),
+					api.ResourceMemory: *resource.NewQuantity(req.memory, resource.BinarySI),
 				},
 			},
 		})
@@ -146,7 +146,7 @@ func DoTestUnschedulableNodes(t *testing.T, restClient *client.Client, nodeStore
 		Spec: api.NodeSpec{Unschedulable: false},
 		Status: api.NodeStatus{
 			Capacity: api.ResourceList{
-				"maxpods": *resource.NewQuantity(32, resource.DecimalSI),
+				api.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
 			},
 			Conditions: []api.NodeCondition{goodCondition},
 		},
@@ -199,7 +199,7 @@ func DoTestUnschedulableNodes(t *testing.T, restClient *client.Client, nodeStore
 		makeUnSchedulable: func(t *testing.T, n *api.Node, s cache.Store, c *client.Client) {
 			n.Status = api.NodeStatus{
 				Capacity: api.ResourceList{
-					"maxpods": *resource.NewQuantity(32, resource.DecimalSI),
+					api.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
 				},
 				Conditions: []api.NodeCondition{badCondition},
 			}
@@ -216,7 +216,7 @@ func DoTestUnschedulableNodes(t *testing.T, restClient *client.Client, nodeStore
 		makeSchedulable: func(t *testing.T, n *api.Node, s cache.Store, c *client.Client) {
 			n.Status = api.NodeStatus{
 				Capacity: api.ResourceList{
-					"maxpods": *resource.NewQuantity(32, resource.DecimalSI),
+					api.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
 				},
 				Conditions: []api.NodeCondition{goodCondition},
 			}