Merge pull request #46479 from zjj2wry/cm

Automatic merge from submit-queue (batch tested with PRs 42256, 46479, 45436, 46440, 46417)

Fix naming and comments in Container Manager

**What this PR does / why we need it**:

**Which issue this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close that issue when PR gets merged)*: fixes #

**Special notes for your reviewer**:

**Release note**:

```release-note
NONE
```
Kubernetes Submit Queue 2017-05-26 15:59:00 -07:00 committed by GitHub
commit 55e2bfde0e
5 changed files with 16 additions and 16 deletions


@@ -164,7 +164,7 @@ func (l *libcontainerAdapter) adaptName(cgroupName CgroupName, outputToCgroupFs
return ConvertCgroupNameToSystemd(cgroupName, outputToCgroupFs)
}
-// CgroupSubsystems holds information about the mounted cgroup subsytems
+// CgroupSubsystems holds information about the mounted cgroup subsystems
type CgroupSubsystems struct {
// Cgroup subsystem mounts.
// e.g.: "/sys/fs/cgroup/cpu" -> ["cpu", "cpuacct"]
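
For readers skimming the hunk above: the struct being documented records which cgroup subsystems are co-mounted at each mount point. Below is a minimal, illustrative sketch of that shape using plain maps; the variable names and the derived reverse lookup are assumptions for illustration, not the kubelet's actual fields.

```go
package main

import "fmt"

func main() {
	// Illustrative only: mount point -> co-mounted cgroup subsystems,
	// mirroring the "/sys/fs/cgroup/cpu" -> ["cpu", "cpuacct"] example above.
	mounts := map[string][]string{
		"/sys/fs/cgroup/cpu":    {"cpu", "cpuacct"},
		"/sys/fs/cgroup/memory": {"memory"},
	}

	// Derive the reverse lookup (subsystem -> mount point), which is the
	// kind of information a cgroup manager needs when writing limits.
	mountPoints := map[string]string{}
	for point, subsystems := range mounts {
		for _, s := range subsystems {
			mountPoints[s] = point
		}
	}
	fmt.Println(mountPoints["cpuacct"]) // /sys/fs/cgroup/cpu
}
```
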
@@ -181,7 +181,7 @@ type CgroupSubsystems struct {
// It uses the Libcontainer raw fs cgroup manager for cgroup management.
type cgroupManagerImpl struct {
// subsystems holds information about all the
-// mounted cgroup subsytems on the node
+// mounted cgroup subsystems on the node
subsystems *CgroupSubsystems
// simplifies interaction with libcontainer and its cgroup managers
adapter *libcontainerAdapter
@@ -305,8 +305,8 @@ var supportedSubsystems = []subsystem{
&cgroupfs.CpuGroup{},
}
-// setSupportedSubsytems sets cgroup resource limits only on the supported
-// subsytems. ie. cpu and memory. We don't use libcontainer's cgroup/fs/Set()
+// setSupportedSubsystems sets cgroup resource limits only on the supported
+// subsystems. ie. cpu and memory. We don't use libcontainer's cgroup/fs/Set()
// method as it doesn't allow us to skip updates on the devices cgroup
// Allowing or denying all devices by writing 'a' to devices.allow or devices.deny is
// not possible once the device cgroups has children. Once the pod level cgroup are
@@ -314,10 +314,10 @@ var supportedSubsystems = []subsystem{
// We would like to skip setting any values on the device cgroup in this case
// but this is not possible with libcontainers Set() method
// See https://github.com/opencontainers/runc/issues/932
-func setSupportedSubsytems(cgroupConfig *libcontainerconfigs.Cgroup) error {
+func setSupportedSubsystems(cgroupConfig *libcontainerconfigs.Cgroup) error {
for _, sys := range supportedSubsystems {
if _, ok := cgroupConfig.Paths[sys.Name()]; !ok {
return fmt.Errorf("Failed to find subsytem mount for subsytem: %v", sys.Name())
return fmt.Errorf("Failed to find subsystem mount for subsystem: %v", sys.Name())
}
if err := sys.Set(cgroupConfig.Paths[sys.Name()], cgroupConfig); err != nil {
return fmt.Errorf("Failed to set config for supported subsystems : %v", err)
@@ -380,7 +380,7 @@ func (m *cgroupManagerImpl) Update(cgroupConfig *CgroupConfig) error {
Paths: cgroupPaths,
}
-if err := setSupportedSubsytems(libcontainerCgroupConfig); err != nil {
+if err := setSupportedSubsystems(libcontainerCgroupConfig); err != nil {
return fmt.Errorf("failed to set supported cgroup subsystems for cgroup %v: %v", cgroupConfig.Name, err)
}
return nil
@@ -422,7 +422,7 @@ func (m *cgroupManagerImpl) Create(cgroupConfig *CgroupConfig) error {
// Apply(-1) is a hack to create the cgroup directories for each resource
// subsystem. The function [cgroups.Manager.apply()] applies cgroup
// configuration to the process with the specified pid.
-// It creates cgroup files for each subsytems and writes the pid
+// It creates cgroup files for each subsystems and writes the pid
// in the tasks file. We use the function to create all the required
// cgroup files but not attach any "real" pid to the cgroup.
if err := manager.Apply(-1); err != nil {
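
The Apply(-1) comment above describes materializing the per-subsystem cgroup directories without attaching any real process to them. As a rough illustration of what that amounts to on a cgroup v1 filesystem (this is not how libcontainer implements it, and the mount points and cgroup name below are assumptions):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// createCgroupDirs shows roughly what the Apply(-1) trick achieves: the
// per-subsystem directories come into existence (the kernel populates the
// control files inside them), but no real process is moved into the cgroup.
func createCgroupDirs(mountPoints map[string]string, cgroupName string) error {
	for subsystem, mount := range mountPoints {
		dir := filepath.Join(mount, cgroupName)
		if err := os.MkdirAll(dir, 0o755); err != nil {
			return fmt.Errorf("creating %s cgroup %s: %v", subsystem, dir, err)
		}
	}
	return nil
}

func main() {
	// Hypothetical mount points; modifying these normally requires root.
	mounts := map[string]string{
		"cpu":    "/sys/fs/cgroup/cpu",
		"memory": "/sys/fs/cgroup/memory",
	}
	if err := createCgroupDirs(mounts, "kubepods/pod-example"); err != nil {
		fmt.Println(err)
	}
}
```
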
@@ -436,7 +436,7 @@ func (m *cgroupManagerImpl) Create(cgroupConfig *CgroupConfig) error {
return nil
}
-// Scans through all subsytems to find pids associated with specified cgroup.
+// Scans through all subsystems to find pids associated with specified cgroup.
func (m *cgroupManagerImpl) Pids(name CgroupName) []int {
// we need the driver specific name
cgroupFsName := m.Name(name)
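
The Pids comment fixed above describes scanning every subsystem mount for the processes attached to a cgroup. Here is a simplified sketch of that idea, reading each mount's cgroup.procs file; unlike the kubelet's version it does not walk nested directories, and the mount paths and cgroup name are assumptions.

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strconv"
	"strings"
)

// pidsInCgroup collects the PIDs listed in the named cgroup's cgroup.procs
// file under each subsystem mount, de-duplicating across subsystems.
func pidsInCgroup(mountPoints map[string]string, cgroupName string) []int {
	seen := map[int]bool{}
	var pids []int
	for _, mount := range mountPoints {
		data, err := os.ReadFile(filepath.Join(mount, cgroupName, "cgroup.procs"))
		if err != nil {
			continue // the cgroup may not exist under this subsystem
		}
		for _, field := range strings.Fields(string(data)) {
			if pid, err := strconv.Atoi(field); err == nil && !seen[pid] {
				seen[pid] = true
				pids = append(pids, pid)
			}
		}
	}
	return pids
}

func main() {
	mounts := map[string]string{"cpu": "/sys/fs/cgroup/cpu", "memory": "/sys/fs/cgroup/memory"}
	fmt.Println(pidsInCgroup(mounts, "kubepods/pod-example"))
}
```
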
@@ -500,11 +500,11 @@ func (m *cgroupManagerImpl) ReduceCPULimits(cgroupName CgroupName) error {
return m.Update(containerConfig)
}
-func getStatsSupportedSubsytems(cgroupPaths map[string]string) (*libcontainercgroups.Stats, error) {
+func getStatsSupportedSubsystems(cgroupPaths map[string]string) (*libcontainercgroups.Stats, error) {
stats := libcontainercgroups.NewStats()
for _, sys := range supportedSubsystems {
if _, ok := cgroupPaths[sys.Name()]; !ok {
return nil, fmt.Errorf("Failed to find subsytem mount for subsytem: %v", sys.Name())
return nil, fmt.Errorf("Failed to find subsystem mount for subsystem: %v", sys.Name())
}
if err := sys.GetStats(cgroupPaths[sys.Name()], stats); err != nil {
return nil, fmt.Errorf("Failed to get stats for supported subsystems : %v", err)
@ -524,7 +524,7 @@ func toResourceStats(stats *libcontainercgroups.Stats) *ResourceStats {
// Get sets the ResourceParameters of the specified cgroup as read from the cgroup fs
func (m *cgroupManagerImpl) GetResourceStats(name CgroupName) (*ResourceStats, error) {
cgroupPaths := m.buildCgroupPaths(name)
-stats, err := getStatsSupportedSubsytems(cgroupPaths)
+stats, err := getStatsSupportedSubsystems(cgroupPaths)
if err != nil {
return nil, fmt.Errorf("failed to get stats supported cgroup subsystems for cgroup %v: %v", name, err)
}


@@ -51,7 +51,7 @@ type ContainerManager interface {
// Returns a noop implementation if qos cgroup hierarchy is not enabled
NewPodContainerManager() PodContainerManager
-// GetMountedSubsystems returns the mounted cgroup subsytems on the node
+// GetMountedSubsystems returns the mounted cgroup subsystems on the node
GetMountedSubsystems() *CgroupSubsystems
// GetQOSContainersInfo returns the names of top level QoS containers


@@ -124,7 +124,7 @@ func TestCgroupMountValidationMemoryMissing(t *testing.T) {
assert.Error(t, err)
}
-func TestCgroupMountValidationMultipleSubsytem(t *testing.T) {
+func TestCgroupMountValidationMultipleSubsystem(t *testing.T) {
mountInt := &fakeMountInterface{
[]mount.MountPoint{
{


@@ -175,7 +175,7 @@ func (m *podContainerManagerImpl) ReduceCPULimits(podCgroup CgroupName) error {
return m.cgroupManager.ReduceCPULimits(podCgroup)
}
-// GetAllPodsFromCgroups scans through all the subsytems of pod cgroups
+// GetAllPodsFromCgroups scans through all the subsystems of pod cgroups
// Get list of pods whose cgroup still exist on the cgroup mounts
func (m *podContainerManagerImpl) GetAllPodsFromCgroups() (map[types.UID]CgroupName, error) {
// Map for storing all the found pods on the disk
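
The GetAllPodsFromCgroups comment fixed above describes rediscovering pods from cgroup directories left on disk. Below is a rough sketch of such a scan, assuming a simplified "pod<UID>" directory naming convention and an example parent path; the kubelet's actual layout and naming may differ.

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// foundPodCgroups lists the directories under a pod-cgroup parent and treats
// those matching a "pod" prefix as pods whose cgroups still exist on disk.
func foundPodCgroups(parentDir string) (map[string]string, error) {
	entries, err := os.ReadDir(parentDir)
	if err != nil {
		return nil, err
	}
	pods := map[string]string{} // pod UID -> cgroup directory
	for _, e := range entries {
		if e.IsDir() && strings.HasPrefix(e.Name(), "pod") {
			pods[strings.TrimPrefix(e.Name(), "pod")] = filepath.Join(parentDir, e.Name())
		}
	}
	return pods, nil
}

func main() {
	pods, err := foundPodCgroups("/sys/fs/cgroup/memory/kubepods")
	if err != nil {
		fmt.Println(err)
		return
	}
	for uid, dir := range pods {
		fmt.Printf("pod %s -> %s\n", uid, dir)
	}
}
```
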


@@ -80,7 +80,7 @@ type CgroupManager interface {
Name(name CgroupName) string
// CgroupName converts the literal cgroupfs name on the host to an internal identifier.
CgroupName(name string) CgroupName
-// Pids scans through all subsytems to find pids associated with specified cgroup.
+// Pids scans through all subsystems to find pids associated with specified cgroup.
Pids(name CgroupName) []int
// ReduceCPULimits reduces the CPU CFS values to the minimum amount of shares.
ReduceCPULimits(cgroupName CgroupName) error