mirror of https://github.com/k3s-io/k3s
Merge pull request #36767 from vishh/rename-cgroups-flags

Automatic merge from submit-queue

[kubelet] rename --cgroups-per-qos to --experimental-cgroups-per-qos

This reflects the true nature of the "cgroups per qos" feature.

```release-note
* Rename `--cgroups-per-qos` to `--experimental-cgroups-per-qos` in Kubelet
```

commit 3245e8b355

@@ -194,7 +194,7 @@ func (s *KubeletServer) AddFlags(fs *pflag.FlagSet) {
 	fs.MarkDeprecated("system-container", "Use --system-cgroups instead. Will be removed in a future version.")
 	fs.StringVar(&s.SystemCgroups, "system-cgroups", s.SystemCgroups, "Optional absolute name of cgroups in which to place all non-kernel processes that are not already inside a cgroup under `/`. Empty for no container. Rolling back the flag requires a reboot. (Default: \"\").")
-	fs.BoolVar(&s.CgroupsPerQOS, "cgroups-per-qos", s.CgroupsPerQOS, "Enable creation of QoS cgroup hierarchy, if true top level QoS and pod cgroups are created.")
+	fs.BoolVar(&s.ExperimentalCgroupsPerQOS, "experimental-cgroups-per-qos", s.ExperimentalCgroupsPerQOS, "Enable creation of QoS cgroup hierarchy, if true top level QoS and pod cgroups are created.")
 	fs.StringVar(&s.CgroupDriver, "cgroup-driver", s.CgroupDriver, "Driver that the kubelet uses to manipulate cgroups on the host. Possible values: 'cgroupfs', 'systemd'")
 	fs.StringVar(&s.CgroupRoot, "cgroup-root", s.CgroupRoot, "Optional root cgroup to use for pods. This is handled by the container runtime on a best effort basis. Default: '', which means use the container runtime default.")
 	fs.StringVar(&s.ContainerRuntime, "container-runtime", s.ContainerRuntime, "The container runtime to use. Possible values: 'docker', 'rkt'. Default: 'docker'.")

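For context, the hunk above shows two different rename strategies available in the spf13/pflag API: a hard rename (the old `--cgroups-per-qos` name is removed outright, so old invocations fail to parse) versus the soft rename used by the neighboring `--system-container` context line, which keeps the old flag alive but deprecated. A minimal, self-contained sketch of both patterns; this is illustrative, not the kubelet's actual registration code:

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("kubelet", pflag.ExitOnError)

	// Hard rename: the flag exists only under its new name, so any
	// command line still passing --cgroups-per-qos is rejected.
	var cgroupsPerQOS bool
	fs.BoolVar(&cgroupsPerQOS, "experimental-cgroups-per-qos", false,
		"Enable creation of QoS cgroup hierarchy, if true top level QoS and pod cgroups are created.")

	// Soft rename: the old flag keeps working but prints a warning,
	// which is what the nearby --system-container line does.
	var systemCgroups string
	fs.StringVar(&systemCgroups, "system-container", "", "Deprecated alias for --system-cgroups.")
	fs.MarkDeprecated("system-container", "Use --system-cgroups instead. Will be removed in a future version.")

	fs.Parse([]string{"--experimental-cgroups-per-qos=true"})
	fmt.Println(cgroupsPerQOS) // true
}
```
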
@@ -442,7 +442,7 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.KubeletDeps) (err error) {
 		SystemCgroupsName:     s.SystemCgroups,
 		KubeletCgroupsName:    s.KubeletCgroups,
 		ContainerRuntime:      s.ContainerRuntime,
-		CgroupsPerQOS:         s.CgroupsPerQOS,
+		CgroupsPerQOS:         s.ExperimentalCgroupsPerQOS,
 		CgroupRoot:            s.CgroupRoot,
 		CgroupDriver:          s.CgroupDriver,
 		ProtectKernelDefaults: s.ProtectKernelDefaults,

@@ -59,7 +59,6 @@ cert-dir
 certificate-authority
 cgroup-driver
 cgroup-root
-cgroups-per-qos
 chaos-chance
 clean-start
 cleanup

@@ -189,6 +188,7 @@ executor-suicide-timeout
 exit-on-lock-contention
 experimental-allowed-unsafe-sysctls
 experimental-bootstrap-kubeconfig
+experimental-cgroups-per-qos
 experimental-keystone-url
 experimental-keystone-ca-file
 experimental-mounter-path

@@ -1401,7 +1401,7 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) {
 			yyq153[55] = x.CloudProvider != ""
 			yyq153[56] = x.CloudConfigFile != ""
 			yyq153[57] = x.KubeletCgroups != ""
-			yyq153[58] = x.CgroupsPerQOS != false
+			yyq153[58] = x.ExperimentalCgroupsPerQOS != false
 			yyq153[59] = x.CgroupDriver != ""
 			yyq153[60] = x.RuntimeCgroups != ""
 			yyq153[61] = x.SystemCgroups != ""

@@ -2647,7 +2647,7 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) {
 					_ = yym345
 					if false {
 					} else {
-						r.EncodeBool(bool(x.CgroupsPerQOS))
+						r.EncodeBool(bool(x.ExperimentalCgroupsPerQOS))
 					}
 				} else {
 					r.EncodeBool(false)

@@ -2655,13 +2655,13 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) {
 			} else {
 				if yyq153[58] {
 					z.EncSendContainerState(codecSelfer_containerMapKey1234)
-					r.EncodeString(codecSelferC_UTF81234, string("cgroupsPerQOS"))
+					r.EncodeString(codecSelferC_UTF81234, string("experimentalCgroupsPerQOS"))
 					z.EncSendContainerState(codecSelfer_containerMapValue1234)
 					yym346 := z.EncBinary()
 					_ = yym346
 					if false {
 					} else {
-						r.EncodeBool(bool(x.CgroupsPerQOS))
+						r.EncodeBool(bool(x.ExperimentalCgroupsPerQOS))
 					}
 				}
 			}

@@ -4378,11 +4378,11 @@ func (x *KubeletConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decode
 		} else {
 			x.KubeletCgroups = string(r.DecodeString())
 		}
-	case "cgroupsPerQOS":
+	case "experimentalCgroupsPerQOS":
 		if r.TryDecodeAsNil() {
-			x.CgroupsPerQOS = false
+			x.ExperimentalCgroupsPerQOS = false
 		} else {
-			x.CgroupsPerQOS = bool(r.DecodeBool())
+			x.ExperimentalCgroupsPerQOS = bool(r.DecodeBool())
 		}
 	case "cgroupDriver":
 		if r.TryDecodeAsNil() {

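The ugorji-generated codec hunks above and below boil down to one observable change: the serialized key for this field is now `experimentalCgroupsPerQOS`, and the old key is no longer recognized. A sketch of that effect using the standard library instead of the generated codec; the struct here is illustrative, not the real KubeletConfiguration:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type kubeletConfig struct {
	ExperimentalCgroupsPerQOS bool `json:"experimentalCgroupsPerQOS,omitempty"`
}

func main() {
	var c kubeletConfig

	// The old key is silently ignored after the rename...
	json.Unmarshal([]byte(`{"cgroupsPerQOS": true}`), &c)
	fmt.Println(c.ExperimentalCgroupsPerQOS) // false

	// ...and only the new key is honored.
	json.Unmarshal([]byte(`{"experimentalCgroupsPerQOS": true}`), &c)
	fmt.Println(c.ExperimentalCgroupsPerQOS) // true

	out, _ := json.Marshal(kubeletConfig{ExperimentalCgroupsPerQOS: true})
	fmt.Println(string(out)) // {"experimentalCgroupsPerQOS":true}
}
```
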
@@ -5807,9 +5807,9 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco
 	}
 	z.DecSendContainerState(codecSelfer_containerArrayElem1234)
 	if r.TryDecodeAsNil() {
-		x.CgroupsPerQOS = false
+		x.ExperimentalCgroupsPerQOS = false
 	} else {
-		x.CgroupsPerQOS = bool(r.DecodeBool())
+		x.ExperimentalCgroupsPerQOS = bool(r.DecodeBool())
 	}
 	yyj649++
 	if yyhl649 {

@@ -294,7 +294,7 @@ type KubeletConfiguration struct {
 	// And all Burstable and BestEffort pods are brought up under their
 	// specific top level QoS cgroup.
 	// +optional
-	CgroupsPerQOS bool `json:"cgroupsPerQOS,omitempty"`
+	ExperimentalCgroupsPerQOS bool `json:"experimentalCgroupsPerQOS,omitempty"`
 	// driver that the kubelet uses to manipulate cgroups on the host (cgroupfs or systemd)
 	// +optional
 	CgroupDriver string `json:"cgroupDriver,omitempty"`

@@ -204,8 +204,8 @@ func SetDefaults_KubeletConfiguration(obj *KubeletConfiguration) {
 	if obj.CertDirectory == "" {
 		obj.CertDirectory = "/var/run/kubernetes"
 	}
-	if obj.CgroupsPerQOS == nil {
-		obj.CgroupsPerQOS = boolVar(false)
+	if obj.ExperimentalCgroupsPerQOS == nil {
+		obj.ExperimentalCgroupsPerQOS = boolVar(false)
 	}
 	if obj.ContainerRuntime == "" {
 		obj.ContainerRuntime = "docker"

@@ -391,9 +391,9 @@ func SetDefaults_KubeletConfiguration(obj *KubeletConfiguration) {
 		temp := int32(defaultIPTablesDropBit)
 		obj.IPTablesDropBit = &temp
 	}
-	if obj.CgroupsPerQOS == nil {
+	if obj.ExperimentalCgroupsPerQOS == nil {
 		temp := false
-		obj.CgroupsPerQOS = &temp
+		obj.ExperimentalCgroupsPerQOS = &temp
 	}
 	if obj.CgroupDriver == "" {
 		obj.CgroupDriver = "cgroupfs"

@@ -401,8 +401,8 @@ func SetDefaults_KubeletConfiguration(obj *KubeletConfiguration) {
 	// NOTE: this is for backwards compatibility with earlier releases where cgroup-root was optional.
 	// if cgroups per qos is not enabled, and cgroup-root is not specified, we need to default to the
 	// container runtime default and not default to the root cgroup.
-	if obj.CgroupsPerQOS != nil {
-		if *obj.CgroupsPerQOS {
+	if obj.ExperimentalCgroupsPerQOS != nil {
+		if *obj.ExperimentalCgroupsPerQOS {
 			if obj.CgroupRoot == "" {
 				obj.CgroupRoot = "/"
 			}

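The defaulting hunks above depend on the versioned config declaring the field as `*bool` rather than `bool`, so that "unset" can be told apart from "explicitly false" and the default is applied only in the former case. A minimal sketch of that pattern; the type and helper names here are illustrative, not the Kubernetes source:

```go
package main

import "fmt"

// Config mimics a versioned API type: a pointer field lets defaulting
// distinguish "field omitted" from "field explicitly set to false".
type Config struct {
	ExperimentalCgroupsPerQOS *bool
	CgroupRoot                string
}

func setDefaults(obj *Config) {
	if obj.ExperimentalCgroupsPerQOS == nil { // only default when unset
		temp := false
		obj.ExperimentalCgroupsPerQOS = &temp
	}
	// Backwards compatibility: only force cgroup-root to "/" when the
	// QoS hierarchy is actually enabled, mirroring the hunk above.
	if *obj.ExperimentalCgroupsPerQOS && obj.CgroupRoot == "" {
		obj.CgroupRoot = "/"
	}
}

func main() {
	enabled := true
	a := Config{ExperimentalCgroupsPerQOS: &enabled}
	b := Config{} // field omitted
	setDefaults(&a)
	setDefaults(&b)
	fmt.Println(*a.ExperimentalCgroupsPerQOS, a.CgroupRoot) // true /
	fmt.Println(*b.ExperimentalCgroupsPerQOS, b.CgroupRoot) // false
}
```
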
@@ -355,7 +355,7 @@ type KubeletConfiguration struct {
 	// And all Burstable and BestEffort pods are brought up under their
 	// specific top level QoS cgroup.
 	// +optional
-	CgroupsPerQOS *bool `json:"cgroupsPerQOS,omitempty"`
+	ExperimentalCgroupsPerQOS *bool `json:"experimentalCgroupsPerQOS,omitempty"`
 	// driver that the kubelet uses to manipulate cgroups on the host (cgroupfs or systemd)
 	// +optional
 	CgroupDriver string `json:"cgroupDriver,omitempty"`

@@ -330,7 +330,7 @@ func autoConvert_v1alpha1_KubeletConfiguration_To_componentconfig_KubeletConfigu
 	out.RuntimeCgroups = in.RuntimeCgroups
 	out.SystemCgroups = in.SystemCgroups
 	out.CgroupRoot = in.CgroupRoot
-	if err := api.Convert_Pointer_bool_To_bool(&in.CgroupsPerQOS, &out.CgroupsPerQOS, s); err != nil {
+	if err := api.Convert_Pointer_bool_To_bool(&in.ExperimentalCgroupsPerQOS, &out.ExperimentalCgroupsPerQOS, s); err != nil {
 		return err
 	}
 	out.CgroupDriver = in.CgroupDriver

@@ -496,7 +496,7 @@ func autoConvert_componentconfig_KubeletConfiguration_To_v1alpha1_KubeletConfigu
 	out.CloudProvider = in.CloudProvider
 	out.CloudConfigFile = in.CloudConfigFile
 	out.KubeletCgroups = in.KubeletCgroups
-	if err := api.Convert_bool_To_Pointer_bool(&in.CgroupsPerQOS, &out.CgroupsPerQOS, s); err != nil {
+	if err := api.Convert_bool_To_Pointer_bool(&in.ExperimentalCgroupsPerQOS, &out.ExperimentalCgroupsPerQOS, s); err != nil {
 		return err
 	}
 	out.CgroupDriver = in.CgroupDriver

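The generated converters above bridge the versioned `*bool` field and the internal `bool` field in each direction. A hedged sketch of what such helpers boil down to; the function names echo the generated calls, but the signatures are simplified and omit the conversion-scope argument and error return of the real API:

```go
package main

import "fmt"

// convertPointerBoolToBool treats a nil pointer as false, so an unset
// versioned field converts to the internal type's zero value.
func convertPointerBoolToBool(in *bool, out *bool) {
	if in != nil {
		*out = *in
	} else {
		*out = false
	}
}

// convertBoolToPointerBool always allocates, so the internal value
// round-trips into a set versioned field.
func convertBoolToPointerBool(in bool, out **bool) {
	*out = new(bool)
	**out = in
}

func main() {
	var internal bool
	convertPointerBoolToBool(nil, &internal)
	fmt.Println(internal) // false

	var versioned *bool
	convertBoolToPointerBool(true, &versioned)
	fmt.Println(*versioned) // true
}
```
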
@@ -302,12 +302,12 @@ func DeepCopy_v1alpha1_KubeletConfiguration(in interface{}, out interface{}, c *
 	out.RuntimeCgroups = in.RuntimeCgroups
 	out.SystemCgroups = in.SystemCgroups
 	out.CgroupRoot = in.CgroupRoot
-	if in.CgroupsPerQOS != nil {
-		in, out := &in.CgroupsPerQOS, &out.CgroupsPerQOS
+	if in.ExperimentalCgroupsPerQOS != nil {
+		in, out := &in.ExperimentalCgroupsPerQOS, &out.ExperimentalCgroupsPerQOS
 		*out = new(bool)
 		**out = **in
 	} else {
-		out.CgroupsPerQOS = nil
+		out.ExperimentalCgroupsPerQOS = nil
 	}
 	out.CgroupDriver = in.CgroupDriver
 	out.ContainerRuntime = in.ContainerRuntime

@@ -308,7 +308,7 @@ func DeepCopy_componentconfig_KubeletConfiguration(in interface{}, out interface
 	out.CloudProvider = in.CloudProvider
 	out.CloudConfigFile = in.CloudConfigFile
 	out.KubeletCgroups = in.KubeletCgroups
-	out.CgroupsPerQOS = in.CgroupsPerQOS
+	out.ExperimentalCgroupsPerQOS = in.ExperimentalCgroupsPerQOS
 	out.CgroupDriver = in.CgroupDriver
 	out.RuntimeCgroups = in.RuntimeCgroups
 	out.SystemCgroups = in.SystemCgroups

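The two deep-copy hunks differ because the versioned type holds a `*bool`, which must be reallocated so the copy does not alias the original, while the internal type holds a plain `bool`, where simple assignment suffices. A small sketch of why the pointer case needs the extra allocation; the types are illustrative:

```go
package main

import "fmt"

type versioned struct{ Flag *bool }

// deepCopy allocates a fresh bool so mutating the copy cannot reach
// back into the source, mirroring the generated code above.
func deepCopy(in versioned) versioned {
	var out versioned
	if in.Flag != nil {
		out.Flag = new(bool)
		*out.Flag = *in.Flag
	}
	return out
}

func main() {
	v := true
	src := versioned{Flag: &v}

	shallow := src        // copies the pointer: still aliased
	deep := deepCopy(src) // copies the value: independent

	*shallow.Flag = false
	fmt.Println(*src.Flag)  // false - the shallow copy aliased the source
	fmt.Println(*deep.Flag) // true  - the deep copy is unaffected
}
```
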
@@ -2600,7 +2600,7 @@ var OpenAPIDefinitions *common.OpenAPIDefinitions = &common.OpenAPIDefinitions{
 					Format: "",
 				},
 			},
-			"cgroupsPerQOS": {
+			"experimentalCgroupsPerQOS": {
 				SchemaProps: spec.SchemaProps{
 					Description: "Enable QoS based Cgroup hierarchy: top level cgroups for QoS Classes And all Burstable and BestEffort pods are brought up under their specific top level QoS cgroup.",
 					Type: []string{"boolean"},

@@ -14428,7 +14428,7 @@ var OpenAPIDefinitions *common.OpenAPIDefinitions = &common.OpenAPIDefinitions{
 					Format: "",
 				},
 			},
-			"cgroupsPerQOS": {
+			"experimentalCgroupsPerQOS": {
 				SchemaProps: spec.SchemaProps{
 					Description: "Enable QoS based Cgroup hierarchy: top level cgroups for QoS Classes And all Burstable and BestEffort pods are brought up under their specific top level QoS cgroup.",
 					Type: []string{"boolean"},

@@ -435,7 +435,7 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
 		nodeStatusUpdateFrequency: kubeCfg.NodeStatusUpdateFrequency.Duration,
 		os:            kubeDeps.OSInterface,
 		oomWatcher:    oomWatcher,
-		cgroupsPerQOS: kubeCfg.CgroupsPerQOS,
+		cgroupsPerQOS: kubeCfg.ExperimentalCgroupsPerQOS,
 		cgroupRoot:    kubeCfg.CgroupRoot,
 		mounter:       kubeDeps.Mounter,
 		writer:        kubeDeps.Writer,

@@ -138,7 +138,7 @@ func GetHollowKubeletConfig(
 	c.EnableCustomMetrics = false
 	c.EnableDebuggingHandlers = true
 	c.EnableServer = true
-	c.CgroupsPerQOS = false
+	c.ExperimentalCgroupsPerQOS = false
 	// hairpin-veth is used to allow hairpin packets. Note that this deviates from
 	// what the "real" kubelet currently does, because there's no way to
 	// set promiscuous mode on docker0.

@@ -224,7 +224,7 @@ func RegisterNodeFlags() {
 	// TODO(random-liu): Find someway to get kubelet configuration, and automatic config and filter test based on the configuration.
 	flag.BoolVar(&TestContext.DisableKubenet, "disable-kubenet", false, "If true, start kubelet without kubenet. (default false)")
 	flag.StringVar(&TestContext.EvictionHard, "eviction-hard", "memory.available<250Mi,nodefs.available<10%,nodefs.inodesFree<5%", "The hard eviction thresholds. If set, pods get evicted when the specified resources drop below the thresholds.")
-	flag.BoolVar(&TestContext.CgroupsPerQOS, "cgroups-per-qos", false, "Enable creation of QoS cgroup hierarchy, if true top level QoS and pod cgroups are created.")
+	flag.BoolVar(&TestContext.CgroupsPerQOS, "experimental-cgroups-per-qos", false, "Enable creation of QoS cgroup hierarchy, if true top level QoS and pod cgroups are created.")
 	flag.StringVar(&TestContext.CgroupDriver, "cgroup-driver", "", "Driver that the kubelet uses to manipulate cgroups on the host. Possible values: 'cgroupfs', 'systemd'")
 	flag.StringVar(&TestContext.ManifestPath, "manifest-path", "", "The path to the static pod manifest file.")
 	flag.BoolVar(&TestContext.PrepullImages, "prepull-images", true, "If true, prepull images so image pull failures do not cause test failures.")

@@ -5,5 +5,5 @@ GCE_PROJECT=k8s-jkns-ci-node-e2e
 CLEANUP=true
 GINKGO_FLAGS='--skip="\[Flaky\]"'
 SETUP_NODE=false
-TEST_ARGS='--feature-gates=DynamicKubeletConfig=true --cgroups-per-qos=true'
+TEST_ARGS='--feature-gates=DynamicKubeletConfig=true --experimental-cgroups-per-qos=true'
 PARALLELISM=1

@@ -5,5 +5,5 @@ GCE_PROJECT=k8s-jkns-ci-node-e2e
 CLEANUP=true
 GINKGO_FLAGS='--skip="\[Flaky\]|\[Serial\]"'
 SETUP_NODE=false
-TEST_ARGS=--cgroups-per-qos=true
+TEST_ARGS=--experimental-cgroups-per-qos=true
 TIMEOUT=1h

@@ -5,4 +5,4 @@ GCE_PROJECT=k8s-jkns-ci-node-e2e
 CLEANUP=true
 GINKGO_FLAGS='--focus="\[Flaky\]"'
 SETUP_NODE=false
-TEST_ARGS=--cgroups-per-qos=true
+TEST_ARGS=--experimental-cgroups-per-qos=true

@@ -5,5 +5,5 @@ GCE_PROJECT=k8s-jkns-pr-node-e2e
 CLEANUP=true
 GINKGO_FLAGS='--skip="\[Flaky\]|\[Slow\]|\[Serial\]" --flakeAttempts=2'
 SETUP_NODE=false
-TEST_ARGS=--cgroups-per-qos=true
+TEST_ARGS=--experimental-cgroups-per-qos=true

@@ -5,6 +5,6 @@ GCE_PROJECT=k8s-jkns-ci-node-e2e
 CLEANUP=true
 GINKGO_FLAGS='--focus="\[Serial\]" --skip="\[Flaky\]|\[Benchmark\]"'
 SETUP_NODE=false
-TEST_ARGS='--feature-gates=DynamicKubeletConfig=true --cgroups-per-qos=true'
+TEST_ARGS='--feature-gates=DynamicKubeletConfig=true --experimental-cgroups-per-qos=true'
 PARALLELISM=1
 TIMEOUT=3h

@@ -18,4 +18,4 @@ CLEANUP=true
 # If true, current user will be added to the docker group on test node
 SETUP_NODE=false
 # If true QoS Cgroup Hierarchy is created and tests specifc to the cgroup hierarchy run
-TEST_ARGS=--cgroups-per-qos=true
+TEST_ARGS=--experimental-cgroups-per-qos=true

@@ -223,7 +223,7 @@ func (e *E2EServices) startKubelet() (*server, error) {
 	}
 	if framework.TestContext.CgroupsPerQOS {
 		cmdArgs = append(cmdArgs,
-			"--cgroups-per-qos", "true",
+			"--experimental-cgroups-per-qos", "true",
 			"--cgroup-root", "/",
 		)
 	}

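The test-harness hunk above simply threads the framework's flag through to the kubelet command line, pinning `--cgroup-root` to `/` so pod cgroups have a known parent when the QoS hierarchy is enabled. A compressed sketch of the pattern under those assumptions; the real harness builds a richer server object rather than calling exec.Command directly:

```go
package main

import (
	"fmt"
	"os/exec"
)

// buildKubeletArgs mirrors the conditional flag-appending in the hunk
// above, using the renamed experimental flag.
func buildKubeletArgs(cgroupsPerQOS bool) []string {
	cmdArgs := []string{"--v=4"}
	if cgroupsPerQOS {
		cmdArgs = append(cmdArgs,
			"--experimental-cgroups-per-qos", "true",
			"--cgroup-root", "/",
		)
	}
	return cmdArgs
}

func main() {
	cmd := exec.Command("kubelet", buildKubeletArgs(true)...)
	fmt.Println(cmd.Args)
}
```
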