mirror of https://github.com/k3s-io/k3s

Usernetes

commit 6573b80097 (parent 652e3de373)
@@ -47,6 +47,7 @@ import (
 	utilnode "k8s.io/kubernetes/pkg/util/node"
 	utilsysctl "k8s.io/kubernetes/pkg/util/sysctl"
 	"k8s.io/utils/exec"
+	rsystem "github.com/opencontainers/runc/libcontainer/system"
 
 	"k8s.io/klog"
 )
@@ -258,6 +259,12 @@ func newProxyServer(
 	iptInterface.AddReloadFunc(proxier.Sync)
 
+	var connTracker Conntracker
+	if !rsystem.RunningInUserNS() {
+		// if we are in userns, sysctl does not work and connTracker should be kept nil
+		connTracker = &realConntracker{}
+	}
+
 	return &ProxyServer{
 		Client:      client,
 		EventClient: eventClient,
@@ -269,7 +276,7 @@ func newProxyServer(
 		Broadcaster:            eventBroadcaster,
 		Recorder:               recorder,
 		ConntrackConfiguration: config.Conntrack,
-		Conntracker:            &realConntracker{},
+		Conntracker:            connTracker,
 		ProxyMode:              proxyMode,
 		NodeRef:                nodeRef,
 		MetricsBindAddress:     config.MetricsBindAddress,

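Every hunk in this commit gates an error path on rsystem.RunningInUserNS(); here the Conntracker is left nil because conntrack tuning happens via sysctl, which an unprivileged user namespace cannot write. For reference, a minimal sketch of the usual detection technique follows (an assumption about the general approach, not a copy of runc's source): a process is inside a user namespace when /proc/self/uid_map exists and contains anything other than the identity mapping of the initial namespace.

package main

import (
	"fmt"
	"os"
	"strings"
)

// runningInUserNS reports whether this process appears to run inside a
// user namespace, judging by /proc/self/uid_map.
func runningInUserNS() bool {
	data, err := os.ReadFile("/proc/self/uid_map")
	if err != nil {
		// uid_map is absent on kernels without user-namespace support,
		// so we cannot be inside one.
		return false
	}
	// The initial namespace maps every uid onto itself with the single
	// line "0 0 4294967295"; any other content means a user namespace.
	f := strings.Fields(string(data))
	return !(len(f) == 3 && f[0] == "0" && f[1] == "0" && f[2] == "4294967295")
}

func main() {
	fmt.Println("in userns:", runningInUserNS())
}
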
@@ -28,6 +28,7 @@ import (
 	libcontainercgroups "github.com/opencontainers/runc/libcontainer/cgroups"
 	cgroupfs "github.com/opencontainers/runc/libcontainer/cgroups/fs"
 	libcontainerconfigs "github.com/opencontainers/runc/libcontainer/configs"
+	rsystem "github.com/opencontainers/runc/libcontainer/system"
 	"k8s.io/klog"
 
 	"k8s.io/apimachinery/pkg/util/sets"
@@ -97,8 +98,9 @@ func (l *libcontainerAdapter) newManager(cgroups *libcontainerconfigs.Cgroup, pa
 	switch l.cgroupManagerType {
 	case libcontainerCgroupfs:
 		return &cgroupfs.Manager{
-			Cgroups: cgroups,
-			Paths:   paths,
+			Cgroups:  cgroups,
+			Rootless: rsystem.RunningInUserNS(),
+			Paths:    paths,
 		}, nil
 	}
 	return nil, fmt.Errorf("invalid cgroup manager configuration")
@@ -368,7 +370,9 @@ func (m *cgroupManagerImpl) Create(cgroupConfig *CgroupConfig) error {
 	// in the tasks file. We use the function to create all the required
 	// cgroup files but not attach any "real" pid to the cgroup.
 	if err := manager.Apply(-1); err != nil {
-		return err
+		if !rsystem.RunningInUserNS() {
+			return err
+		}
 	}
 
 	// it may confuse why we call set after we do apply, but the issue is that runc

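The hunks above repeat one idiom: fail hard normally, but tolerate the error when rootless, because an unprivileged user cannot write to a cgroup hierarchy it does not own. The same idiom recurs in the kubelet and proxy hunks below; a hypothetical helper could factor it out (ignoreInUserNS is illustrative and not part of this patch):

// ignoreInUserNS downgrades err to a logged warning when running inside
// a user namespace, where cgroup writes are expected to fail unless the
// cgroup filesystem has been chowned to the unprivileged user.
func ignoreInUserNS(op string, err error) error {
	if err == nil || !rsystem.RunningInUserNS() {
		return err
	}
	klog.Warningf("%s failed (ignored: running in a user namespace): %v", op, err)
	return nil
}

With it, the Apply hunk would read: if err := ignoreInUserNS("cgroup apply", manager.Apply(-1)); err != nil { return err }.
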
@@ -32,6 +32,7 @@ import (
 	"github.com/opencontainers/runc/libcontainer/cgroups"
 	"github.com/opencontainers/runc/libcontainer/cgroups/fs"
 	"github.com/opencontainers/runc/libcontainer/configs"
+	rsystem "github.com/opencontainers/runc/libcontainer/system"
 	"k8s.io/klog"
 
 	"k8s.io/api/core/v1"
@@ -375,7 +376,11 @@ func setupKernelTunables(option KernelTunableBehavior) error {
 			klog.V(2).Infof("Updating kernel flag: %v, expected value: %v, actual value: %v", flag, expectedValue, val)
 			err = sysctl.SetSysctl(flag, expectedValue)
 			if err != nil {
-				errList = append(errList, err)
+				if rsystem.RunningInUserNS() {
+					klog.Warningf("Updating kernel flag failed: %v: %v", flag, err)
+				} else {
+					errList = append(errList, err)
+				}
 			}
 		}
 	}
@@ -461,13 +466,20 @@ func (cm *containerManagerImpl) setupNode(activePods ActivePodsFunc) error {
 			},
 		}
 		cont.ensureStateFunc = func(_ *fs.Manager) error {
-			return ensureProcessInContainerWithOOMScore(os.Getpid(), qos.KubeletOOMScoreAdj, &manager)
+			err := ensureProcessInContainerWithOOMScore(os.Getpid(), qos.KubeletOOMScoreAdj, &manager)
+			if rsystem.RunningInUserNS() {
+				// if we are in userns, cgroups might not be available
+				err = nil
+			}
+			return err
 		}
 		systemContainers = append(systemContainers, cont)
 	} else {
 		cm.periodicTasks = append(cm.periodicTasks, func() {
 			if err := ensureProcessInContainerWithOOMScore(os.Getpid(), qos.KubeletOOMScoreAdj, nil); err != nil {
-				klog.Error(err)
+				if !rsystem.RunningInUserNS() {
+					klog.Error(err)
+				}
 				return
 			}
 			cont, err := getContainer(os.Getpid())

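setupKernelTunables is tolerated here because setting a sysctl ultimately means writing a file under /proc/sys, which belongs to the initial namespace's root and is typically read-only inside a user namespace. A minimal sketch of that mechanism, assuming the usual dotted-name-to-path convention (not the exact utilsysctl code):

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strconv"
	"strings"
)

// setSysctl writes value to the /proc/sys file matching the dotted
// sysctl name, e.g. "vm.overcommit_memory" -> /proc/sys/vm/overcommit_memory.
func setSysctl(name string, value int) error {
	path := filepath.Join("/proc/sys", strings.Replace(name, ".", "/", -1))
	return os.WriteFile(path, []byte(strconv.Itoa(value)), 0o644)
}

func main() {
	// Inside a user namespace this write fails with EPERM/EACCES, which
	// is why the hunk above logs a warning instead of recording an error.
	if err := setSysctl("vm.overcommit_memory", 1); err != nil {
		fmt.Println("sysctl failed:", err)
	}
}
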
@@ -28,6 +28,7 @@ import (
 	units "github.com/docker/go-units"
 	cgroupfs "github.com/opencontainers/runc/libcontainer/cgroups/fs"
+	rsystem "github.com/opencontainers/runc/libcontainer/system"
 	"k8s.io/api/core/v1"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/kubernetes/pkg/api/v1/resource"
@@ -82,7 +83,9 @@ func (m *qosContainerManagerImpl) Start(getNodeAllocatable func() v1.ResourceLis
 	cm := m.cgroupManager
 	rootContainer := m.cgroupRoot
 	if !cm.Exists(rootContainer) {
-		return fmt.Errorf("root container %v doesn't exist", rootContainer)
+		if !rsystem.RunningInUserNS() {
+			return fmt.Errorf("root container %v doesn't exist", rootContainer)
+		}
 	}
 
 	// Top level for Qos containers are created only for Burstable
@@ -296,15 +299,23 @@ func (m *qosContainerManagerImpl) UpdateCgroups() error {
 		}
 	}
 
+	updateSuccess := true
 	for _, config := range qosConfigs {
 		err := m.cgroupManager.Update(config)
 		if err != nil {
-			klog.Errorf("[ContainerManager]: Failed to update QoS cgroup configuration")
-			return err
+			if rsystem.RunningInUserNS() {
+				// if we are in userns, cgroups might not be available
+				updateSuccess = false
+			} else {
+				klog.Errorf("[ContainerManager]: Failed to update QoS cgroup configuration")
+				return err
+			}
 		}
 	}
 
-	klog.V(4).Infof("[ContainerManager]: Updated QoS cgroup configuration")
+	if updateSuccess {
+		klog.V(4).Infof("[ContainerManager]: Updated QoS cgroup configuration")
+	}
 	return nil
 }

@@ -27,6 +27,7 @@ import (
 	"github.com/opencontainers/runc/libcontainer/cgroups/fs"
 	"github.com/opencontainers/runc/libcontainer/configs"
+	rsystem "github.com/opencontainers/runc/libcontainer/system"
 	utilversion "k8s.io/apimachinery/pkg/util/version"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/klog"
@@ -95,7 +96,10 @@ func (m *containerManager) doWork() {
 	// 1. Ensure processes run in the cgroups if m.cgroupsManager is not nil.
 	// 2. Ensure processes have the OOM score applied.
 	if err := kubecm.EnsureDockerInContainer(version, dockerOOMScoreAdj, m.cgroupsManager); err != nil {
-		klog.Errorf("Unable to ensure the docker processes run in the desired containers: %v", err)
+		// if we are in userns, the operation is likely to fail unless cgroupfs is properly chowned.
+		if !rsystem.RunningInUserNS() {
+			klog.Errorf("Unable to ensure the docker processes run in the desired containers: %v", err)
+		}
 	}
 }

@@ -27,6 +27,8 @@ import (
 	dockertypes "github.com/docker/docker/api/types"
 	dockercontainer "github.com/docker/docker/api/types/container"
 	dockerfilters "github.com/docker/docker/api/types/filters"
+	rsystem "github.com/opencontainers/runc/libcontainer/system"
+
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 	"k8s.io/klog"
 	runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
@@ -535,11 +537,13 @@ func (ds *dockerService) ListPodSandbox(_ context.Context, r *runtimeapi.ListPod
 		checkpoint := NewPodSandboxCheckpoint("", "", &CheckpointData{})
 		err := ds.checkpointManager.GetCheckpoint(id, checkpoint)
 		if err != nil {
-			klog.Errorf("Failed to retrieve checkpoint for sandbox %q: %v", id, err)
-			if err == errors.ErrCorruptCheckpoint {
-				err = ds.checkpointManager.RemoveCheckpoint(id)
-				if err != nil {
-					klog.Errorf("Failed to delete corrupt checkpoint for sandbox %q: %v", id, err)
+			if !rsystem.RunningInUserNS() {
+				klog.Errorf("Failed to retrieve checkpoint for sandbox %q: %v", id, err)
+				if err == errors.ErrCorruptCheckpoint {
+					err = ds.checkpointManager.RemoveCheckpoint(id)
+					if err != nil {
+						klog.Errorf("Failed to delete corrupt checkpoint for sandbox %q: %v", id, err)
+					}
 				}
 			}
 			continue

@@ -32,6 +32,7 @@ import (
 	cadvisorapi "github.com/google/cadvisor/info/v1"
 	cadvisorapiv2 "github.com/google/cadvisor/info/v2"
+	rsystem "github.com/opencontainers/runc/libcontainer/system"
 	"k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/fields"
@@ -1514,10 +1515,13 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error {
 			break
 		}
 	}
 
+	// TODO(AkihiroSuda): implement rootless cgroup manager that can emulate Exists() properly
+
 	// Don't kill containers in pod if pod's cgroups already
 	// exists or the pod is running for the first time
 	podKilled := false
-	if !pcm.Exists(pod) && !firstSync {
+	if !pcm.Exists(pod) && !firstSync && !rsystem.RunningInUserNS() {
 		if err := kl.killPod(pod, nil, podStatus, nil); err == nil {
 			podKilled = true
 		}
@@ -1536,7 +1540,9 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error {
 		}
 		if err := pcm.EnsureExists(pod); err != nil {
 			kl.recorder.Eventf(pod, v1.EventTypeWarning, events.FailedToCreatePodContainer, "unable to ensure pod container exists: %v", err)
-			return fmt.Errorf("failed to ensure that the pod: %v cgroups exist and are correctly applied: %v", pod.UID, err)
+			if !rsystem.RunningInUserNS() {
+				return fmt.Errorf("failed to ensure that the pod: %v cgroups exist and are correctly applied: %v", pod.UID, err)
+			}
 		}
 	}
 }

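The TODO above points at a cleaner design than per-call-site checks: hide the rootless behavior behind the pod container manager itself. A hypothetical sketch of that direction (the wrapper type is illustrative, assuming the pod container manager interface exposes Exists(pod *v1.Pod) bool):

// rootlessPodContainerManager wraps a real pod container manager and
// reports pod cgroups as existing when rootless, so syncPod naturally
// skips the kill/ensure paths without scattered user-namespace checks.
type rootlessPodContainerManager struct {
	cm.PodContainerManager // assumed embedded interface
}

func (p rootlessPodContainerManager) Exists(pod *v1.Pod) bool {
	if rsystem.RunningInUserNS() {
		return true
	}
	return p.PodContainerManager.Exists(pod)
}
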
@@ -21,7 +21,9 @@ import (
 	"k8s.io/klog"
 
+	rsystem "github.com/opencontainers/runc/libcontainer/system"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
 	statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
 	"k8s.io/kubernetes/pkg/kubelet/util"
 )
@@ -73,7 +75,13 @@ func (sp *summaryProviderImpl) Get(updateStats bool) (*statsapi.Summary, error)
 	nodeConfig := sp.provider.GetNodeConfig()
 	rootStats, networkStats, err := sp.provider.GetCgroupStats("/", updateStats)
 	if err != nil {
-		return nil, fmt.Errorf("failed to get root cgroup stats: %v", err)
+		if !rsystem.RunningInUserNS() {
+			return nil, fmt.Errorf("failed to get root cgroup stats: %v", err)
+		}
+		// if we are in userns, cgroups might not be available
+		klog.Errorf("failed to get root cgroup stats: %v", err)
+		rootStats = &statsapi.ContainerStats{}
+		networkStats = &statsapi.NetworkStats{}
 	}
 	rootFsStats, err := sp.provider.RootFsStats()
 	if err != nil {

@@ -25,6 +25,7 @@ import (
 	"sync/atomic"
 	"time"
 
+	rsystem "github.com/opencontainers/runc/libcontainer/system"
 	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/types"
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
@@ -179,7 +180,10 @@ func NewCustomProxier(loadBalancer LoadBalancer, listenIP net.IP, iptables iptab
 	err = setRLimit(64 * 1000)
 	if err != nil {
-		return nil, fmt.Errorf("failed to set open file handler limit: %v", err)
+		if !rsystem.RunningInUserNS() {
+			return nil, fmt.Errorf("failed to set open file handler limit to 64000: %v", err)
+		}
+		klog.Errorf("failed to set open file handler limit to 64000: %v", err)
 	}
 
 	proxyPorts := newPortAllocator(pr)

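The tolerated failure here is expected: lowering a limit or raising the soft limit up to the hard limit is always allowed, but raising the hard limit requires CAP_SYS_RESOURCE in the initial user namespace, which a rootless process never has. A sketch of what a setRLimit helper like the one above plausibly does (an assumption, not the proxier's exact code):

package main

import (
	"fmt"
	"syscall"
)

// setRLimit sets both the soft and hard RLIMIT_NOFILE to limit; the
// hard-limit raise is what fails for rootless processes.
func setRLimit(limit uint64) error {
	return syscall.Setrlimit(syscall.RLIMIT_NOFILE, &syscall.Rlimit{Cur: limit, Max: limit})
}

func main() {
	if err := setRLimit(64 * 1000); err != nil {
		fmt.Println("failed to raise RLIMIT_NOFILE:", err)
	}
}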