mirror of https://github.com/k3s-io/k3s
Merge pull request #62206 from yujuhong/rm-rkt-refs
Automatic merge from submit-queue (batch tested with PRs 62192, 61866, 62206, 62360). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Remove rkt references in the codebase

```release-note
None
```
commit 1dc6e87f57
```diff
@@ -251,7 +251,7 @@ define TEST_E2E_NODE_HELP_INFO
 # GUBERNATOR: For REMOTE=true only. Produce link to Gubernator to view logs.
 # Defaults to false.
 # PARALLELISM: The number of gingko nodes to run. Defaults to 8.
-# RUNTIME: Container runtime to use (eg. docker, rkt, remote).
+# RUNTIME: Container runtime to use (eg. docker, remote).
 # Defaults to "docker".
 # CONTAINER_RUNTIME_ENDPOINT: remote container endpoint to connect to.
 # Used when RUNTIME is set to "remote".
```
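The help text documents the knobs for the node e2e make target: RUNTIME selects the container runtime (defaulting to "docker"), PARALLELISM the number of ginkgo nodes, and CONTAINER_RUNTIME_ENDPOINT only matters once RUNTIME is "remote". A minimal Go sketch of how those documented defaults could be resolved from the environment (illustrative only, not the Makefile's actual logic):

```go
package main

import (
	"fmt"
	"os"
)

// getenvDefault returns the value of the environment variable if it is set,
// otherwise the documented default.
func getenvDefault(key, def string) string {
	if v := os.Getenv(key); v != "" {
		return v
	}
	return def
}

func main() {
	// Defaults taken from the help text above; the resolution logic itself is illustrative.
	runtime := getenvDefault("RUNTIME", "docker")
	parallelism := getenvDefault("PARALLELISM", "8")
	endpoint := os.Getenv("CONTAINER_RUNTIME_ENDPOINT") // only consulted when RUNTIME is "remote"

	fmt.Printf("RUNTIME=%s PARALLELISM=%s\n", runtime, parallelism)
	if runtime == "remote" {
		fmt.Printf("CONTAINER_RUNTIME_ENDPOINT=%s\n", endpoint)
	}
}
```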
```diff
@@ -83,7 +83,7 @@ type RuntimeStats struct {
 const (
 	// SystemContainerKubelet is the container name for the system container tracking Kubelet usage.
 	SystemContainerKubelet = "kubelet"
-	// SystemContainerRuntime is the container name for the system container tracking the runtime (e.g. docker or rkt) usage.
+	// SystemContainerRuntime is the container name for the system container tracking the runtime (e.g. docker) usage.
 	SystemContainerRuntime = "runtime"
 	// SystemContainerMisc is the container name for the system container tracking non-kubernetes processes.
 	SystemContainerMisc = "misc"
```
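These constants are the well-known names under which the kubelet reports resource usage for its system containers; only the comment on SystemContainerRuntime changes here. A small self-contained sketch (hypothetical cgroup paths and helper, not the kubelet's stats code) of how such names can key a lookup:

```go
package main

import "fmt"

// Well-known system container names, mirroring the constants in the hunk above.
const (
	SystemContainerKubelet = "kubelet"
	SystemContainerRuntime = "runtime"
	SystemContainerMisc    = "misc"
)

// cgroupForSystemContainer is a hypothetical lookup from the well-known name to
// the cgroup whose usage would be reported under it.
func cgroupForSystemContainer(name string) (string, bool) {
	m := map[string]string{
		SystemContainerKubelet: "/kubelet.slice",
		SystemContainerRuntime: "/runtime.slice", // the container runtime, e.g. docker
		SystemContainerMisc:    "/system.slice",
	}
	cg, ok := m[name]
	return cg, ok
}

func main() {
	for _, name := range []string{SystemContainerKubelet, SystemContainerRuntime, SystemContainerMisc} {
		if cg, ok := cgroupForSystemContainer(name); ok {
			fmt.Printf("%s -> %s\n", name, cg)
		}
	}
}
```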
```diff
@@ -198,7 +198,7 @@ type PodPair struct {
 
 // ContainerID is a type that identifies a container.
 type ContainerID struct {
-	// The type of the container runtime. e.g. 'docker', 'rkt'.
+	// The type of the container runtime. e.g. 'docker'.
 	Type string
 	// The identification of the container, this is comsumable by
 	// the underlying container runtime. (Note that the container
```
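ContainerID pairs a runtime type with a runtime-specific identifier; the rkt example simply drops out of the comment. By convention such IDs are rendered as `type://id` (e.g. `docker://abc123`), which is how they surface in pod status. A standalone sketch of that round trip, with a hypothetical parse helper and an assumed `ID` field name (the hunk only shows `Type`):

```go
package main

import (
	"fmt"
	"strings"
)

// ContainerID mirrors the struct in the hunk above: a runtime type plus the
// runtime-specific identifier.
type ContainerID struct {
	Type string
	ID   string
}

// String renders the conventional "type://id" form, e.g. "docker://abc123".
func (c ContainerID) String() string {
	return fmt.Sprintf("%s://%s", c.Type, c.ID)
}

// parseContainerID is a hypothetical inverse of String, shown only to
// illustrate the convention; the real kubelet has its own helpers.
func parseContainerID(s string) (ContainerID, error) {
	parts := strings.SplitN(s, "://", 2)
	if len(parts) != 2 || parts[0] == "" || parts[1] == "" {
		return ContainerID{}, fmt.Errorf("invalid container ID %q, expected type://id", s)
	}
	return ContainerID{Type: parts[0], ID: parts[1]}, nil
}

func main() {
	id := ContainerID{Type: "docker", ID: "abc123"}
	fmt.Println(id) // docker://abc123
	fmt.Println(parseContainerID("docker://abc123"))
}
```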
```diff
@@ -187,7 +187,7 @@ func (kl *Kubelet) updatePodCIDR(cidr string) {
 	}
 
 	// kubelet -> generic runtime -> runtime shim -> network plugin
-	// docker/rkt non-cri implementations have a passthrough UpdatePodCIDR
+	// docker/non-cri implementations have a passthrough UpdatePodCIDR
 	if err := kl.getRuntime().UpdatePodCIDR(cidr); err != nil {
 		glog.Errorf("Failed to update pod CIDR: %v", err)
 		return
```
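The surviving comment describes the call chain kubelet -> generic runtime -> runtime shim -> network plugin, with the non-CRI docker integration providing a passthrough UpdatePodCIDR. A minimal sketch of that passthrough shape, using stand-in interfaces rather than the kubelet's real types:

```go
package main

import "fmt"

// networkPlugin is a stand-in for the kubelet's network plugin interface.
type networkPlugin interface {
	SetPodCIDR(cidr string) error
}

// runtime is a stand-in for the container runtime interface the kubelet calls.
type runtime interface {
	UpdatePodCIDR(cidr string) error
}

// passthroughShim forwards UpdatePodCIDR straight to the network plugin,
// which is what the comment means by a "passthrough" implementation.
type passthroughShim struct {
	plugin networkPlugin
}

func (s *passthroughShim) UpdatePodCIDR(cidr string) error {
	return s.plugin.SetPodCIDR(cidr)
}

// printPlugin just records the CIDR it was handed.
type printPlugin struct{}

func (printPlugin) SetPodCIDR(cidr string) error {
	fmt.Println("network plugin got pod CIDR:", cidr)
	return nil
}

func main() {
	var rt runtime = &passthroughShim{plugin: printPlugin{}}
	if err := rt.UpdatePodCIDR("10.244.1.0/24"); err != nil {
		fmt.Println("failed to update pod CIDR:", err)
	}
}
```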
```diff
@@ -43,8 +43,6 @@ var _ = framework.KubeDescribe("InitContainer", func() {
 	})
 
 	It("should invoke init containers on a RestartNever pod", func() {
-		framework.SkipIfContainerRuntimeIs("rkt") // #25988
-
 		By("creating the pod")
 		name := "pod-init-" + string(uuid.NewUUID())
 		value := strconv.Itoa(time.Now().Nanosecond())
@@ -102,8 +100,6 @@ var _ = framework.KubeDescribe("InitContainer", func() {
 	})
 
 	It("should invoke init containers on a RestartAlways pod", func() {
-		framework.SkipIfContainerRuntimeIs("rkt") // #25988
-
 		By("creating the pod")
 		name := "pod-init-" + string(uuid.NewUUID())
 		value := strconv.Itoa(time.Now().Nanosecond())
@@ -165,8 +161,6 @@ var _ = framework.KubeDescribe("InitContainer", func() {
 	})
 
 	It("should not start app containers if init containers fail on a RestartAlways pod", func() {
-		framework.SkipIfContainerRuntimeIs("rkt") // #25988
-
 		By("creating the pod")
 		name := "pod-init-" + string(uuid.NewUUID())
 		value := strconv.Itoa(time.Now().Nanosecond())
@@ -275,8 +269,6 @@ var _ = framework.KubeDescribe("InitContainer", func() {
 	})
 
 	It("should not start app containers and fail the pod if init containers fail on a RestartNever pod", func() {
-		framework.SkipIfContainerRuntimeIs("rkt") // #25988
-
 		By("creating the pod")
 		name := "pod-init-" + string(uuid.NewUUID())
 		value := strconv.Itoa(time.Now().Nanosecond())
```
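Each of these specs was guarded by `framework.SkipIfContainerRuntimeIs("rkt")`; with rkt support removed, the guards (and the blank line after each) are deleted outright. As a rough idea of what such a guard does — assuming, as the flags in the next hunk suggest, that the runtime name lives on a shared test context — here is a sketch, not the framework's actual implementation:

```go
package main

import "fmt"

// testContext is a stand-in for the e2e framework's shared TestContext.
type testContext struct {
	ContainerRuntime string
}

var ctx = testContext{ContainerRuntime: "rkt"}

// skipf stands in for framework.Skipf; ginkgo aborts a spec via a panic that
// its runner recovers, which the deferred recover in main imitates.
func skipf(format string, args ...interface{}) {
	panic(fmt.Sprintf("SKIP: "+format, args...))
}

// skipIfContainerRuntimeIs skips the current spec when the configured runtime
// matches any of the given names — the shape of the guards removed above.
func skipIfContainerRuntimeIs(runtimes ...string) {
	for _, r := range runtimes {
		if r == ctx.ContainerRuntime {
			skipf("not supported under container runtime %q", r)
		}
	}
}

func main() {
	defer func() {
		if msg := recover(); msg != nil {
			fmt.Println(msg) // SKIP: not supported under container runtime "rkt"
		}
	}()
	skipIfContainerRuntimeIs("rkt") // the kind of guard this PR deletes
	fmt.Println("spec body would run here")
}
```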
```diff
@@ -209,7 +209,7 @@ func RegisterCommonFlags() {
 	flag.StringVar(&TestContext.ReportDir, "report-dir", "", "Path to the directory where the JUnit XML reports should be saved. Default is empty, which doesn't generate these reports.")
 	flag.Var(utilflag.NewMapStringBool(&TestContext.FeatureGates), "feature-gates", "A set of key=value pairs that describe feature gates for alpha/experimental features.")
 	flag.StringVar(&TestContext.Viper, "viper-config", "e2e", "The name of the viper config i.e. 'e2e' will read values from 'e2e.json' locally. All e2e parameters are meant to be configurable by viper.")
-	flag.StringVar(&TestContext.ContainerRuntime, "container-runtime", "docker", "The container runtime of cluster VM instances (docker/rkt/remote).")
+	flag.StringVar(&TestContext.ContainerRuntime, "container-runtime", "docker", "The container runtime of cluster VM instances (docker/remote).")
 	flag.StringVar(&TestContext.ContainerRuntimeEndpoint, "container-runtime-endpoint", "unix:///var/run/dockershim.sock", "The container runtime endpoint of cluster VM instances.")
 	flag.StringVar(&TestContext.ContainerRuntimeProcessName, "container-runtime-process-name", "dockerd", "The name of the container runtime process.")
 	flag.StringVar(&TestContext.ContainerRuntimePidFile, "container-runtime-pid-file", "/var/run/docker.pid", "The pid file of the container runtime.")
@@ -277,7 +277,7 @@ func RegisterNodeFlags() {
 	flag.StringVar(&TestContext.NodeName, "node-name", "", "Name of the node to run tests on.")
 	// TODO(random-liu): Move kubelet start logic out of the test.
 	// TODO(random-liu): Move log fetch logic out of the test.
-	// There are different ways to start kubelet (systemd, initd, docker, rkt, manually started etc.)
+	// There are different ways to start kubelet (systemd, initd, docker, manually started etc.)
 	// and manage logs (journald, upstart etc.).
 	// For different situation we need to mount different things into the container, run different commands.
 	// It is hard and unnecessary to deal with the complexity inside the test suite.
```
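RegisterCommonFlags binds every e2e setting to a field on the shared TestContext using the standard `flag` package; the change here only trims `rkt` from the `--container-runtime` help string. The same binding pattern in a self-contained form (hypothetical struct, only two of the flags shown):

```go
package main

import (
	"flag"
	"fmt"
)

// testContext mirrors the idea of the framework's TestContext: one struct
// whose fields are bound to command-line flags.
type testContext struct {
	ContainerRuntime         string
	ContainerRuntimeEndpoint string
}

func main() {
	var ctx testContext
	flag.StringVar(&ctx.ContainerRuntime, "container-runtime", "docker",
		"The container runtime of cluster VM instances (docker/remote).")
	flag.StringVar(&ctx.ContainerRuntimeEndpoint, "container-runtime-endpoint", "unix:///var/run/dockershim.sock",
		"The container runtime endpoint of cluster VM instances.")
	flag.Parse()

	fmt.Printf("runtime=%s endpoint=%s\n", ctx.ContainerRuntime, ctx.ContainerRuntimeEndpoint)
}
```

Invoking such a binary with `--container-runtime=remote` plus a matching `--container-runtime-endpoint` mirrors how the suite is pointed at a CRI runtime instead of the local docker daemon.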
```diff
@@ -505,8 +505,6 @@ var _ = SIGDescribe("Kubectl client", func() {
 		})
 
 		It("should support inline execution and attach", func() {
-			framework.SkipIfContainerRuntimeIs("rkt") // #23335
-
 			nsFlag := fmt.Sprintf("--namespace=%v", ns)
 
 			By("executing a command with run and attach with stdin")
@@ -1537,9 +1535,6 @@ metadata:
 		framework.ConformanceIt("should create a job from an image, then delete the job ", func() {
 			nsFlag := fmt.Sprintf("--namespace=%v", ns)
 
-			// The rkt runtime doesn't support attach, see #23335
-			framework.SkipIfContainerRuntimeIs("rkt")
-
 			By("executing a command with run --rm and attach with stdin")
 			t := time.NewTimer(runJobTimeout)
 			defer t.Stop()
```
```diff
@@ -152,9 +152,6 @@ func setOldKubeletConfig(f *framework.Framework, oldCfg *kubeletconfig.KubeletCo
 }
 
 func enableCPUManagerInKubelet(f *framework.Framework) (oldCfg *kubeletconfig.KubeletConfiguration) {
-	// Run only if the container runtime is not docker or remote (not rkt).
-	framework.RunIfContainerRuntimeIs("docker", "remote")
-
 	// Enable CPU Manager in Kubelet with static policy.
 	oldCfg, err := getCurrentKubeletConfig()
 	framework.ExpectNoError(err)
```
```diff
@@ -663,7 +663,6 @@ k8s.io/kubernetes/pkg/kubelet/pod,alex-mohr,1,
 k8s.io/kubernetes/pkg/kubelet/prober,alex-mohr,1,
 k8s.io/kubernetes/pkg/kubelet/prober/results,krousey,1,
 k8s.io/kubernetes/pkg/kubelet/qos,vishh,0,
-k8s.io/kubernetes/pkg/kubelet/rkt,apelisse,1,
 k8s.io/kubernetes/pkg/kubelet/secret,kevin-wangzefeng,1,
 k8s.io/kubernetes/pkg/kubelet/server,tallclair,0,
 k8s.io/kubernetes/pkg/kubelet/server/portforward,rkouj,0,
```
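The owners file is plain CSV: one row per test or package, an assignee, a numeric flag, and a trailing (often empty) field; dropping the `pkg/kubelet/rkt` row removes its owner entry along with the package. A short sketch that reads rows of this shape with `encoding/csv` (column meanings inferred from the rows above):

```go
package main

import (
	"encoding/csv"
	"fmt"
	"strings"
)

func main() {
	// Rows copied from the hunk above; the rkt row is the one being dropped.
	data := `k8s.io/kubernetes/pkg/kubelet/qos,vishh,0,
k8s.io/kubernetes/pkg/kubelet/secret,kevin-wangzefeng,1,
`
	r := csv.NewReader(strings.NewReader(data))
	rows, err := r.ReadAll()
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	for _, row := range rows {
		// Assumed layout: name, owner, numeric flag, trailing (often empty) field.
		fmt.Printf("name=%s owner=%s flag=%s\n", row[0], row[1], row[2])
	}
}
```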