Merge pull request #36919 from Random-Liu/remove-dependency-on-flags

Automatic merge from submit-queue

Node E2E: Remove kubelet related flags

This PR:
1. **Add `KubeletConfig` to the test context.** The configuration is dynamically retrieved from the kubelet's `/configz` endpoint after the test node registers itself (see the sketch after this list).
2. **Change tests to read `KubeletConfig` instead of the other kubelet-related fields in the test context.**
3. **Remove kubelet-related flags from node e2e.** Add a single `--kubelet-flags` flag to pass all kubelet flags together.
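
For reference, here is a minimal sketch of the `/configz` retrieval described in item 1. It is an illustration only: the localhost read-only port and the `componentconfig` wrapper key are assumptions, not details taken from this diff.

```go
package services

import (
	"encoding/json"
	"fmt"
	"net/http"

	"k8s.io/kubernetes/pkg/apis/componentconfig"
)

// getKubeletConfigz fetches the live kubelet configuration. It assumes the
// kubelet's read-only port (10255) is reachable on localhost and that the
// /configz handler wraps the configuration under a "componentconfig" key.
func getKubeletConfigz() (*componentconfig.KubeletConfiguration, error) {
	resp, err := http.Get("http://127.0.0.1:10255/configz")
	if err != nil {
		return nil, fmt.Errorf("failed to query /configz: %v", err)
	}
	defer resp.Body.Close()

	wrapper := struct {
		ComponentConfig componentconfig.KubeletConfiguration `json:"componentconfig"`
	}{}
	if err := json.NewDecoder(resp.Body).Decode(&wrapper); err != nil {
		return nil, fmt.Errorf("failed to decode kubelet config: %v", err)
	}
	return &wrapper.ComponentConfig, nil
}
```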

Why do we need this change?
1. This is required by the **node soaking test and node conformance test**.
  * **For the node soaking test:** one job will start the test environment, and another job will run the actual test again and again. It is hard to tell the test via flags how the kubelet is configured.
  * **For the node conformance test:** the kubelet is usually started by the user, so it is hard and error-prone to tell the test via flags how the kubelet is configured.
2. **No need to add another node e2e flag for each kubelet flag we want to set in the test.** Just add the kubelet flag directly to `--kubelet-flags` (a sketch of the accumulation behavior follows).
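
A self-contained sketch of that accumulation behavior, mirroring the new `args` type added by this PR (minus its `flag.Parsed()` guard); the simulated command line is illustrative:

```go
package main

import (
	"flag"
	"fmt"
	"strings"
)

// args accumulates whitespace-separated fields across repeated occurrences of
// the same flag, so multiple --kubelet-flags values concatenate instead of
// replacing each other.
type args []string

func (a *args) String() string { return fmt.Sprint(*a) }

func (a *args) Set(value string) error {
	*a = append(*a, strings.Fields(value)...)
	return nil
}

func main() {
	var kubeletArgs args
	fs := flag.NewFlagSet("example", flag.ExitOnError)
	fs.Var(&kubeletArgs, "kubelet-flags", "kubelet flags; may be repeated")
	// Simulated command line with two occurrences of --kubelet-flags.
	fs.Parse([]string{
		"--kubelet-flags=--cgroup-driver=systemd --experimental-cgroups-per-qos=true",
		"--kubelet-flags=--experimental-cri=true",
	})
	// Prints: [--cgroup-driver=systemd --experimental-cgroups-per-qos=true --experimental-cri=true]
	fmt.Println(kubeletArgs)
}
```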

In the future:
1. The kubelet start logic should be moved out of the test. We should have a standard kubelet launcher, a standard cloud-init node setup script, etc.
2. The test should validate the kubelet configuration (a hypothetical example check follows this list):
  * Is the configuration production ready?
  * Does the configuration have internal conflicts?
  * Is any configuration that conflicts with the node e2e/conformance test turned on (cloud provider, etc.)? (Some configurations need to be turned off when running the node e2e/conformance test.)
  * ...
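
A purely hypothetical sketch of what such a validation step could look like, with one example conflict check and one example production-readiness check; none of these rules are mandated by this PR:

```go
package services

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apis/componentconfig"
)

// validateForConformance illustrates the kind of checks a future validation
// step could run against the retrieved KubeletConfig. The rules are examples.
func validateForConformance(cfg *componentconfig.KubeletConfiguration) error {
	// Example conflict check: a configured cloud provider interferes with a
	// standalone test node and should be turned off.
	if cfg.CloudProvider != "" {
		return fmt.Errorf("cloud provider %q must be disabled for node e2e/conformance tests", cfg.CloudProvider)
	}
	// Example production-readiness check: hard eviction thresholds should be set.
	if len(cfg.EvictionHard) == 0 {
		return fmt.Errorf("no hard eviction thresholds configured")
	}
	return nil
}
```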

/cc @kubernetes/sig-node
Kubernetes Submit Queue 2016-11-17 11:32:57 -08:00 committed by GitHub
commit 70296869e9
23 changed files with 258 additions and 191 deletions

View File

@ -140,10 +140,9 @@ else
sudo -v || exit 1
fi
# If the flag --disable-kubenet is not set, set true by default.
if ! [[ $test_args =~ "--disable-kubenet" ]]; then
test_args="$test_args --disable-kubenet=true"
fi
# Do not use any network plugin by default. Users can override the flags with
# test_args.
test_args='--kubelet-flags="--network-plugin= --network-plugin-dir=" '$test_args
# Test using the host the script was run on
# Provided for backwards compatibility

View File

@ -318,6 +318,7 @@ kubelet-client-certificate
kubelet-client-key
kubelet-docker-endpoint
kubelet-enable-debugging-handlers
kubelet-flags
kubelet-host-network-sources
kubelet-https
kubelet-kubeconfig

View File

@ -39,6 +39,7 @@ go_library(
"//pkg/api/validation:go_default_library",
"//pkg/apimachinery/registered:go_default_library",
"//pkg/apis/apps:go_default_library",
"//pkg/apis/componentconfig:go_default_library",
"//pkg/apis/extensions:go_default_library",
"//pkg/client/clientset_generated/internalclientset:go_default_library",
"//pkg/client/clientset_generated/internalclientset/typed/core/internalversion:go_default_library",

View File

@ -23,6 +23,7 @@ import (
"github.com/onsi/ginkgo/config"
"github.com/spf13/viper"
"k8s.io/kubernetes/pkg/apis/componentconfig"
"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
"k8s.io/kubernetes/pkg/cloudprovider"
)
@ -106,25 +107,10 @@ type NodeTestContextType struct {
NodeName string
// NodeConformance indicates whether the test is running in node conformance mode.
NodeConformance bool
// DisableKubenet disables kubenet when starting kubelet.
DisableKubenet bool
// Whether to enable the QoS Cgroup Hierarchy or not
CgroupsPerQOS bool
// How the kubelet should interface with the cgroup hierarchy (cgroupfs or systemd)
CgroupDriver string
// The hard eviction thresholds
EvictionHard string
// ManifestPath is the static pod manifest path.
ManifestPath string
// PrepullImages indicates whether node e2e framework should prepull images.
PrepullImages bool
// Enable CRI integration.
EnableCRI bool
// ContainerRuntimeEndpoint is the endpoint of remote container runtime grpc server. This is mainly used for Remote CRI
// validation test.
ContainerRuntimeEndpoint string
// MounterPath is the path to the program to run to perform a mount
MounterPath string
// KubeletConfig is the kubelet configuration the test is running against.
KubeletConfig componentconfig.KubeletConfiguration
}
type CloudConfig struct {
@ -220,17 +206,7 @@ func RegisterNodeFlags() {
// For different situation we need to mount different things into the container, run different commands.
// It is hard and unnecessary to deal with the complexity inside the test suite.
flag.BoolVar(&TestContext.NodeConformance, "conformance", false, "If true, the test suite will not start kubelet, and fetch system log (kernel, docker, kubelet log etc.) to the report directory.")
// TODO(random-liu): Remove kubelet related flags when we move the kubelet start logic out of the test.
// TODO(random-liu): Find someway to get kubelet configuration, and automatic config and filter test based on the configuration.
flag.BoolVar(&TestContext.DisableKubenet, "disable-kubenet", false, "If true, start kubelet without kubenet. (default false)")
flag.StringVar(&TestContext.EvictionHard, "eviction-hard", "memory.available<250Mi,nodefs.available<10%,nodefs.inodesFree<5%", "The hard eviction thresholds. If set, pods get evicted when the specified resources drop below the thresholds.")
flag.BoolVar(&TestContext.CgroupsPerQOS, "experimental-cgroups-per-qos", false, "Enable creation of QoS cgroup hierarchy, if true top level QoS and pod cgroups are created.")
flag.StringVar(&TestContext.CgroupDriver, "cgroup-driver", "", "Driver that the kubelet uses to manipulate cgroups on the host. Possible values: 'cgroupfs', 'systemd'")
flag.StringVar(&TestContext.ManifestPath, "manifest-path", "", "The path to the static pod manifest file.")
flag.BoolVar(&TestContext.PrepullImages, "prepull-images", true, "If true, prepull images so image pull failures do not cause test failures.")
flag.BoolVar(&TestContext.EnableCRI, "enable-cri", false, "Enable Container Runtime Interface (CRI) integration.")
flag.StringVar(&TestContext.ContainerRuntimeEndpoint, "container-runtime-endpoint", "", "The endpoint of remote container runtime grpc server, mainly used for Remote CRI validation.")
flag.StringVar(&TestContext.MounterPath, "experimental-mounter-path", "", "Path of mounter binary. Leave empty to use the default mount.")
}
// overwriteFlagsWithViperConfig finds and writes values to flags using viper as input.

View File

@ -54,7 +54,7 @@ func makePodToVerifyCgroups(cgroupNames []cm.CgroupName) *api.Pod {
// convert the names to their literal cgroupfs forms...
cgroupFsNames := []string{}
for _, cgroupName := range cgroupNames {
if framework.TestContext.CgroupDriver == "systemd" {
if framework.TestContext.KubeletConfig.CgroupDriver == "systemd" {
cgroupFsNames = append(cgroupFsNames, cm.ConvertCgroupNameToSystemd(cgroupName, true))
} else {
cgroupFsNames = append(cgroupFsNames, string(cgroupName))
@ -103,7 +103,7 @@ func makePodToVerifyCgroups(cgroupNames []cm.CgroupName) *api.Pod {
// makePodToVerifyCgroupRemoved verifies the specified cgroup does not exist.
func makePodToVerifyCgroupRemoved(cgroupName cm.CgroupName) *api.Pod {
cgroupFsName := string(cgroupName)
if framework.TestContext.CgroupDriver == "systemd" {
if framework.TestContext.KubeletConfig.CgroupDriver == "systemd" {
cgroupFsName = cm.ConvertCgroupNameToSystemd(cm.CgroupName(cgroupName), true)
}
pod := &api.Pod{
@ -143,7 +143,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
Describe("QOS containers", func() {
Context("On enabling QOS cgroup hierarchy", func() {
It("Top level QoS containers should have been created", func() {
if !framework.TestContext.CgroupsPerQOS {
if !framework.TestContext.KubeletConfig.ExperimentalCgroupsPerQOS {
return
}
cgroupsToVerify := []cm.CgroupName{cm.CgroupName(qos.Burstable), cm.CgroupName(qos.BestEffort)}
@ -158,7 +158,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
Describe("Pod containers", func() {
Context("On scheduling a Guaranteed Pod", func() {
It("Pod containers should have been created under the cgroup-root", func() {
if !framework.TestContext.CgroupsPerQOS {
if !framework.TestContext.KubeletConfig.ExperimentalCgroupsPerQOS {
return
}
var (
@ -202,7 +202,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
})
Context("On scheduling a BestEffort Pod", func() {
It("Pod containers should have been created under the BestEffort cgroup", func() {
if !framework.TestContext.CgroupsPerQOS {
if !framework.TestContext.KubeletConfig.ExperimentalCgroupsPerQOS {
return
}
var (
@ -246,7 +246,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
})
Context("On scheduling a Burstable Pod", func() {
It("Pod containers should have been created under the Burstable cgroup", func() {
if !framework.TestContext.CgroupsPerQOS {
if !framework.TestContext.KubeletConfig.ExperimentalCgroupsPerQOS {
return
}
var (

View File

@ -223,7 +223,7 @@ func nodeHasDiskPressure(cs clientset.Interface) bool {
}
func evictionOptionIsSet() bool {
return len(framework.TestContext.EvictionHard) > 0
return len(framework.TestContext.KubeletConfig.EvictionHard) > 0
}
// TODO(random-liu): Use OSImage in node status to do the check.

View File

@ -20,7 +20,6 @@ package e2e_node
import (
"bytes"
"encoding/json"
"flag"
"fmt"
"io/ioutil"
@ -151,15 +150,8 @@ var _ = SynchronizedBeforeSuite(func() []byte {
// Reference common test to make the import valid.
commontest.CurrentSuite = commontest.NodeE2E
data, err := json.Marshal(&framework.TestContext.NodeTestContextType)
Expect(err).NotTo(HaveOccurred(), "should be able to serialize node test context.")
return data
}, func(data []byte) {
// The node test context is updated in the first function, update it on every test node.
err := json.Unmarshal(data, &framework.TestContext.NodeTestContextType)
Expect(err).NotTo(HaveOccurred(), "should be able to deserialize node test context.")
return nil
}, func([]byte) {
// update test context with node configuration.
Expect(updateTestContext()).To(Succeed(), "update test context with node config.")
})
@ -235,12 +227,20 @@ func updateTestContext() error {
if err != nil {
return fmt.Errorf("failed to get apiserver client: %v", err)
}
// Update test context with current node object.
node, err := getNode(client)
if err != nil {
return fmt.Errorf("failed to get node: %v", err)
}
// Initialize the node name
framework.TestContext.NodeName = node.Name
framework.TestContext.NodeName = node.Name // Set node name.
// Update test context with current kubelet configuration.
// This assumes that all tests which dynamically change the kubelet configuration
// must: 1) run in serial; 2) restore the kubelet configuration after the test.
kubeletCfg, err := getCurrentKubeletConfig()
if err != nil {
return fmt.Errorf("failed to get kubelet configuration: %v", err)
}
framework.TestContext.KubeletConfig = *kubeletCfg // Set kubelet config.
return nil
}
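
The comment above implies a pattern for tests that exercise dynamic kubelet configuration. A minimal sketch of that save/mutate/restore contract, assuming a hypothetical `setKubeletConfiguration` helper and the suite's usual Ginkgo/Gomega imports:

```go
// Sketch only: setKubeletConfiguration is a hypothetical helper that pushes a
// new configuration to the kubelet. [Serial] keeps the test out of parallel
// runs, per the contract described in updateTestContext.
var _ = framework.KubeDescribe("Dynamic kubelet config [Serial]", func() {
	It("restores the kubelet configuration after the test", func() {
		original := framework.TestContext.KubeletConfig // captured by updateTestContext
		modified := original
		modified.MaxPods = 20 // example mutation under test
		Expect(setKubeletConfiguration(&modified)).To(Succeed())
		defer func() {
			// Restore so the next test observes the configuration it expects.
			Expect(setKubeletConfiguration(&original)).To(Succeed())
		}()
		// ... assertions against the modified configuration ...
	})
})
```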

View File

@ -5,5 +5,6 @@ GCE_PROJECT=k8s-jkns-ci-node-e2e
CLEANUP=true
GINKGO_FLAGS='--skip="\[Flaky\]"'
SETUP_NODE=false
TEST_ARGS='--feature-gates=DynamicKubeletConfig=true --experimental-cgroups-per-qos=true'
TEST_ARGS='--feature-gates=DynamicKubeletConfig=true'
KUBELET_ARGS='--experimental-cgroups-per-qos=true --cgroup-root=/'
PARALLELISM=1

View File

@ -5,5 +5,6 @@ GCE_PROJECT=k8s-jkns-ci-node-e2e
CLEANUP=true
GINKGO_FLAGS='--skip="\[Flaky\]"'
SETUP_NODE=false
TEST_ARGS='--enable-cri=true --feature-gates=DynamicKubeletConfig=true,StreamingProxyRedirects=true'
TEST_ARGS='--feature-gates=DynamicKubeletConfig=true,StreamingProxyRedirects=true'
KUBELET_ARGS='--experimental-cri=true'
PARALLELISM=1

View File

@ -5,4 +5,5 @@ GCE_PROJECT=k8s-jkns-pr-node-e2e
CLEANUP=true
GINKGO_FLAGS='--skip="\[Flaky\]|\[Slow\]|\[Serial\]" --flakeAttempts=2'
SETUP_NODE=false
TEST_ARGS='--enable-cri=true --feature-gates="StreamingProxyRedirects=true"'
TEST_ARGS='--feature-gates=StreamingProxyRedirects=true'
KUBELET_ARGS='--experimental-cri=true'

View File

@ -5,6 +5,7 @@ GCE_PROJECT=k8s-jkns-ci-node-e2e
CLEANUP=true
GINKGO_FLAGS='--focus="\[Serial\]" --skip="\[Flaky\]|\[Benchmark\]"'
SETUP_NODE=false
TEST_ARGS='--enable-cri=true --feature-gates=DynamicKubeletConfig=true,StreamingProxyRedirects=true'
TEST_ARGS='--feature-gates=DynamicKubeletConfig=true,StreamingProxyRedirects=true'
KUBELET_ARGS='--experimental-cri=true'
PARALLELISM=1
TIMEOUT=3h

View File

@ -5,5 +5,6 @@ GCE_PROJECT=k8s-jkns-ci-node-e2e
CLEANUP=true
GINKGO_FLAGS='--skip="\[Flaky\]|\[Serial\]"'
SETUP_NODE=false
TEST_ARGS='--enable-cri=true --feature-gates="StreamingProxyRedirects=true"'
TEST_ARGS='--feature-gates=StreamingProxyRedirects=true'
KUBELET_ARGS='--experimental-cri=true'
TIMEOUT=1h

View File

@ -47,5 +47,5 @@ go run test/e2e_node/runner/remote/run_remote.go --logtostderr --vmodule=*=4 --
--images="$GCE_IMAGES" --image-project="$GCE_IMAGE_PROJECT" \
--image-config-file="$GCE_IMAGE_CONFIG_PATH" --cleanup="$CLEANUP" \
--results-dir="$ARTIFACTS" --ginkgo-flags="--nodes=$PARALLELISM $GINKGO_FLAGS" \
--test-timeout="$TIMEOUT" --setup-node="$SETUP_NODE" --test_args="$TEST_ARGS" \
--test-timeout="$TIMEOUT" --setup-node="$SETUP_NODE" --test_args="$TEST_ARGS --kubelet-flags=\"$KUBELET_ARGS\"" \
--instance-metadata="$GCE_INSTANCE_METADATA"

View File

@ -5,5 +5,5 @@ GCE_PROJECT=k8s-jkns-ci-node-e2e
CLEANUP=true
GINKGO_FLAGS='--skip="\[Flaky\]|\[Serial\]"'
SETUP_NODE=false
TEST_ARGS=--experimental-cgroups-per-qos=true
KUBELET_ARGS='--experimental-cgroups-per-qos=true --cgroup-root=/'
TIMEOUT=1h

View File

@ -5,4 +5,4 @@ GCE_PROJECT=k8s-jkns-ci-node-e2e
CLEANUP=true
GINKGO_FLAGS='--focus="\[Flaky\]"'
SETUP_NODE=false
TEST_ARGS=--experimental-cgroups-per-qos=true
KUBELET_ARGS='--experimental-cgroups-per-qos=true --cgroup-root=/'

View File

@ -5,5 +5,5 @@ GCE_PROJECT=k8s-jkns-pr-node-e2e
CLEANUP=true
GINKGO_FLAGS='--skip="\[Flaky\]|\[Slow\]|\[Serial\]" --flakeAttempts=2'
SETUP_NODE=false
TEST_ARGS=--experimental-cgroups-per-qos=true
KUBELET_ARGS='--experimental-cgroups-per-qos=true --cgroup-root=/'

View File

@ -5,6 +5,7 @@ GCE_PROJECT=k8s-jkns-ci-node-e2e
CLEANUP=true
GINKGO_FLAGS='--focus="\[Serial\]" --skip="\[Flaky\]|\[Benchmark\]"'
SETUP_NODE=false
TEST_ARGS='--feature-gates=DynamicKubeletConfig=true --experimental-cgroups-per-qos=true'
TEST_ARGS='--feature-gates=DynamicKubeletConfig=true'
KUBELET_ARGS='--experimental-cgroups-per-qos=true --cgroup-root=/'
PARALLELISM=1
TIMEOUT=3h

View File

@ -17,5 +17,9 @@ GCE_IMAGE_PROJECT=
CLEANUP=true
# If true, current user will be added to the docker group on test node
SETUP_NODE=false
# KUBELET_ARGS are the arguments passed to the kubelet. They override the corresponding
# default kubelet settings in the test framework and --kubelet-flags in TEST_ARGS.
# If true QoS Cgroup Hierarchy is created and tests specifc to the cgroup hierarchy run
TEST_ARGS=--experimental-cgroups-per-qos=true
KUBELET_ARGS='--experimental-cgroups-per-qos=true --cgroup-root=/'
# TEST_ARGS are args passed to node e2e test.
TEST_ARGS=''

View File

@ -37,14 +37,16 @@ import (
var _ = framework.KubeDescribe("MirrorPod", func() {
f := framework.NewDefaultFramework("mirror-pod")
Context("when create a mirror pod ", func() {
var ns, staticPodName, mirrorPodName string
var ns, manifestPath, staticPodName, mirrorPodName string
BeforeEach(func() {
ns = f.Namespace.Name
staticPodName = "static-pod-" + string(uuid.NewUUID())
mirrorPodName = staticPodName + "-" + framework.TestContext.NodeName
manifestPath = framework.TestContext.KubeletConfig.PodManifestPath
By("create the static pod")
err := createStaticPod(framework.TestContext.ManifestPath, staticPodName, ns,
err := createStaticPod(manifestPath, staticPodName, ns,
"gcr.io/google_containers/nginx-slim:0.7", api.RestartPolicyAlways)
Expect(err).ShouldNot(HaveOccurred())
@ -61,7 +63,7 @@ var _ = framework.KubeDescribe("MirrorPod", func() {
By("update the static pod container image")
image := framework.GetPauseImageNameForHostArch()
err = createStaticPod(framework.TestContext.ManifestPath, staticPodName, ns, image, api.RestartPolicyAlways)
err = createStaticPod(manifestPath, staticPodName, ns, image, api.RestartPolicyAlways)
Expect(err).ShouldNot(HaveOccurred())
By("wait for the mirror pod to be updated")
@ -107,7 +109,7 @@ var _ = framework.KubeDescribe("MirrorPod", func() {
})
AfterEach(func() {
By("delete the static pod")
err := deleteStaticPod(framework.TestContext.ManifestPath, staticPodName, ns)
err := deleteStaticPod(manifestPath, staticPodName, ns)
Expect(err).ShouldNot(HaveOccurred())
By("wait for the mirror pod to disappear")

View File

@ -275,7 +275,7 @@ func RunRemote(archive string, host string, cleanup bool, junitFilePrefix string
return "", false, err
}
// Insert args at beginning of testArgs, so any values from command line take precedence
testArgs = fmt.Sprintf("--experimental-mounter-path=%s ", mounterPath) + testArgs
testArgs = fmt.Sprintf("--kubelet-flags=--experimental-mounter-path=%s ", mounterPath) + testArgs
}
// Run the tests

View File

@ -16,6 +16,7 @@ go_library(
"apiserver.go",
"etcd.go",
"internal_services.go",
"kubelet.go",
"namespace_controller.go",
"server.go",
"services.go",

View File

@ -0,0 +1,193 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package services
import (
"flag"
"fmt"
"io/ioutil"
"math/rand"
"os"
"os/exec"
"path/filepath"
"strings"
"github.com/golang/glog"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e_node/builder"
)
// TODO(random-liu): Replace this with standard kubelet launcher.
// args is the type used to accumulate arguments from repeated occurrences of the same flag.
type args []string
// String function of flag.Value
func (a *args) String() string {
return fmt.Sprint(*a)
}
// Set function of flag.Value
func (a *args) Set(value string) error {
// Someone else is calling flag.Parse after the flags are parsed in the
// test framework. Use this to avoid the flag being parsed twice.
// TODO(random-liu): Figure out who is parsing the flags.
if flag.Parsed() {
return nil
}
// Note that we assume all whitespace in the flag string separates fields.
na := strings.Fields(value)
*a = append(*a, na...)
return nil
}
// kubeletArgs holds the kubelet flag overrides specified by the test runner.
var kubeletArgs args
func init() {
flag.Var(&kubeletArgs, "kubelet-flags", "Kubelet flags passed to the kubelet; these override the default kubelet flags in the test. Flags specified in multiple kubelet-flags will be concatenated.")
}
const (
// Ports of different e2e services.
kubeletPort = "10250"
kubeletReadOnlyPort = "10255"
// Health check url of kubelet
kubeletHealthCheckURL = "http://127.0.0.1:" + kubeletReadOnlyPort + "/healthz"
)
// startKubelet starts the Kubelet in a separate process or returns an error
// if the Kubelet fails to start.
func (e *E2EServices) startKubelet() (*server, error) {
glog.Info("Starting kubelet")
// Create pod manifest path
manifestPath, err := createPodManifestDirectory()
if err != nil {
return nil, err
}
e.rmDirs = append(e.rmDirs, manifestPath)
var killCommand, restartCommand *exec.Cmd
var isSystemd bool
// Apply default kubelet flags.
cmdArgs := []string{}
if systemdRun, err := exec.LookPath("systemd-run"); err == nil {
// On systemd services, detection of a service / unit works reliably while
// detection of a process started from an ssh session does not work.
// Since kubelet will typically be run as a service it also makes more
// sense to test it that way
isSystemd = true
unitName := fmt.Sprintf("kubelet-%d.service", rand.Int31())
cmdArgs = append(cmdArgs, systemdRun, "--unit="+unitName, "--remain-after-exit", builder.GetKubeletServerBin())
killCommand = exec.Command("systemctl", "kill", unitName)
restartCommand = exec.Command("systemctl", "restart", unitName)
e.logFiles["kubelet.log"] = logFileData{
journalctlCommand: []string{"-u", unitName},
}
} else {
cmdArgs = append(cmdArgs, builder.GetKubeletServerBin())
cmdArgs = append(cmdArgs,
"--runtime-cgroups=/docker-daemon",
"--kubelet-cgroups=/kubelet",
"--cgroup-root=/",
"--system-cgroups=/system",
)
}
cmdArgs = append(cmdArgs,
"--api-servers", getAPIServerClientURL(),
"--address", "0.0.0.0",
"--port", kubeletPort,
"--read-only-port", kubeletReadOnlyPort,
"--volume-stats-agg-period", "10s", // Aggregate volumes frequently so tests don't need to wait as long
"--allow-privileged", "true",
"--serialize-image-pulls", "false",
"--config", manifestPath,
"--file-check-frequency", "10s", // Check file frequently so tests won't wait too long
"--pod-cidr", "10.180.0.0/24", // Assign a fixed CIDR to the node because there is no node controller.
"--eviction-pressure-transition-period", "30s",
// Apply test framework feature gates by default. This could also be overridden
// by kubelet-flags.
"--feature-gates", framework.TestContext.FeatureGates,
"--eviction-hard", "memory.available<250Mi,nodefs.available<10%,nodefs.inodesFree<5%", // The hard eviction thresholds.
"--v", LOG_VERBOSITY_LEVEL, "--logtostderr",
)
// Enable kubenet by default.
cniDir, err := getCNIDirectory()
if err != nil {
return nil, err
}
cmdArgs = append(cmdArgs,
"--network-plugin=kubenet",
"--network-plugin-dir", cniDir)
// Keep hostname override for convenience.
if framework.TestContext.NodeName != "" { // If node name is specified, set hostname override.
cmdArgs = append(cmdArgs, "--hostname-override", framework.TestContext.NodeName)
}
// Override the default kubelet flags.
cmdArgs = append(cmdArgs, kubeletArgs...)
// Adjust the args if we are running kubelet with systemd.
if isSystemd {
adjustArgsForSystemd(cmdArgs)
}
cmd := exec.Command(cmdArgs[0], cmdArgs[1:]...)
server := newServer(
"kubelet",
cmd,
killCommand,
restartCommand,
[]string{kubeletHealthCheckURL},
"kubelet.log",
e.monitorParent,
true /* restartOnExit */)
return server, server.start()
}
// createPodManifestDirectory creates the pod manifest directory.
func createPodManifestDirectory() (string, error) {
cwd, err := os.Getwd()
if err != nil {
return "", fmt.Errorf("failed to get current working directory: %v", err)
}
path, err := ioutil.TempDir(cwd, "pod-manifest")
if err != nil {
return "", fmt.Errorf("failed to create static pod manifest directory: %v", err)
}
return path, nil
}
// getCNIDirectory returns the CNI directory.
func getCNIDirectory() (string, error) {
cwd, err := os.Getwd()
if err != nil {
return "", err
}
// TODO(random-liu): Make sure the cni directory name is the same with that in remote/remote.go
return filepath.Join(cwd, "cni", "bin"), nil
}
// adjustArgsForSystemd escapes special characters in the kubelet arguments for
// systemd, which may otherwise attempt its own variable expansion on them.
func adjustArgsForSystemd(args []string) {
for i := range args {
args[i] = strings.Replace(args[i], "%", "%%", -1)
args[i] = strings.Replace(args[i], "$", "$$", -1)
}
}

View File

@ -19,19 +19,15 @@ package services
import (
"fmt"
"io/ioutil"
"math/rand"
"os"
"os/exec"
"path"
"path/filepath"
"strings"
"github.com/golang/glog"
"github.com/kardianos/osext"
utilconfig "k8s.io/kubernetes/pkg/util/config"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e_node/builder"
)
// E2EServices starts and stops e2e services in a separate process. The test
@ -39,6 +35,7 @@ import (
type E2EServices struct {
// monitorParent determines whether the sub-processes should watch and die with the current
// process.
rmDirs []string
monitorParent bool
services *server
kubelet *server
@ -80,16 +77,6 @@ func (e *E2EServices) Start() error {
var err error
if !framework.TestContext.NodeConformance {
// Start kubelet
// Create the manifest path for kubelet.
// TODO(random-liu): Remove related logic when we move kubelet starting logic out of the test.
cwd, err := os.Getwd()
if err != nil {
return fmt.Errorf("failed to get current working directory: %v", err)
}
framework.TestContext.ManifestPath, err = ioutil.TempDir(cwd, "pod-manifest")
if err != nil {
return fmt.Errorf("failed to create static pod manifest directory: %v", err)
}
e.kubelet, err = e.startKubelet()
if err != nil {
return fmt.Errorf("failed to start kubelet: %v", err)
@ -105,14 +92,6 @@ func (e *E2EServices) Stop() {
if !framework.TestContext.NodeConformance {
// Collect log files.
e.getLogFiles()
// Cleanup the manifest path for kubelet.
manifestPath := framework.TestContext.ManifestPath
if manifestPath != "" {
err := os.RemoveAll(manifestPath)
if err != nil {
glog.Errorf("Failed to delete static pod manifest directory %s: %v", manifestPath, err)
}
}
}
}()
if e.services != nil {
@ -125,6 +104,14 @@ func (e *E2EServices) Stop() {
glog.Errorf("Failed to stop kubelet: %v", err)
}
}
if e.rmDirs != nil {
for _, d := range e.rmDirs {
err := os.RemoveAll(d)
if err != nil {
glog.Errorf("Failed to delete directory %s: %v", d, err)
}
}
}
}
// RunE2EServices actually starts the e2e services. This function is used to
@ -158,109 +145,6 @@ func (e *E2EServices) startInternalServices() (*server, error) {
return server, server.start()
}
const (
// Ports of different e2e services.
kubeletPort = "10250"
kubeletReadOnlyPort = "10255"
// Health check url of kubelet
kubeletHealthCheckURL = "http://127.0.0.1:" + kubeletReadOnlyPort + "/healthz"
)
// startKubelet starts the Kubelet in a separate process or returns an error
// if the Kubelet fails to start.
func (e *E2EServices) startKubelet() (*server, error) {
glog.Info("Starting kubelet")
var killCommand, restartCommand *exec.Cmd
cmdArgs := []string{}
if systemdRun, err := exec.LookPath("systemd-run"); err == nil {
// On systemd services, detection of a service / unit works reliably while
// detection of a process started from an ssh session does not work.
// Since kubelet will typically be run as a service it also makes more
// sense to test it that way
unitName := fmt.Sprintf("kubelet-%d.service", rand.Int31())
cmdArgs = append(cmdArgs, systemdRun, "--unit="+unitName, "--remain-after-exit", builder.GetKubeletServerBin())
killCommand = exec.Command("systemctl", "kill", unitName)
restartCommand = exec.Command("systemctl", "restart", unitName)
e.logFiles["kubelet.log"] = logFileData{
journalctlCommand: []string{"-u", unitName},
}
framework.TestContext.EvictionHard = adjustConfigForSystemd(framework.TestContext.EvictionHard)
} else {
cmdArgs = append(cmdArgs, builder.GetKubeletServerBin())
cmdArgs = append(cmdArgs,
"--runtime-cgroups=/docker-daemon",
"--kubelet-cgroups=/kubelet",
"--cgroup-root=/",
"--system-cgroups=/system",
)
}
cmdArgs = append(cmdArgs,
"--api-servers", getAPIServerClientURL(),
"--address", "0.0.0.0",
"--port", kubeletPort,
"--read-only-port", kubeletReadOnlyPort,
"--volume-stats-agg-period", "10s", // Aggregate volumes frequently so tests don't need to wait as long
"--allow-privileged", "true",
"--serialize-image-pulls", "false",
"--config", framework.TestContext.ManifestPath,
"--file-check-frequency", "10s", // Check file frequently so tests won't wait too long
"--pod-cidr=10.180.0.0/24", // Assign a fixed CIDR to the node because there is no node controller.
"--eviction-hard", framework.TestContext.EvictionHard,
"--eviction-pressure-transition-period", "30s",
"--feature-gates", framework.TestContext.FeatureGates,
"--v", LOG_VERBOSITY_LEVEL, "--logtostderr",
"--experimental-mounter-path", framework.TestContext.MounterPath,
)
if framework.TestContext.NodeName != "" { // If node name is specified, set hostname override.
cmdArgs = append(cmdArgs, "--hostname-override", framework.TestContext.NodeName)
}
if framework.TestContext.EnableCRI {
cmdArgs = append(cmdArgs, "--experimental-cri", "true") // Whether to use experimental cri integration.
}
if framework.TestContext.ContainerRuntimeEndpoint != "" {
cmdArgs = append(cmdArgs, "--container-runtime-endpoint", framework.TestContext.ContainerRuntimeEndpoint)
}
if framework.TestContext.CgroupsPerQOS {
cmdArgs = append(cmdArgs,
"--experimental-cgroups-per-qos", "true",
"--cgroup-root", "/",
)
}
if framework.TestContext.CgroupDriver != "" {
cmdArgs = append(cmdArgs,
"--cgroup-driver", framework.TestContext.CgroupDriver,
)
}
if !framework.TestContext.DisableKubenet {
cwd, err := os.Getwd()
if err != nil {
return nil, err
}
cmdArgs = append(cmdArgs,
"--network-plugin=kubenet",
// TODO(random-liu): Make sure the cni directory name is the same with that in remote/remote.go
"--network-plugin-dir", filepath.Join(cwd, "cni", "bin")) // Enable kubenet
}
cmd := exec.Command(cmdArgs[0], cmdArgs[1:]...)
server := newServer(
"kubelet",
cmd,
killCommand,
restartCommand,
[]string{kubeletHealthCheckURL},
"kubelet.log",
e.monitorParent,
true /* restartOnExit */)
return server, server.start()
}
func adjustConfigForSystemd(config string) string {
return strings.Replace(config, "%", "%%", -1)
}
// getLogFiles gets logs of interest either via journalctl or by creating sym
// links. Since we scp files from the remote directory, symlinks will be
// treated as normal files and file contents will be copied over.