Merge pull request #29020 from Random-Liu/add-namespace-controller-in-node-e2e

Automatic merge from submit-queue

Start namespace controller in node e2e

Fix https://github.com/kubernetes/kubernetes/issues/28320.
Based on https://github.com/kubernetes/kubernetes/pull/28807, only the last 2 commits are new.

Before this PR, there was no namespace controller running in the node e2e test infrastructure, so we could not enable the [`delete-namespace`](f2ddd60eb9/test/e2e/framework/test_context.go (L109)) flag in the test framework.
As a result, running pods were left behind on the test node after each run. That has been acceptable in our own test infrastructure because we create a new instance every time.

However, in 1.4 we may want to ship part of these tests as a node conformance test for users, and they definitely don't want the test to leave piles of pods on their nodes after a run.

Currently, there is no easy way to start only the namespace controller in kube-controller-manager (confirmed with @mikedanese), so in this PR I start an "uncontainerized" one directly in the test infrastructure.

This PR:
* Starts the namespace controller in the node e2e test infrastructure and enables automatic namespace deletion (see the sketch after this list).
* Changes the privileged test to use the framework (@yujuhong), so that all node e2e tests use the framework and test pods are cleaned up by the namespace controller.
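To make the new cleanup behaviour concrete, here is a minimal, purely illustrative sketch of a framework-based node e2e test after this change; the suite name, pod name, and image are assumptions, not part of this PR:

```go
// Illustrative only (not part of this PR). Assumes the usual node e2e test imports:
// "k8s.io/kubernetes/pkg/api", "k8s.io/kubernetes/test/e2e/framework", and the
// Ginkgo/Gomega dot-imports.
var _ = Describe("Namespace cleanup example", func() {
	f := framework.NewDefaultFramework("cleanup-example")

	It("leaves no pods behind after the test", func() {
		// The pod is created in the per-test namespace f.Namespace.Name.
		f.PodClient().CreateSync(&api.Pod{
			ObjectMeta: api.ObjectMeta{Name: "leftover-check"},
			Spec: api.PodSpec{
				Containers: []api.Container{{
					Name:  "pause",
					Image: "gcr.io/google_containers/pause:2.0",
				}},
			},
		})
		// On teardown the framework deletes f.Namespace (delete-namespace is now a common
		// flag), and the namespace controller started by the suite reaps the pod with it.
	})
})
```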

/cc @kubernetes/sig-node
k8s-merge-robot 2016-07-20 09:24:26 -07:00 committed by GitHub
commit d1fba05a1b
4 changed files with 46 additions and 79 deletions

@@ -54,23 +54,22 @@ func (c *PodClient) Create(pod *api.Pod) *api.Pod {
func (c *PodClient) CreateSync(pod *api.Pod) *api.Pod {
p := c.Create(pod)
ExpectNoError(c.f.WaitForPodRunning(p.Name))
// Get the newest pod after it becomes running, some status may change after pod created, such as pod ip.
p, err := c.Get(pod.Name)
ExpectNoError(err)
return p
}
// CreateBatch create a batch of pods. All pods are created before waiting.
func (c *PodClient) CreateBatch(pods []*api.Pod) []*api.Pod {
ps := make([]*api.Pod, len(pods))
for i, pod := range pods {
ps[i] = c.Create(pod)
}
var wg sync.WaitGroup
for _, pod := range ps {
for i, pod := range pods {
wg.Add(1)
podName := pod.Name
go func() {
ExpectNoError(c.f.WaitForPodRunning(podName))
go func(i int, pod *api.Pod) {
ps[i] = c.CreateSync(pod)
wg.Done()
}()
}(i, pod)
}
wg.Wait()
return ps
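For context, a hypothetical caller of the reworked `CreateBatch` could look like the sketch below; the helper name, pod names, and image are illustrative, not part of this PR:

```go
// createPausePods is a hypothetical helper showing how CreateBatch is used: every pod is
// created and waited for concurrently, and the returned slice holds the running pods.
// Assumes "fmt", "k8s.io/kubernetes/pkg/api", and "k8s.io/kubernetes/test/e2e/framework".
func createPausePods(f *framework.Framework, n int) []*api.Pod {
	pods := make([]*api.Pod, 0, n)
	for i := 0; i < n; i++ {
		pods = append(pods, &api.Pod{
			ObjectMeta: api.ObjectMeta{Name: fmt.Sprintf("batch-pod-%d", i)},
			Spec: api.PodSpec{
				Containers: []api.Container{{
					Name:  "pause",
					Image: "gcr.io/google_containers/pause:2.0",
				}},
			},
		})
	}
	// CreateBatch spawns one goroutine per pod; each calls CreateSync and fills its slot.
	return f.PodClient().CreateBatch(pods)
}
```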

@@ -103,14 +103,12 @@ func RegisterCommonFlags() {
flag.BoolVar(&TestContext.GatherMetricsAfterTest, "gather-metrics-at-teardown", false, "If set to true framwork will gather metrics from all components after each test.")
flag.StringVar(&TestContext.OutputPrintType, "output-print-type", "hr", "Comma separated list: 'hr' for human readable summaries 'json' for JSON ones.")
flag.BoolVar(&TestContext.DumpLogsOnFailure, "dump-logs-on-failure", true, "If set to true test will dump data about the namespace in which test was running.")
flag.BoolVar(&TestContext.DeleteNamespace, "delete-namespace", true, "If true tests will delete namespace after completion. It is only designed to make debugging easier, DO NOT turn it off by default.")
flag.StringVar(&TestContext.Host, "host", "http://127.0.0.1:8080", "The host, or apiserver, to connect to")
}
// Register flags specific to the cluster e2e test suite.
func RegisterClusterFlags() {
// TODO: Move to common flags once namespace deletion is fixed for node e2e.
flag.BoolVar(&TestContext.DeleteNamespace, "delete-namespace", true, "If true tests will delete namespace after completion. It is only designed to make debugging easier, DO NOT turn it off by default.")
flag.BoolVar(&TestContext.VerifyServiceAccount, "e2e-verify-service-account", true, "If true tests will verify the service account before running.")
flag.StringVar(&TestContext.KubeConfig, clientcmd.RecommendedConfigPathFlag, os.Getenv(clientcmd.RecommendedConfigPathEnvVar), "Path to kubeconfig containing embedded authinfo.")
flag.StringVar(&TestContext.KubeContext, clientcmd.FlagContext, "", "kubeconfig context to use/override. If unset, will use value from 'current-context'")
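With `delete-namespace` registered as a common flag, node e2e runs keep the same debugging escape hatch as cluster e2e. The sketch below shows the kind of teardown branch the flag gates; `maybeDeleteNamespace` and `deleteTestNamespace` are hypothetical helpers, not the framework's actual code, though the framework's own teardown does the equivalent:

```go
// Sketch only: a teardown that honors --delete-namespace. Both helpers are hypothetical.
// Assumes "k8s.io/kubernetes/test/e2e/framework".
func maybeDeleteNamespace(f *framework.Framework) {
	if !framework.TestContext.DeleteNamespace {
		framework.Logf("Skipping deletion of namespace %q (--delete-namespace=false)", f.Namespace.Name)
		return
	}
	deleteTestNamespace(f)
}

// deleteTestNamespace stands in for the framework's real cleanup path: deleting the
// namespace hands the contained pods over to the namespace controller.
func deleteTestNamespace(f *framework.Framework) {
	framework.ExpectNoError(f.Client.Namespaces().Delete(f.Namespace.Name))
}
```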

@@ -31,6 +31,12 @@ import (
"testing"
"time"
"k8s.io/kubernetes/pkg/api"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/client/typed/dynamic"
namespacecontroller "k8s.io/kubernetes/pkg/controller/namespace"
"k8s.io/kubernetes/pkg/util/wait"
commontest "k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
@@ -106,6 +112,9 @@ var _ = BeforeSuite(func() {
glog.Infof("Running tests without starting services.")
}
glog.Infof("Starting namespace controller")
startNamespaceController()
// Reference common test to make the import valid.
commontest.CurrentSuite = commontest.NodeE2E
})
@@ -135,3 +144,22 @@ func maskLocksmithdOnCoreos() {
glog.Infof("Locksmithd is masked successfully")
}
}
const (
// ncResyncPeriod is resync period of the namespace controller
ncResyncPeriod = 5 * time.Minute
// ncConcurrency is concurrency of the namespace controller
ncConcurrency = 2
)
func startNamespaceController() {
// Use the default QPS
config := restclient.AddUserAgent(&restclient.Config{Host: framework.TestContext.Host}, "node-e2e-namespace-controller")
client, err := clientset.NewForConfig(config)
Expect(err).NotTo(HaveOccurred())
clientPool := dynamic.NewClientPool(config, dynamic.LegacyAPIPathResolverFunc)
resources, err := client.Discovery().ServerPreferredNamespacedResources()
Expect(err).NotTo(HaveOccurred())
nc := namespacecontroller.NewNamespaceController(client, clientPool, resources, ncResyncPeriod, api.FinalizerKubernetes)
go nc.Run(ncConcurrency, wait.NeverStop)
}
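With the controller running in-process, namespace deletion can now actually finish instead of hanging in `Terminating`. A hypothetical check (not part of this PR) could poll until the test namespace is gone:

```go
// Hypothetical helper, not in this PR: wait until a deleted namespace has been fully
// reaped by the namespace controller. Assumes the imports added above plus
// apierrors "k8s.io/kubernetes/pkg/api/errors".
func waitForNamespaceGone(c clientset.Interface, name string) error {
	return wait.Poll(2*time.Second, 2*time.Minute, func() (bool, error) {
		_, err := c.Core().Namespaces().Get(name)
		if err == nil {
			return false, nil // namespace still exists, keep polling
		}
		if apierrors.IsNotFound(err) {
			return true, nil // fully deleted
		}
		return false, err
	})
}
```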

@@ -20,11 +20,8 @@ import (
"encoding/json"
"fmt"
"net/url"
"time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apimachinery/registered"
"k8s.io/kubernetes/pkg/client/restclient"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/test/e2e/framework"
@@ -56,25 +53,18 @@ type PrivilegedPodTestConfig struct {
// TODO(random-liu): Change the test to use framework and framework pod client.
var _ = Describe("PrivilegedPod", func() {
var c *client.Client
var restClientConfig *restclient.Config
BeforeEach(func() {
// Setup the apiserver client
restClientConfig = &restclient.Config{Host: framework.TestContext.Host}
c = client.NewOrDie(restClientConfig)
})
f := framework.NewDefaultFramework("privileged-pod")
It("should test privileged pod", func() {
namespace := "privileged-pods"
config := &PrivilegedPodTestConfig{
client: c,
config: restClientConfig,
namespace: namespace,
client: f.Client,
config: &restclient.Config{Host: framework.TestContext.Host},
namespace: f.Namespace.Name,
}
By("Creating a host exec pod")
config.hostExecPod = createPodAndWaitUntilRunning(c, newHostExecPodSpec(config.namespace, "hostexec"))
config.hostExecPod = f.PodClient().CreateSync(newHostExecPodSpec("hostexec"))
By("Creating a privileged pod")
config.privilegedPod = createPodAndWaitUntilRunning(c, config.createPrivilegedPodSpec())
config.privilegedPod = f.PodClient().CreateSync(config.createPrivilegedPodSpec())
By("Executing privileged command on privileged container")
config.runPrivilegedCommandOnPrivilegedContainer()
@@ -88,13 +78,8 @@ func (config *PrivilegedPodTestConfig) createPrivilegedPodSpec() *api.Pod {
isPrivileged := true
notPrivileged := false
pod := &api.Pod{
TypeMeta: unversioned.TypeMeta{
Kind: "Pod",
APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String(),
},
ObjectMeta: api.ObjectMeta{
Name: privilegedPodName,
Namespace: config.namespace,
Name: privilegedPodName,
},
Spec: api.PodSpec{
Containers: []api.Container{
@@ -145,7 +130,7 @@ func (config *PrivilegedPodTestConfig) dialFromContainer(containerIP string, con
v.Encode())
By(fmt.Sprintf("Exec-ing into container over http. Running command: %s", cmd))
stdout, err := execCommandInContainer(config.config, config.client, config.hostExecPod.Namespace, config.hostExecPod.Name, config.hostExecPod.Spec.Containers[0].Name,
stdout, err := execCommandInContainer(config.config, config.client, config.namespace, config.hostExecPod.Name, config.hostExecPod.Spec.Containers[0].Name,
[]string{"/bin/sh", "-c", cmd})
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Error running command %q: %v", cmd, err))
@@ -156,15 +141,10 @@ func (config *PrivilegedPodTestConfig) dialFromContainer(containerIP string, con
}
// newHostExecPodSpec returns the pod spec of hostexec pod
func newHostExecPodSpec(ns, name string) *api.Pod {
func newHostExecPodSpec(name string) *api.Pod {
return &api.Pod{
TypeMeta: unversioned.TypeMeta{
Kind: "Pod",
APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String(),
},
ObjectMeta: api.ObjectMeta{
Name: name,
Namespace: ns,
Name: name,
},
Spec: api.PodSpec{
Containers: []api.Container{
@@ -180,41 +160,3 @@ func newHostExecPodSpec(ns, name string) *api.Pod {
},
}
}
// TODO: This is a very basic helper function. Remove this once a common
// utility function has been ported from the the e2e package.
func waitForPodRunning(c *client.Client, ns string, name string) error {
var pod *api.Pod
var err error
for t := time.Now(); time.Since(t) < time.Minute*2; time.Sleep(time.Second * 5) {
pod, err = c.Pods(ns).Get(name)
Expect(err).NotTo(HaveOccurred())
if pod == nil {
continue
}
if pod.Status.Phase != api.PodRunning {
continue
}
return nil
}
return fmt.Errorf("Time out while waiting for pod %s/%s to become running; current status: %+v", ns, name, pod.Status)
}
func createPodAndWaitUntilRunning(c *client.Client, pod *api.Pod) *api.Pod {
ref := fmt.Sprintf("%v/%v", pod.Namespace, pod.Name)
_, err := createPodWithSpec(c, pod)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create pod %q: %v", ref, err))
err = waitForPodRunning(c, pod.Namespace, pod.Name)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed waiting for pod %q to become running: %v", ref, err))
runningPod, err := c.Pods(pod.Namespace).Get(pod.Name)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to retrieve pod %q: %v", ref, err))
return runningPod
}
func createPodWithSpec(c *client.Client, pod *api.Pod) (*api.Pod, error) {
// Manually assign pod to node because we don't run the scheduler in node
// e2e tests.
pod.Spec.NodeName = framework.TestContext.NodeName
createdPod, err := c.Pods(pod.Namespace).Create(pod)
return createdPod, err
}