From 2d6c2eb8976db71f5fe8a2afc1dc365efd8fdd39 Mon Sep 17 00:00:00 2001 From: "Dr. Stefan Schimanski" Date: Fri, 13 Nov 2015 00:30:21 +0100 Subject: [PATCH] e2e: replace ssh with net=host hostexec pod and kubectl exec - document needed packages in hostexec image - add RunHostCmdOrDie - kube-proxy e2e: port from ssh to hostexec - use preset NodeName to schedule test pods to different nodes - parallel launch of pods - port from ssh to hostexec - add timeout because nc might block on udp - delete test container without grace period - PrivilegedPod e2e: port from ssh to hostexec - NodePort e2e: port from ssh to hostexec - cluster/mesos/docker: Enable privileged pods --- cluster/mesos/docker/docker-compose.yml | 2 + .../mesos/pkg/scheduler/service/service.go | 2 +- test/e2e/kubeproxy.go | 125 ++++++++++-------- test/e2e/privileged.go | 25 +--- test/e2e/service.go | 32 ++--- test/e2e/util.go | 52 +++++--- test/images/hostexec/Dockerfile | 3 + 7 files changed, 130 insertions(+), 111 deletions(-) diff --git a/cluster/mesos/docker/docker-compose.yml b/cluster/mesos/docker/docker-compose.yml index c24e24cae1..c61ab63da3 100644 --- a/cluster/mesos/docker/docker-compose.yml +++ b/cluster/mesos/docker/docker-compose.yml @@ -86,6 +86,7 @@ apiserver: --service-node-port-range=30000-32767 --cloud-provider=mesos --cloud-config=/opt/mesos-cloud.conf + --allow-privileged --tls-cert-file=/var/run/kubernetes/auth/apiserver.crt --tls-private-key-file=/var/run/kubernetes/auth/apiserver.key --runtime-config=experimental/v1alpha1 @@ -138,6 +139,7 @@ scheduler: --mesos-user=root --api-servers=http://apiserver:8888 --mesos-master=mesosmaster1:5050 + --allow-privileged --cluster-dns=10.10.10.10 --cluster-domain=cluster.local --mesos-executor-cpus=1.0 diff --git a/contrib/mesos/pkg/scheduler/service/service.go b/contrib/mesos/pkg/scheduler/service/service.go index ce892e433b..36c5ed12ba 100644 --- a/contrib/mesos/pkg/scheduler/service/service.go +++ b/contrib/mesos/pkg/scheduler/service/service.go @@ -228,7 +228,7 @@ func (s *SchedulerServer) addCoreFlags(fs *pflag.FlagSet) { fs.StringVar(&s.authPath, "auth-path", s.authPath, "Path to .kubernetes_auth file, specifying how to authenticate to API server.") fs.StringSliceVar(&s.etcdServerList, "etcd-servers", s.etcdServerList, "List of etcd servers to watch (http://ip:port), comma separated. Mutually exclusive with --etcd-config") fs.StringVar(&s.etcdConfigFile, "etcd-config", s.etcdConfigFile, "The config file for the etcd client. Mutually exclusive with --etcd-servers.") - fs.BoolVar(&s.allowPrivileged, "allow-privileged", s.allowPrivileged, "If true, allow privileged containers.") + fs.BoolVar(&s.allowPrivileged, "allow-privileged", s.allowPrivileged, "Enable privileged containers in the kubelet (compare the same flag in the apiserver).") fs.StringVar(&s.clusterDomain, "cluster-domain", s.clusterDomain, "Domain for this cluster. If set, kubelet will configure all containers to search this domain in addition to the host's search domains") fs.IPVar(&s.clusterDNS, "cluster-dns", s.clusterDNS, "IP address for a cluster DNS server. If set, kubelet will configure all containers to use this for DNS resolution in addition to the host's DNS servers") fs.StringVar(&s.staticPodsConfigPath, "static-pods-config", s.staticPodsConfigPath, "Path for specification of static pods. Path should point to dir containing the staticPods configuration files. 
Defaults to none.") diff --git a/test/e2e/kubeproxy.go b/test/e2e/kubeproxy.go index 8b65578086..4021d3d4da 100644 --- a/test/e2e/kubeproxy.go +++ b/test/e2e/kubeproxy.go @@ -31,6 +31,7 @@ import ( "k8s.io/kubernetes/pkg/api/latest" "k8s.io/kubernetes/pkg/api/unversioned" client "k8s.io/kubernetes/pkg/client/unversioned" + "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util/intstr" @@ -40,7 +41,6 @@ import ( const ( endpointHttpPort = 8080 endpointUdpPort = 8081 - endpointHostPort = 8082 testContainerHttpPort = 8080 clusterHttpPort = 80 clusterUdpPort = 90 @@ -49,19 +49,21 @@ const ( loadBalancerHttpPort = 100 netexecImageName = "gcr.io/google_containers/netexec:1.0" testPodName = "test-container-pod" + hostTestPodName = "host-test-container-pod" nodePortServiceName = "node-port-service" loadBalancerServiceName = "load-balancer-service" enableLoadBalancerTest = false ) type KubeProxyTestConfig struct { - testContainerPod *api.Pod - testHostPod *api.Pod - endpointPods []*api.Pod - f *Framework - nodePortService *api.Service - loadBalancerService *api.Service - nodes []string + testContainerPod *api.Pod + hostTestContainerPod *api.Pod + endpointPods []*api.Pod + f *Framework + nodePortService *api.Service + loadBalancerService *api.Service + externalAddrs []string + nodes []api.Node } var _ = Describe("KubeProxy", func() { @@ -71,8 +73,6 @@ var _ = Describe("KubeProxy", func() { } It("should test kube-proxy", func() { - SkipUnlessProviderIs(providersWithSSH...) - By("cleaning up any pre-existing namespaces used by this test") config.cleanup() @@ -168,7 +168,7 @@ func (config *KubeProxyTestConfig) hitClusterIP(epCount int) { } func (config *KubeProxyTestConfig) hitNodePort(epCount int) { - node1_IP := config.nodes[0] + node1_IP := config.externalAddrs[0] tries := epCount*epCount + 5 // + 10 if epCount == 0 By("dialing(udp) node1 --> node1:nodeUdpPort") config.dialFromNode("udp", node1_IP, nodeUdpPort, tries, epCount) @@ -177,7 +177,7 @@ func (config *KubeProxyTestConfig) hitNodePort(epCount int) { By("dialing(udp) test container --> node1:nodeUdpPort") config.dialFromTestContainer("udp", node1_IP, nodeUdpPort, tries, epCount) - By("dialing(http) container --> node1:nodeHttpPort") + By("dialing(http) test container --> node1:nodeHttpPort") config.dialFromTestContainer("http", node1_IP, nodeHttpPort, tries, epCount) By("dialing(udp) endpoint container --> node1:nodeUdpPort") @@ -192,7 +192,7 @@ func (config *KubeProxyTestConfig) hitNodePort(epCount int) { By("Test disabled. dialing(http) node --> 127.0.0.1:nodeHttpPort") //config.dialFromNode("http", "127.0.0.1", nodeHttpPort, tries, epCount) - node2_IP := config.nodes[1] + node2_IP := config.externalAddrs[1] By("dialing(udp) node1 --> node2:nodeUdpPort") config.dialFromNode("udp", node2_IP, nodeUdpPort, tries, epCount) By("dialing(http) node1 --> node2:nodeHttpPort") @@ -231,7 +231,7 @@ func (config *KubeProxyTestConfig) dialFromContainer(protocol, containerIP, targ tries) By(fmt.Sprintf("Dialing from container. 
Running command:%s", cmd)) - stdout := config.ssh(cmd) + stdout := RunHostCmdOrDie(config.f.Namespace.Name, config.hostTestContainerPod.Name, cmd) var output map[string][]string err := json.Unmarshal([]byte(stdout), &output) Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Could not unmarshal curl response: %s", stdout)) @@ -242,24 +242,17 @@ func (config *KubeProxyTestConfig) dialFromContainer(protocol, containerIP, targ func (config *KubeProxyTestConfig) dialFromNode(protocol, targetIP string, targetPort, tries, expectedCount int) { var cmd string if protocol == "udp" { - cmd = fmt.Sprintf("echo 'hostName' | nc -w 1 -u %s %d", targetIP, targetPort) + cmd = fmt.Sprintf("echo 'hostName' | timeout -t 3 nc -w 1 -u %s %d", targetIP, targetPort) } else { cmd = fmt.Sprintf("curl -s --connect-timeout 1 http://%s:%d/hostName", targetIP, targetPort) } forLoop := fmt.Sprintf("for i in $(seq 1 %d); do %s; echo; done | grep -v '^\\s*$' |sort | uniq -c | wc -l", tries, cmd) By(fmt.Sprintf("Dialing from node. command:%s", forLoop)) - stdout := config.ssh(forLoop) + stdout := RunHostCmdOrDie(config.f.Namespace.Name, config.hostTestContainerPod.Name, forLoop) Expect(strconv.Atoi(strings.TrimSpace(stdout))).To(BeNumerically("==", expectedCount)) } -func (config *KubeProxyTestConfig) ssh(cmd string) string { - stdout, _, code, err := SSH(cmd, config.nodes[0]+":22", testContext.Provider) - Expect(err).NotTo(HaveOccurred(), "error while SSH-ing to node: %v (code %v)", err, code) - Expect(code).Should(BeZero(), "command exited with non-zero code %v. cmd:%s", code, cmd) - return stdout -} - -func (config *KubeProxyTestConfig) createNetShellPodSpec(podName string) *api.Pod { +func (config *KubeProxyTestConfig) createNetShellPodSpec(podName string, node string) *api.Pod { pod := &api.Pod{ TypeMeta: unversioned.TypeMeta{ Kind: "Pod", @@ -288,15 +281,12 @@ func (config *KubeProxyTestConfig) createNetShellPodSpec(podName string) *api.Po { Name: "udp", ContainerPort: endpointUdpPort, - }, - { - Name: "host", - ContainerPort: endpointHttpPort, - HostPort: endpointHostPort, + Protocol: api.ProtocolUDP, }, }, }, }, + NodeName: node, }, } return pod @@ -344,8 +334,8 @@ func (config *KubeProxyTestConfig) createNodePortService(selector map[string]str Spec: api.ServiceSpec{ Type: api.ServiceTypeNodePort, Ports: []api.ServicePort{ - {Port: clusterHttpPort, Name: "http", Protocol: "TCP", NodePort: nodeHttpPort, TargetPort: intstr.FromInt(endpointHttpPort)}, - {Port: clusterUdpPort, Name: "udp", Protocol: "UDP", NodePort: nodeUdpPort, TargetPort: intstr.FromInt(endpointUdpPort)}, + {Port: clusterHttpPort, Name: "http", Protocol: api.ProtocolTCP, NodePort: nodeHttpPort, TargetPort: intstr.FromInt(endpointHttpPort)}, + {Port: clusterUdpPort, Name: "udp", Protocol: api.ProtocolUDP, NodePort: nodeUdpPort, TargetPort: intstr.FromInt(endpointUdpPort)}, }, Selector: selector, }, @@ -397,9 +387,26 @@ func (config *KubeProxyTestConfig) waitForLoadBalancerIngressSetup() { config.loadBalancerService, _ = config.getServiceClient().Get(loadBalancerServiceName) } -func (config *KubeProxyTestConfig) createTestPod() { +func (config *KubeProxyTestConfig) createTestPods() { testContainerPod := config.createTestPodSpec() - config.testContainerPod = config.createPod(testContainerPod) + hostTestContainerPod := NewHostExecPodSpec(config.f.Namespace.Name, hostTestPodName) + + config.createPod(testContainerPod) + config.createPod(hostTestContainerPod) + + expectNoError(config.f.WaitForPodRunning(testContainerPod.Name)) + 
expectNoError(config.f.WaitForPodRunning(hostTestContainerPod.Name)) + + var err error + config.testContainerPod, err = config.getPodClient().Get(testContainerPod.Name) + if err != nil { + Failf("Failed to retrieve %s pod: %v", testContainerPod.Name, err) + } + + config.hostTestContainerPod, err = config.getPodClient().Get(hostTestContainerPod.Name) + if err != nil { + Failf("Failed to retrieve %s pod: %v", hostTestContainerPod.Name, err) + } } func (config *KubeProxyTestConfig) createService(serviceSpec *api.Service) *api.Service { @@ -422,13 +429,16 @@ func (config *KubeProxyTestConfig) setup() { selectorName: "true", } - By("Getting ssh-able hosts") - hosts, err := NodeSSHHosts(config.f.Client) - Expect(err).NotTo(HaveOccurred()) - config.nodes = make([]string, 0, len(hosts)) - for _, h := range hosts { - config.nodes = append(config.nodes, strings.TrimSuffix(h, ":22")) + By("Getting two nodes") + nodeList, err := config.f.Client.Nodes().List(labels.Everything(), fields.Everything()) + Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to get node list: %v", err)) + config.externalAddrs = NodeAddresses(nodeList, api.NodeExternalIP) + if len(config.externalAddrs) < 2 { + // fall back to legacy IPs + config.externalAddrs = NodeAddresses(nodeList, api.NodeLegacyHostIP) } + Expect(len(config.externalAddrs)).To(BeNumerically(">=", 2), fmt.Sprintf("At least two nodes necessary with an external or LegacyHostIP")) + config.nodes = nodeList.Items if enableLoadBalancerTest { By("Creating the LoadBalancer Service on top of the pods in kubernetes") @@ -437,13 +447,13 @@ func (config *KubeProxyTestConfig) setup() { By("Creating the service pods in kubernetes") podName := "netserver" - config.endpointPods = config.createNetProxyPods(podName, serviceSelector, testContext.CloudConfig.NumNodes) + config.endpointPods = config.createNetProxyPods(podName, serviceSelector) By("Creating the service on top of the pods in kubernetes") config.createNodePortService(serviceSelector) By("Creating test pods") - config.createTestPod() + config.createTestPods() } func (config *KubeProxyTestConfig) cleanup() { @@ -458,23 +468,35 @@ func (config *KubeProxyTestConfig) cleanup() { } } -func (config *KubeProxyTestConfig) createNetProxyPods(podName string, selector map[string]string, nodeCount int) []*api.Pod { - //testContext.CloudConfig.NumNodes - pods := make([]*api.Pod, 0) +func (config *KubeProxyTestConfig) createNetProxyPods(podName string, selector map[string]string) []*api.Pod { + nodes, err := config.f.Client.Nodes().List(labels.Everything(), fields.Everything()) + Expect(err).NotTo(HaveOccurred()) - for i := 0; i < nodeCount; i++ { + // create pods, one for each node + createdPods := make([]*api.Pod, 0, len(nodes.Items)) + for i, n := range nodes.Items { podName := fmt.Sprintf("%s-%d", podName, i) - pod := config.createNetShellPodSpec(podName) + pod := config.createNetShellPodSpec(podName, n.Name) pod.ObjectMeta.Labels = selector createdPod := config.createPod(pod) - pods = append(pods, createdPod) + createdPods = append(createdPods, createdPod) } - return pods + + // wait that all of them are up + runningPods := make([]*api.Pod, 0, len(nodes.Items)) + for _, p := range createdPods { + expectNoError(config.f.WaitForPodRunning(p.Name)) + rp, err := config.getPodClient().Get(p.Name) + expectNoError(err) + runningPods = append(runningPods, rp) + } + + return runningPods } func (config *KubeProxyTestConfig) deleteNetProxyPod() { pod := config.endpointPods[0] - config.getPodClient().Delete(pod.Name, nil) + 
config.getPodClient().Delete(pod.Name, api.NewDeleteOptions(0)) config.endpointPods = config.endpointPods[1:] // wait for pod being deleted. err := waitForPodToDisappear(config.f.Client, config.f.Namespace.Name, pod.Name, labels.Everything(), time.Second, util.ForeverTestTimeout) @@ -495,11 +517,6 @@ func (config *KubeProxyTestConfig) createPod(pod *api.Pod) *api.Pod { if err != nil { Failf("Failed to create %s pod: %v", pod.Name, err) } - expectNoError(config.f.WaitForPodRunning(pod.Name)) - createdPod, err = config.getPodClient().Get(pod.Name) - if err != nil { - Failf("Failed to retrieve %s pod: %v", pod.Name, err) - } return createdPod } diff --git a/test/e2e/privileged.go b/test/e2e/privileged.go index b2cf98e08f..9385004f62 100644 --- a/test/e2e/privileged.go +++ b/test/e2e/privileged.go @@ -43,7 +43,7 @@ const ( type PrivilegedPodTestConfig struct { privilegedPod *api.Pod f *Framework - nodes []string + hostExecPod *api.Pod } var _ = Describe("PrivilegedPod", func() { @@ -52,15 +52,10 @@ var _ = Describe("PrivilegedPod", func() { f: f, } It("should test privileged pod", func() { - SkipUnlessProviderIs(providersWithSSH...) - - By("Getting ssh-able hosts") - hosts, err := NodeSSHHosts(config.f.Client) - Expect(err).NotTo(HaveOccurred()) - if len(hosts) == 0 { - Failf("No ssh-able nodes") - } - config.nodes = hosts + hostExecPod := NewHostExecPodSpec(f.Namespace.Name, "hostexec") + pod, err := config.getPodClient().Create(hostExecPod) + expectNoError(err) + config.hostExecPod = pod By("Creating a privileged pod") config.createPrivilegedPod() @@ -96,8 +91,7 @@ func (config *PrivilegedPodTestConfig) dialFromContainer(containerIP string, con v.Encode()) By(fmt.Sprintf("Exec-ing into container over http. Running command:%s", cmd)) - stdout := config.ssh(cmd) - Logf("Output is %q", stdout) + stdout := RunHostCmdOrDie(config.hostExecPod.Namespace, config.hostExecPod.Name, cmd) var output map[string]string err := json.Unmarshal([]byte(stdout), &output) Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Could not unmarshal curl response: %s", stdout)) @@ -172,10 +166,3 @@ func (config *PrivilegedPodTestConfig) getPodClient() client.PodInterface { func (config *PrivilegedPodTestConfig) getNamespaceClient() client.NamespaceInterface { return config.f.Client.Namespaces() } - -func (config *PrivilegedPodTestConfig) ssh(cmd string) string { - stdout, _, code, err := SSH(cmd, config.nodes[0], testContext.Provider) - Expect(err).NotTo(HaveOccurred(), "error while SSH-ing to node: %v (code %v)", err, code) - Expect(code).Should(BeZero(), "command exited with non-zero code %v. cmd:%s", code, cmd) - return stdout -} diff --git a/test/e2e/service.go b/test/e2e/service.go index be8d14d9f4..73871b7f00 100644 --- a/test/e2e/service.go +++ b/test/e2e/service.go @@ -394,17 +394,11 @@ var _ = Describe("Services", func() { ip := pickNodeIP(c) testReachable(ip, nodePort) - // this test uses NodeSSHHosts that does not work if a Node only reports LegacyHostIP - if providerIs(providersWithSSH...) 
{ - hosts, err := NodeSSHHosts(c) - if err != nil { - Expect(err).NotTo(HaveOccurred()) - } - cmd := fmt.Sprintf(`test -n "$(ss -ant46 'sport = :%d' | tail -n +2 | grep LISTEN)"`, nodePort) - _, _, code, err := SSH(cmd, hosts[0], testContext.Provider) - if code != 0 { - Failf("expected node port (%d) to be in use", nodePort) - } + hostExec := LaunchHostExecPod(f.Client, f.Namespace.Name, "hostexec") + cmd := fmt.Sprintf(`ss -ant46 'sport = :%d' | tail -n +2 | grep LISTEN`, nodePort) + stdout, err := RunHostCmd(hostExec.Namespace, hostExec.Name, cmd) + if err != nil { + Failf("expected node port (%d) to be in use, stdout: %v", nodePort, stdout) } }) @@ -759,17 +753,11 @@ var _ = Describe("Services", func() { err = t.DeleteService(serviceName) Expect(err).NotTo(HaveOccurred()) - // this test uses NodeSSHHosts that does not work if a Node only reports LegacyHostIP - if providerIs(providersWithSSH...) { - hosts, err := NodeSSHHosts(c) - if err != nil { - Expect(err).NotTo(HaveOccurred()) - } - cmd := fmt.Sprintf(`test -n "$(ss -ant46 'sport = :%d' | tail -n +2 | grep LISTEN)"`, nodePort) - _, _, code, err := SSH(cmd, hosts[0], testContext.Provider) - if code == 0 { - Failf("expected node port (%d) to not be in use", nodePort) - } + hostExec := LaunchHostExecPod(f.Client, f.Namespace.Name, "hostexec") + cmd := fmt.Sprintf(`! ss -ant46 'sport = :%d' | tail -n +2 | grep LISTEN`, nodePort) + stdout, err := RunHostCmd(hostExec.Namespace, hostExec.Name, cmd) + if err != nil { + Failf("expected node port (%d) to not be in use, stdout: %v", nodePort, stdout) } By(fmt.Sprintf("creating service "+serviceName+" with same NodePort %d", nodePort)) diff --git a/test/e2e/util.go b/test/e2e/util.go index 7e1adc82f1..08f644161f 100644 --- a/test/e2e/util.go +++ b/test/e2e/util.go @@ -22,6 +22,7 @@ import ( "io" "math" "math/rand" + "net" "net/http" "net/url" "os" @@ -1077,7 +1078,8 @@ func (b kubectlBuilder) exec() (string, error) { if err := cmd.Run(); err != nil { return "", fmt.Errorf("Error running %v:\nCommand stdout:\n%v\nstderr:\n%v\n", cmd, cmd.Stdout, cmd.Stderr) } - Logf(stdout.String()) + Logf("stdout: %q", stdout.String()) + Logf("stderr: %q", stderr.String()) // TODO: trimspace should be unnecessary after switching to use kubectl binary directly return strings.TrimSpace(stdout.String()), nil } @@ -1883,27 +1885,35 @@ func BadEvents(events []*api.Event) int { return badEvents } -// NodeSSHHosts returns SSH-able host names for all nodes. It returns an error -// if it can't find an external IP for every node, though it still returns all -// hosts that it found in that case. -func NodeSSHHosts(c *client.Client) ([]string, error) { - var hosts []string - nodelist, err := c.Nodes().List(labels.Everything(), fields.Everything()) - if err != nil { - return hosts, fmt.Errorf("error getting nodes: %v", err) - } +// NodeAddresses returns the first address of the given type of each node. +func NodeAddresses(nodelist *api.NodeList, addrType api.NodeAddressType) []string { + hosts := []string{} for _, n := range nodelist.Items { for _, addr := range n.Status.Addresses { // Use the first external IP address we find on the node, and // use at most one per node. // TODO(roberthbailey): Use the "preferred" address for the node, once // such a thing is defined (#2462). - if addr.Type == api.NodeExternalIP { - hosts = append(hosts, addr.Address+":22") + if addr.Type == addrType { + hosts = append(hosts, addr.Address) break } } } + return hosts +} + +// NodeSSHHosts returns SSH-able host names for all nodes. 
It returns an error +// if it can't find an external IP for every node, though it still returns all +// hosts that it found in that case. +func NodeSSHHosts(c *client.Client) ([]string, error) { + nodelist, err := c.Nodes().List(labels.Everything(), fields.Everything()) + if err != nil { + return nil, fmt.Errorf("error getting nodes: %v", err) + } + + // TODO(roberthbailey): Use the "preferred" address for the node, once such a thing is defined (#2462). + hosts := NodeAddresses(nodelist, api.NodeExternalIP) // Error if any node didn't have an external IP. if len(hosts) != len(nodelist.Items) { @@ -1911,7 +1921,12 @@ func NodeSSHHosts(c *client.Client) ([]string, error) { "only found %d external IPs on nodes, but found %d nodes. Nodelist: %v", len(hosts), len(nodelist.Items), nodelist) } - return hosts, nil + + sshHosts := make([]string, 0, len(hosts)) + for _, h := range hosts { + sshHosts = append(sshHosts, net.JoinHostPort(h, "22")) + } + return sshHosts, nil } // SSH synchronously SSHs to a node running on provider and runs cmd. If there @@ -1982,8 +1997,15 @@ func NewHostExecPodSpec(ns, name string) *api.Pod { // RunHostCmd runs the given cmd in the context of the given pod using `kubectl exec` // inside of a shell. -func RunHostCmd(ns, name, cmd string) string { - return runKubectlOrDie("exec", fmt.Sprintf("--namespace=%v", ns), name, "--", "/bin/sh", "-c", cmd) +func RunHostCmd(ns, name, cmd string) (string, error) { + return runKubectl("exec", fmt.Sprintf("--namespace=%v", ns), name, "--", "/bin/sh", "-c", cmd) +} + +// RunHostCmdOrDie calls RunHostCmd and dies on error. +func RunHostCmdOrDie(ns, name, cmd string) string { + stdout, err := RunHostCmd(ns, name, cmd) + expectNoError(err) + return stdout } // LaunchHostExecPod launches a hostexec pod in the given namespace and waits diff --git a/test/images/hostexec/Dockerfile b/test/images/hostexec/Dockerfile index aa6c1dbbda..a81d3e7255 100644 --- a/test/images/hostexec/Dockerfile +++ b/test/images/hostexec/Dockerfile @@ -14,6 +14,9 @@ FROM alpine:3.2 +# install necessary packages: +# - curl, nc: used by a lot of e2e tests +# - iproute2: includes ss used in NodePort tests run apk --update add curl netcat-openbsd iproute2 && rm -rf /var/cache/apk/* # wait forever
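
Usage note (illustrative, not part of the patch): the helpers added to test/e2e/util.go are all a test needs in order to drop its SSH dependency — launch a hostexec pod (hostNetwork, per the commit subject, so it sees the node's ports), then run the old shell command through `kubectl exec`. A minimal sketch in Go, assuming the e2e package symbols introduced or used by this patch (LaunchHostExecPod, RunHostCmd, RunHostCmdOrDie, Framework, Failf); nodePort is a placeholder and this is not the literal code of any ported test:

// Sketch only: replacing an SSH probe with the hostexec helpers above.
func checkNodePortInUse(f *Framework, nodePort int) {
	// Start a pod on the host network and wait for it to be running.
	hostExec := LaunchHostExecPod(f.Client, f.Namespace.Name, "hostexec")

	// Same shell command as before, now executed via `kubectl exec`.
	cmd := fmt.Sprintf(`ss -ant46 'sport = :%d' | tail -n +2 | grep LISTEN`, nodePort)
	stdout, err := RunHostCmd(hostExec.Namespace, hostExec.Name, cmd)
	if err != nil {
		Failf("expected node port (%d) to be in use, stdout: %v", nodePort, stdout)
	}

	// Alternatively, when any failure should fail the test immediately:
	_ = RunHostCmdOrDie(hostExec.Namespace, hostExec.Name, cmd)
}

RunHostCmd now returns the error so callers can assert on it either way (including the negated `! ss ...` check in service.go), while RunHostCmdOrDie is the drop-in replacement for the removed ssh() helpers, which failed the test on any non-zero exit code.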
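
Two of the commit-message bullets ("use preset NodeName to schedule test pods to different nodes" and "delete test container without grace period") come down to a pair of API details that the kube-proxy diff relies on. A hedged sketch, with the pod spec trimmed to the relevant fields and a placeholder image name; it mirrors createNetShellPodSpec/deleteNetProxyPod rather than reproducing them:

// Sketch only: pin a pod to a given node and delete it without a grace period.
// "placeholder-image" is not the image the test actually uses.
func runPinnedPod(c *client.Client, ns string, node api.Node) error {
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{Name: "pinned-pod"},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{Name: "main", Image: "placeholder-image"},
			},
			// Presetting NodeName binds the pod to this node without going
			// through the scheduler, which is how the test gets exactly one
			// endpoint pod per node.
			NodeName: node.Name,
		},
	}
	if _, err := c.Pods(ns).Create(pod); err != nil {
		return err
	}
	// A zero grace period removes the endpoint right away instead of waiting
	// out the default graceful termination.
	return c.Pods(ns).Delete(pod.Name, api.NewDeleteOptions(0))
}

(Imports assumed: api "k8s.io/kubernetes/pkg/api" and client "k8s.io/kubernetes/pkg/client/unversioned", as already used in test/e2e/kubeproxy.go.)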