e2e: replace ssh with net=host hostexec pod and kubectl exec

- document needed packages in hostexec image
- add RunHostCmdOrDie
- kube-proxy e2e: port from ssh to hostexec
  - use preset NodeName to schedule test pods to different nodes
  - parallel launch of pods
  - port from ssh to hostexec
  - add timeout because nc might block on udp
  - delete test container without grace period
- PrivilegedPod e2e: port from ssh to hostexec
- NodePort e2e: port from ssh to hostexec
- cluster/mesos/docker: Enable privileged pods
Dr. Stefan Schimanski 2015-11-13 00:30:21 +01:00
parent 7ad8bb1787
commit 2d6c2eb897
7 changed files with 130 additions and 111 deletions
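The pattern these changes converge on, shown here as a minimal sketch rather than a verbatim excerpt of the diffs below (it reuses the helpers added or reworked in this commit — NewHostExecPodSpec, RunHostCmd, RunHostCmdOrDie — inside a hypothetical test body; the ss command is only an example):

    // Launch a pod that shares the node's network namespace (net=host) so that
    // node-level commands can be run via `kubectl exec` instead of SSH.
    hostExecPod := NewHostExecPodSpec(f.Namespace.Name, "hostexec")
    pod, err := f.Client.Pods(f.Namespace.Name).Create(hostExecPod)
    expectNoError(err)
    expectNoError(f.WaitForPodRunning(pod.Name))

    // Where a test previously ran SSH(cmd, node+":22", provider), it now execs
    // the same shell command inside the hostexec pod on that node.
    stdout, err := RunHostCmd(f.Namespace.Name, pod.Name, "ss -ant46 | head")
    if err != nil {
        Failf("host command failed: %v, stdout: %v", err, stdout)
    }

    // Or, when any failure should abort the test immediately:
    hostname := RunHostCmdOrDie(f.Namespace.Name, pod.Name, "hostname")
    Logf("node hostname: %s", hostname)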

View File

@@ -86,6 +86,7 @@ apiserver:
     --service-node-port-range=30000-32767
     --cloud-provider=mesos
     --cloud-config=/opt/mesos-cloud.conf
+    --allow-privileged
     --tls-cert-file=/var/run/kubernetes/auth/apiserver.crt
     --tls-private-key-file=/var/run/kubernetes/auth/apiserver.key
     --runtime-config=experimental/v1alpha1
@@ -138,6 +139,7 @@ scheduler:
     --mesos-user=root
     --api-servers=http://apiserver:8888
     --mesos-master=mesosmaster1:5050
+    --allow-privileged
     --cluster-dns=10.10.10.10
     --cluster-domain=cluster.local
     --mesos-executor-cpus=1.0

View File

@@ -228,7 +228,7 @@ func (s *SchedulerServer) addCoreFlags(fs *pflag.FlagSet) {
     fs.StringVar(&s.authPath, "auth-path", s.authPath, "Path to .kubernetes_auth file, specifying how to authenticate to API server.")
     fs.StringSliceVar(&s.etcdServerList, "etcd-servers", s.etcdServerList, "List of etcd servers to watch (http://ip:port), comma separated. Mutually exclusive with --etcd-config")
     fs.StringVar(&s.etcdConfigFile, "etcd-config", s.etcdConfigFile, "The config file for the etcd client. Mutually exclusive with --etcd-servers.")
-    fs.BoolVar(&s.allowPrivileged, "allow-privileged", s.allowPrivileged, "If true, allow privileged containers.")
+    fs.BoolVar(&s.allowPrivileged, "allow-privileged", s.allowPrivileged, "Enable privileged containers in the kubelet (compare the same flag in the apiserver).")
     fs.StringVar(&s.clusterDomain, "cluster-domain", s.clusterDomain, "Domain for this cluster. If set, kubelet will configure all containers to search this domain in addition to the host's search domains")
     fs.IPVar(&s.clusterDNS, "cluster-dns", s.clusterDNS, "IP address for a cluster DNS server. If set, kubelet will configure all containers to use this for DNS resolution in addition to the host's DNS servers")
     fs.StringVar(&s.staticPodsConfigPath, "static-pods-config", s.staticPodsConfigPath, "Path for specification of static pods. Path should point to dir containing the staticPods configuration files. Defaults to none.")

View File

@@ -31,6 +31,7 @@ import (
     "k8s.io/kubernetes/pkg/api/latest"
     "k8s.io/kubernetes/pkg/api/unversioned"
     client "k8s.io/kubernetes/pkg/client/unversioned"
+    "k8s.io/kubernetes/pkg/fields"
     "k8s.io/kubernetes/pkg/labels"
     "k8s.io/kubernetes/pkg/util"
     "k8s.io/kubernetes/pkg/util/intstr"
@@ -40,7 +41,6 @@ import (
 const (
     endpointHttpPort      = 8080
     endpointUdpPort       = 8081
-    endpointHostPort      = 8082
     testContainerHttpPort = 8080
     clusterHttpPort       = 80
     clusterUdpPort        = 90
@@ -49,6 +49,7 @@ const (
     loadBalancerHttpPort    = 100
     netexecImageName        = "gcr.io/google_containers/netexec:1.0"
     testPodName             = "test-container-pod"
+    hostTestPodName         = "host-test-container-pod"
     nodePortServiceName     = "node-port-service"
     loadBalancerServiceName = "load-balancer-service"
     enableLoadBalancerTest  = false
@@ -56,12 +57,13 @@ const (
 type KubeProxyTestConfig struct {
     testContainerPod     *api.Pod
-    testHostPod          *api.Pod
+    hostTestContainerPod *api.Pod
     endpointPods         []*api.Pod
     f                    *Framework
     nodePortService      *api.Service
     loadBalancerService  *api.Service
-    nodes                []string
+    externalAddrs        []string
+    nodes                []api.Node
 }

 var _ = Describe("KubeProxy", func() {
@ -71,8 +73,6 @@ var _ = Describe("KubeProxy", func() {
} }
It("should test kube-proxy", func() { It("should test kube-proxy", func() {
SkipUnlessProviderIs(providersWithSSH...)
By("cleaning up any pre-existing namespaces used by this test") By("cleaning up any pre-existing namespaces used by this test")
config.cleanup() config.cleanup()
@@ -168,7 +168,7 @@ func (config *KubeProxyTestConfig) hitClusterIP(epCount int) {
 }

 func (config *KubeProxyTestConfig) hitNodePort(epCount int) {
-    node1_IP := config.nodes[0]
+    node1_IP := config.externalAddrs[0]
     tries := epCount*epCount + 5 // + 10 if epCount == 0

     By("dialing(udp) node1 --> node1:nodeUdpPort")
     config.dialFromNode("udp", node1_IP, nodeUdpPort, tries, epCount)
@@ -177,7 +177,7 @@ func (config *KubeProxyTestConfig) hitNodePort(epCount int) {
     By("dialing(udp) test container --> node1:nodeUdpPort")
     config.dialFromTestContainer("udp", node1_IP, nodeUdpPort, tries, epCount)
-    By("dialing(http) container --> node1:nodeHttpPort")
+    By("dialing(http) test container --> node1:nodeHttpPort")
     config.dialFromTestContainer("http", node1_IP, nodeHttpPort, tries, epCount)

     By("dialing(udp) endpoint container --> node1:nodeUdpPort")
@@ -192,7 +192,7 @@ func (config *KubeProxyTestConfig) hitNodePort(epCount int) {
     By("Test disabled. dialing(http) node --> 127.0.0.1:nodeHttpPort")
     //config.dialFromNode("http", "127.0.0.1", nodeHttpPort, tries, epCount)

-    node2_IP := config.nodes[1]
+    node2_IP := config.externalAddrs[1]
     By("dialing(udp) node1 --> node2:nodeUdpPort")
     config.dialFromNode("udp", node2_IP, nodeUdpPort, tries, epCount)
     By("dialing(http) node1 --> node2:nodeHttpPort")
@@ -231,7 +231,7 @@ func (config *KubeProxyTestConfig) dialFromContainer(protocol, containerIP, targ
         tries)

     By(fmt.Sprintf("Dialing from container. Running command:%s", cmd))
-    stdout := config.ssh(cmd)
+    stdout := RunHostCmdOrDie(config.f.Namespace.Name, config.hostTestContainerPod.Name, cmd)
     var output map[string][]string
     err := json.Unmarshal([]byte(stdout), &output)
     Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Could not unmarshal curl response: %s", stdout))
@@ -242,24 +242,17 @@ func (config *KubeProxyTestConfig) dialFromContainer(protocol, containerIP, targ
 func (config *KubeProxyTestConfig) dialFromNode(protocol, targetIP string, targetPort, tries, expectedCount int) {
     var cmd string
     if protocol == "udp" {
-        cmd = fmt.Sprintf("echo 'hostName' | nc -w 1 -u %s %d", targetIP, targetPort)
+        cmd = fmt.Sprintf("echo 'hostName' | timeout -t 3 nc -w 1 -u %s %d", targetIP, targetPort)
     } else {
         cmd = fmt.Sprintf("curl -s --connect-timeout 1 http://%s:%d/hostName", targetIP, targetPort)
     }
     forLoop := fmt.Sprintf("for i in $(seq 1 %d); do %s; echo; done | grep -v '^\\s*$' |sort | uniq -c | wc -l", tries, cmd)
     By(fmt.Sprintf("Dialing from node. command:%s", forLoop))
-    stdout := config.ssh(forLoop)
+    stdout := RunHostCmdOrDie(config.f.Namespace.Name, config.hostTestContainerPod.Name, forLoop)
     Expect(strconv.Atoi(strings.TrimSpace(stdout))).To(BeNumerically("==", expectedCount))
 }

-func (config *KubeProxyTestConfig) ssh(cmd string) string {
-    stdout, _, code, err := SSH(cmd, config.nodes[0]+":22", testContext.Provider)
-    Expect(err).NotTo(HaveOccurred(), "error while SSH-ing to node: %v (code %v)", err, code)
-    Expect(code).Should(BeZero(), "command exited with non-zero code %v. cmd:%s", code, cmd)
-    return stdout
-}
-
-func (config *KubeProxyTestConfig) createNetShellPodSpec(podName string) *api.Pod {
+func (config *KubeProxyTestConfig) createNetShellPodSpec(podName string, node string) *api.Pod {
     pod := &api.Pod{
         TypeMeta: unversioned.TypeMeta{
             Kind: "Pod",
@@ -288,15 +281,12 @@ func (config *KubeProxyTestConfig) createNetShellPodSpec(podName string) *api.Po
                     {
                         Name:          "udp",
                         ContainerPort: endpointUdpPort,
-                    },
-                    {
-                        Name:          "host",
-                        ContainerPort: endpointHttpPort,
-                        HostPort:      endpointHostPort,
+                        Protocol:      api.ProtocolUDP,
                     },
                 },
             },
         },
+        NodeName: node,
     },
 }
 return pod
@@ -344,8 +334,8 @@ func (config *KubeProxyTestConfig) createNodePortService(selector map[string]str
     Spec: api.ServiceSpec{
         Type: api.ServiceTypeNodePort,
         Ports: []api.ServicePort{
-            {Port: clusterHttpPort, Name: "http", Protocol: "TCP", NodePort: nodeHttpPort, TargetPort: intstr.FromInt(endpointHttpPort)},
-            {Port: clusterUdpPort, Name: "udp", Protocol: "UDP", NodePort: nodeUdpPort, TargetPort: intstr.FromInt(endpointUdpPort)},
+            {Port: clusterHttpPort, Name: "http", Protocol: api.ProtocolTCP, NodePort: nodeHttpPort, TargetPort: intstr.FromInt(endpointHttpPort)},
+            {Port: clusterUdpPort, Name: "udp", Protocol: api.ProtocolUDP, NodePort: nodeUdpPort, TargetPort: intstr.FromInt(endpointUdpPort)},
         },
         Selector: selector,
     },
@@ -397,9 +387,26 @@ func (config *KubeProxyTestConfig) waitForLoadBalancerIngressSetup() {
     config.loadBalancerService, _ = config.getServiceClient().Get(loadBalancerServiceName)
 }

-func (config *KubeProxyTestConfig) createTestPod() {
+func (config *KubeProxyTestConfig) createTestPods() {
     testContainerPod := config.createTestPodSpec()
-    config.testContainerPod = config.createPod(testContainerPod)
+    hostTestContainerPod := NewHostExecPodSpec(config.f.Namespace.Name, hostTestPodName)
+
+    config.createPod(testContainerPod)
+    config.createPod(hostTestContainerPod)
+
+    expectNoError(config.f.WaitForPodRunning(testContainerPod.Name))
+    expectNoError(config.f.WaitForPodRunning(hostTestContainerPod.Name))
+
+    var err error
+    config.testContainerPod, err = config.getPodClient().Get(testContainerPod.Name)
+    if err != nil {
+        Failf("Failed to retrieve %s pod: %v", testContainerPod.Name, err)
+    }
+
+    config.hostTestContainerPod, err = config.getPodClient().Get(hostTestContainerPod.Name)
+    if err != nil {
+        Failf("Failed to retrieve %s pod: %v", hostTestContainerPod.Name, err)
+    }
 }

 func (config *KubeProxyTestConfig) createService(serviceSpec *api.Service) *api.Service {
@@ -422,13 +429,16 @@ func (config *KubeProxyTestConfig) setup() {
         selectorName: "true",
     }

-    By("Getting ssh-able hosts")
-    hosts, err := NodeSSHHosts(config.f.Client)
-    Expect(err).NotTo(HaveOccurred())
-    config.nodes = make([]string, 0, len(hosts))
-    for _, h := range hosts {
-        config.nodes = append(config.nodes, strings.TrimSuffix(h, ":22"))
+    By("Getting two nodes")
+    nodeList, err := config.f.Client.Nodes().List(labels.Everything(), fields.Everything())
+    Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to get node list: %v", err))
+    config.externalAddrs = NodeAddresses(nodeList, api.NodeExternalIP)
+    if len(config.externalAddrs) < 2 {
+        // fall back to legacy IPs
+        config.externalAddrs = NodeAddresses(nodeList, api.NodeLegacyHostIP)
     }
+    Expect(len(config.externalAddrs)).To(BeNumerically(">=", 2), fmt.Sprintf("At least two nodes necessary with an external or LegacyHostIP"))
+    config.nodes = nodeList.Items

     if enableLoadBalancerTest {
         By("Creating the LoadBalancer Service on top of the pods in kubernetes")
@@ -437,13 +447,13 @@ func (config *KubeProxyTestConfig) setup() {
     By("Creating the service pods in kubernetes")
     podName := "netserver"
-    config.endpointPods = config.createNetProxyPods(podName, serviceSelector, testContext.CloudConfig.NumNodes)
+    config.endpointPods = config.createNetProxyPods(podName, serviceSelector)

     By("Creating the service on top of the pods in kubernetes")
     config.createNodePortService(serviceSelector)

     By("Creating test pods")
-    config.createTestPod()
+    config.createTestPods()
 }

 func (config *KubeProxyTestConfig) cleanup() {
@@ -458,23 +468,35 @@ func (config *KubeProxyTestConfig) cleanup() {
     }
 }

-func (config *KubeProxyTestConfig) createNetProxyPods(podName string, selector map[string]string, nodeCount int) []*api.Pod {
-    //testContext.CloudConfig.NumNodes
-    pods := make([]*api.Pod, 0)
-    for i := 0; i < nodeCount; i++ {
+func (config *KubeProxyTestConfig) createNetProxyPods(podName string, selector map[string]string) []*api.Pod {
+    nodes, err := config.f.Client.Nodes().List(labels.Everything(), fields.Everything())
+    Expect(err).NotTo(HaveOccurred())
+
+    // create pods, one for each node
+    createdPods := make([]*api.Pod, 0, len(nodes.Items))
+    for i, n := range nodes.Items {
         podName := fmt.Sprintf("%s-%d", podName, i)
-        pod := config.createNetShellPodSpec(podName)
+        pod := config.createNetShellPodSpec(podName, n.Name)
         pod.ObjectMeta.Labels = selector
         createdPod := config.createPod(pod)
-        pods = append(pods, createdPod)
+        createdPods = append(createdPods, createdPod)
     }
-    return pods
+
+    // wait that all of them are up
+    runningPods := make([]*api.Pod, 0, len(nodes.Items))
+    for _, p := range createdPods {
+        expectNoError(config.f.WaitForPodRunning(p.Name))
+        rp, err := config.getPodClient().Get(p.Name)
+        expectNoError(err)
+        runningPods = append(runningPods, rp)
+    }
+
+    return runningPods
 }

 func (config *KubeProxyTestConfig) deleteNetProxyPod() {
     pod := config.endpointPods[0]
-    config.getPodClient().Delete(pod.Name, nil)
+    config.getPodClient().Delete(pod.Name, api.NewDeleteOptions(0))
     config.endpointPods = config.endpointPods[1:]

     // wait for pod being deleted.
     err := waitForPodToDisappear(config.f.Client, config.f.Namespace.Name, pod.Name, labels.Everything(), time.Second, util.ForeverTestTimeout)
@@ -495,11 +517,6 @@ func (config *KubeProxyTestConfig) createPod(pod *api.Pod) *api.Pod {
     if err != nil {
         Failf("Failed to create %s pod: %v", pod.Name, err)
     }
-    expectNoError(config.f.WaitForPodRunning(pod.Name))
-    createdPod, err = config.getPodClient().Get(pod.Name)
-    if err != nil {
-        Failf("Failed to retrieve %s pod: %v", pod.Name, err)
-    }
     return createdPod
 }
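For reference, the create-first/wait-later launch used by createTestPods and createNetProxyPods above, reduced to a standalone sketch (specs is a placeholder for the pod specs built by createNetShellPodSpec; helper names are those from the diff). Creating every pod before waiting lets the nodes pull images and start containers concurrently, so the total wait is roughly that of the slowest pod rather than the sum of all of them:

    // Create every pod first so the nodes work in parallel.
    created := make([]*api.Pod, 0, len(specs))
    for _, spec := range specs {
        p, err := f.Client.Pods(f.Namespace.Name).Create(spec)
        expectNoError(err)
        created = append(created, p)
    }

    // Then wait for all of them and re-read the final objects (pod IPs etc.).
    running := make([]*api.Pod, 0, len(created))
    for _, p := range created {
        expectNoError(f.WaitForPodRunning(p.Name))
        rp, err := f.Client.Pods(f.Namespace.Name).Get(p.Name)
        expectNoError(err)
        running = append(running, rp)
    }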

View File

@@ -43,7 +43,7 @@
 type PrivilegedPodTestConfig struct {
     privilegedPod *api.Pod
     f             *Framework
-    nodes         []string
+    hostExecPod   *api.Pod
 }

 var _ = Describe("PrivilegedPod", func() {
@ -52,15 +52,10 @@ var _ = Describe("PrivilegedPod", func() {
f: f, f: f,
} }
It("should test privileged pod", func() { It("should test privileged pod", func() {
SkipUnlessProviderIs(providersWithSSH...) hostExecPod := NewHostExecPodSpec(f.Namespace.Name, "hostexec")
pod, err := config.getPodClient().Create(hostExecPod)
By("Getting ssh-able hosts") expectNoError(err)
hosts, err := NodeSSHHosts(config.f.Client) config.hostExecPod = pod
Expect(err).NotTo(HaveOccurred())
if len(hosts) == 0 {
Failf("No ssh-able nodes")
}
config.nodes = hosts
By("Creating a privileged pod") By("Creating a privileged pod")
config.createPrivilegedPod() config.createPrivilegedPod()
@@ -96,8 +91,7 @@ func (config *PrivilegedPodTestConfig) dialFromContainer(containerIP string, con
         v.Encode())

     By(fmt.Sprintf("Exec-ing into container over http. Running command:%s", cmd))
-    stdout := config.ssh(cmd)
-    Logf("Output is %q", stdout)
+    stdout := RunHostCmdOrDie(config.hostExecPod.Namespace, config.hostExecPod.Name, cmd)
     var output map[string]string
     err := json.Unmarshal([]byte(stdout), &output)
     Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Could not unmarshal curl response: %s", stdout))
@@ -172,10 +166,3 @@ func (config *PrivilegedPodTestConfig) getPodClient() client.PodInterface {
 func (config *PrivilegedPodTestConfig) getNamespaceClient() client.NamespaceInterface {
     return config.f.Client.Namespaces()
 }
-
-func (config *PrivilegedPodTestConfig) ssh(cmd string) string {
-    stdout, _, code, err := SSH(cmd, config.nodes[0], testContext.Provider)
-    Expect(err).NotTo(HaveOccurred(), "error while SSH-ing to node: %v (code %v)", err, code)
-    Expect(code).Should(BeZero(), "command exited with non-zero code %v. cmd:%s", code, cmd)
-    return stdout
-}

View File

@ -394,17 +394,11 @@ var _ = Describe("Services", func() {
ip := pickNodeIP(c) ip := pickNodeIP(c)
testReachable(ip, nodePort) testReachable(ip, nodePort)
// this test uses NodeSSHHosts that does not work if a Node only reports LegacyHostIP hostExec := LaunchHostExecPod(f.Client, f.Namespace.Name, "hostexec")
if providerIs(providersWithSSH...) { cmd := fmt.Sprintf(`ss -ant46 'sport = :%d' | tail -n +2 | grep LISTEN`, nodePort)
hosts, err := NodeSSHHosts(c) stdout, err := RunHostCmd(hostExec.Namespace, hostExec.Name, cmd)
if err != nil { if err != nil {
Expect(err).NotTo(HaveOccurred()) Failf("expected node port (%d) to be in use, stdout: %v", nodePort, stdout)
}
cmd := fmt.Sprintf(`test -n "$(ss -ant46 'sport = :%d' | tail -n +2 | grep LISTEN)"`, nodePort)
_, _, code, err := SSH(cmd, hosts[0], testContext.Provider)
if code != 0 {
Failf("expected node port (%d) to be in use", nodePort)
}
} }
}) })
@ -759,17 +753,11 @@ var _ = Describe("Services", func() {
err = t.DeleteService(serviceName) err = t.DeleteService(serviceName)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// this test uses NodeSSHHosts that does not work if a Node only reports LegacyHostIP hostExec := LaunchHostExecPod(f.Client, f.Namespace.Name, "hostexec")
if providerIs(providersWithSSH...) { cmd := fmt.Sprintf(`! ss -ant46 'sport = :%d' | tail -n +2 | grep LISTEN`, nodePort)
hosts, err := NodeSSHHosts(c) stdout, err := RunHostCmd(hostExec.Namespace, hostExec.Name, cmd)
if err != nil { if err != nil {
Expect(err).NotTo(HaveOccurred()) Failf("expected node port (%d) to not be in use, stdout: %v", nodePort, stdout)
}
cmd := fmt.Sprintf(`test -n "$(ss -ant46 'sport = :%d' | tail -n +2 | grep LISTEN)"`, nodePort)
_, _, code, err := SSH(cmd, hosts[0], testContext.Provider)
if code == 0 {
Failf("expected node port (%d) to not be in use", nodePort)
}
} }
By(fmt.Sprintf("creating service "+serviceName+" with same NodePort %d", nodePort)) By(fmt.Sprintf("creating service "+serviceName+" with same NodePort %d", nodePort))

View File

@@ -22,6 +22,7 @@ import (
     "io"
     "math"
     "math/rand"
+    "net"
     "net/http"
     "net/url"
     "os"
@@ -1077,7 +1078,8 @@ func (b kubectlBuilder) exec() (string, error) {
     if err := cmd.Run(); err != nil {
         return "", fmt.Errorf("Error running %v:\nCommand stdout:\n%v\nstderr:\n%v\n", cmd, cmd.Stdout, cmd.Stderr)
     }
-    Logf(stdout.String())
+    Logf("stdout: %q", stdout.String())
+    Logf("stderr: %q", stderr.String())
     // TODO: trimspace should be unnecessary after switching to use kubectl binary directly
     return strings.TrimSpace(stdout.String()), nil
 }
@@ -1883,27 +1885,35 @@ func BadEvents(events []*api.Event) int {
     return badEvents
 }

-// NodeSSHHosts returns SSH-able host names for all nodes. It returns an error
-// if it can't find an external IP for every node, though it still returns all
-// hosts that it found in that case.
-func NodeSSHHosts(c *client.Client) ([]string, error) {
-    var hosts []string
-    nodelist, err := c.Nodes().List(labels.Everything(), fields.Everything())
-    if err != nil {
-        return hosts, fmt.Errorf("error getting nodes: %v", err)
-    }
+// NodeAddresses returns the first address of the given type of each node.
+func NodeAddresses(nodelist *api.NodeList, addrType api.NodeAddressType) []string {
+    hosts := []string{}
     for _, n := range nodelist.Items {
         for _, addr := range n.Status.Addresses {
             // Use the first external IP address we find on the node, and
             // use at most one per node.
             // TODO(roberthbailey): Use the "preferred" address for the node, once
             // such a thing is defined (#2462).
-            if addr.Type == api.NodeExternalIP {
-                hosts = append(hosts, addr.Address+":22")
+            if addr.Type == addrType {
+                hosts = append(hosts, addr.Address)
                 break
             }
         }
     }
+    return hosts
+}
+
+// NodeSSHHosts returns SSH-able host names for all nodes. It returns an error
+// if it can't find an external IP for every node, though it still returns all
+// hosts that it found in that case.
+func NodeSSHHosts(c *client.Client) ([]string, error) {
+    nodelist, err := c.Nodes().List(labels.Everything(), fields.Everything())
+    if err != nil {
+        return nil, fmt.Errorf("error getting nodes: %v", err)
+    }
+
+    // TODO(roberthbailey): Use the "preferred" address for the node, once such a thing is defined (#2462).
+    hosts := NodeAddresses(nodelist, api.NodeExternalIP)

     // Error if any node didn't have an external IP.
     if len(hosts) != len(nodelist.Items) {
@@ -1911,7 +1921,12 @@ func NodeSSHHosts(c *client.Client) ([]string, error) {
             "only found %d external IPs on nodes, but found %d nodes. Nodelist: %v",
             len(hosts), len(nodelist.Items), nodelist)
     }
-    return hosts, nil
+
+    sshHosts := make([]string, 0, len(hosts))
+    for _, h := range hosts {
+        sshHosts = append(sshHosts, net.JoinHostPort(h, "22"))
+    }
+    return sshHosts, nil
 }

 // SSH synchronously SSHs to a node running on provider and runs cmd. If there
@@ -1982,8 +1997,15 @@ func NewHostExecPodSpec(ns, name string) *api.Pod {
 // RunHostCmd runs the given cmd in the context of the given pod using `kubectl exec`
 // inside of a shell.
-func RunHostCmd(ns, name, cmd string) string {
-    return runKubectlOrDie("exec", fmt.Sprintf("--namespace=%v", ns), name, "--", "/bin/sh", "-c", cmd)
+func RunHostCmd(ns, name, cmd string) (string, error) {
+    return runKubectl("exec", fmt.Sprintf("--namespace=%v", ns), name, "--", "/bin/sh", "-c", cmd)
+}
+
+// RunHostCmdOrDie calls RunHostCmd and dies on error.
+func RunHostCmdOrDie(ns, name, cmd string) string {
+    stdout, err := RunHostCmd(ns, name, cmd)
+    expectNoError(err)
+    return stdout
 }

 // LaunchHostExecPod launches a hostexec pod in the given namespace and waits

View File

@@ -14,6 +14,9 @@
 FROM alpine:3.2

+# install necessary packages:
+# - curl, nc: used by a lot of e2e tests
+# - iproute2: includes ss used in NodePort tests
 run apk --update add curl netcat-openbsd iproute2 && rm -rf /var/cache/apk/*

 # wait forever