k3s/test/e2e/service.go

/*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"fmt"
"io/ioutil"
"math/rand"
"net"
"net/http"
"sort"
"strconv"
"strings"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/pkg/util/intstr"
utilnet "k8s.io/kubernetes/pkg/util/net"
"k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/pkg/util/wait"
)
// Maximum time a kube-proxy daemon on a node is allowed to not
// notice a Service update, such as type=NodePort.
// TODO: This timeout should be O(10s), observed values are O(1m), 5m is very
// liberal. Fix tracked in #20567.
const kubeProxyLagTimeout = 5 * time.Minute
// Maximum time a load balancer is allowed to not respond after creation.
const loadBalancerLagTimeout = 2 * time.Minute
// How long to wait for a load balancer to be created/modified.
// TODO: once support ticket 21807001 is resolved, reduce this timeout back to something reasonable
const loadBalancerCreateTimeout = 20 * time.Minute
// This should match whatever the default/configured range is
var ServiceNodePortRange = utilnet.PortRange{Base: 30000, Size: 2768}
var _ = Describe("Services", func() {
f := NewFramework("services")
var c *client.Client
var extraNamespaces []string
BeforeEach(func() {
var err error
c, err = loadClient()
Expect(err).NotTo(HaveOccurred())
})
AfterEach(func() {
if testContext.DeleteNamespace {
for _, ns := range extraNamespaces {
By(fmt.Sprintf("Destroying namespace %v", ns))
if err := deleteNS(c, ns, 5*time.Minute /* namespace deletion timeout */); err != nil {
Failf("Couldn't delete namespace %s: %s", ns, err)
}
}
extraNamespaces = nil
} else {
Logf("Found DeleteNamespace=false, skipping namespace deletion!")
}
})
// TODO: We get coverage of TCP/UDP and multi-port services through the DNS test. We should have a simpler test for multi-port TCP here.
It("should provide secure master service [Conformance]", func() {
_, err := c.Services(api.NamespaceDefault).Get("kubernetes")
Expect(err).NotTo(HaveOccurred())
})
It("should serve a basic endpoint from pods [Conformance]", func() {
// TODO: use the ServiceTestJig here
serviceName := "endpoint-test2"
ns := f.Namespace.Name
labels := map[string]string{
"foo": "bar",
"baz": "blah",
}
By("creating service " + serviceName + " in namespace " + ns)
defer func() {
err := c.Services(ns).Delete(serviceName)
Expect(err).NotTo(HaveOccurred())
}()
service := &api.Service{
ObjectMeta: api.ObjectMeta{
Name: serviceName,
},
Spec: api.ServiceSpec{
Selector: labels,
Ports: []api.ServicePort{{
Port: 80,
TargetPort: intstr.FromInt(80),
}},
},
}
_, err := c.Services(ns).Create(service)
Expect(err).NotTo(HaveOccurred())
validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{})
names := map[string]bool{}
defer func() {
for name := range names {
err := c.Pods(ns).Delete(name, nil)
Expect(err).NotTo(HaveOccurred())
}
}()
name1 := "pod1"
name2 := "pod2"
createPodOrFail(c, ns, name1, labels, []api.ContainerPort{{ContainerPort: 80}})
names[name1] = true
validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{name1: {80}})
createPodOrFail(c, ns, name2, labels, []api.ContainerPort{{ContainerPort: 80}})
names[name2] = true
validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{name1: {80}, name2: {80}})
deletePodOrFail(c, ns, name1)
delete(names, name1)
validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{name2: {80}})
deletePodOrFail(c, ns, name2)
delete(names, name2)
validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{})
})
It("should serve multiport endpoints from pods [Conformance]", func() {
// TODO: use the ServiceTestJig here
// repacking functionality is intentionally not tested here - it's better to test it in an integration test.
serviceName := "multi-endpoint-test"
ns := f.Namespace.Name
defer func() {
err := c.Services(ns).Delete(serviceName)
Expect(err).NotTo(HaveOccurred())
}()
labels := map[string]string{"foo": "bar"}
svc1port := "svc1"
svc2port := "svc2"
By("creating service " + serviceName + " in namespace " + ns)
service := &api.Service{
ObjectMeta: api.ObjectMeta{
Name: serviceName,
},
Spec: api.ServiceSpec{
Selector: labels,
Ports: []api.ServicePort{
{
Name: "portname1",
Port: 80,
TargetPort: intstr.FromString(svc1port),
},
{
Name: "portname2",
Port: 81,
TargetPort: intstr.FromString(svc2port),
},
},
},
}
_, err := c.Services(ns).Create(service)
Expect(err).NotTo(HaveOccurred())
port1 := 100
port2 := 101
validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{})
names := map[string]bool{}
defer func() {
for name := range names {
err := c.Pods(ns).Delete(name, nil)
Expect(err).NotTo(HaveOccurred())
}
}()
containerPorts1 := []api.ContainerPort{
{
Name: svc1port,
ContainerPort: port1,
},
}
containerPorts2 := []api.ContainerPort{
{
Name: svc2port,
ContainerPort: port2,
},
}
podname1 := "pod1"
podname2 := "pod2"
createPodOrFail(c, ns, podname1, labels, containerPorts1)
names[podname1] = true
validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{podname1: {port1}})
createPodOrFail(c, ns, podname2, labels, containerPorts2)
names[podname2] = true
validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{podname1: {port1}, podname2: {port2}})
deletePodOrFail(c, ns, podname1)
delete(names, podname1)
validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{podname2: {port2}})
deletePodOrFail(c, ns, podname2)
delete(names, podname2)
validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{})
})
It("should be able to up and down services", func() {
// TODO: use the ServiceTestJig here
// this test uses NodeSSHHosts that does not work if a Node only reports LegacyHostIP
SkipUnlessProviderIs(providersWithSSH...)
ns := f.Namespace.Name
numPods, servicePort := 3, 80
By("creating service1 in namespace " + ns)
podNames1, svc1IP, err := startServeHostnameService(c, ns, "service1", servicePort, numPods)
Expect(err).NotTo(HaveOccurred())
By("creating service2 in namespace " + ns)
podNames2, svc2IP, err := startServeHostnameService(c, ns, "service2", servicePort, numPods)
Expect(err).NotTo(HaveOccurred())
hosts, err := NodeSSHHosts(c)
Expect(err).NotTo(HaveOccurred())
if len(hosts) == 0 {
Failf("No ssh-able nodes")
}
host := hosts[0]
By("verifying service1 is up")
expectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames1, svc1IP, servicePort))
By("verifying service2 is up")
expectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames2, svc2IP, servicePort))
// Stop service 1 and make sure it is gone.
By("stopping service1")
expectNoError(stopServeHostnameService(c, ns, "service1"))
By("verifying service1 is not up")
expectNoError(verifyServeHostnameServiceDown(c, host, svc1IP, servicePort))
By("verifying service2 is still up")
expectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames2, svc2IP, servicePort))
// Start another service and verify both are up.
By("creating service3 in namespace " + ns)
podNames3, svc3IP, err := startServeHostnameService(c, ns, "service3", servicePort, numPods)
Expect(err).NotTo(HaveOccurred())
if svc2IP == svc3IP {
Failf("service IPs conflict: %v", svc2IP)
}
By("verifying service2 is still up")
expectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames2, svc2IP, servicePort))
By("verifying service3 is up")
expectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames3, svc3IP, servicePort))
})
It("should work after restarting kube-proxy [Disruptive]", func() {
// TODO: use the ServiceTestJig here
SkipUnlessProviderIs("gce", "gke")
ns := f.Namespace.Name
numPods, servicePort := 3, 80
svc1 := "service1"
svc2 := "service2"
defer func() { expectNoError(stopServeHostnameService(c, ns, svc1)) }()
podNames1, svc1IP, err := startServeHostnameService(c, ns, svc1, servicePort, numPods)
Expect(err).NotTo(HaveOccurred())
defer func() { expectNoError(stopServeHostnameService(c, ns, svc2)) }()
podNames2, svc2IP, err := startServeHostnameService(c, ns, svc2, servicePort, numPods)
Expect(err).NotTo(HaveOccurred())
if svc1IP == svc2IP {
Failf("VIPs conflict: %v", svc1IP)
}
hosts, err := NodeSSHHosts(c)
Expect(err).NotTo(HaveOccurred())
if len(hosts) == 0 {
Failf("No ssh-able nodes")
}
host := hosts[0]
expectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames1, svc1IP, servicePort))
expectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames2, svc2IP, servicePort))
By("Restarting kube-proxy")
if err := restartKubeProxy(host); err != nil {
Failf("error restarting kube-proxy: %v", err)
}
expectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames1, svc1IP, servicePort))
expectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames2, svc2IP, servicePort))
By("Removing iptable rules")
result, err := SSH(`
sudo iptables -t nat -F KUBE-SERVICES || true;
sudo iptables -t nat -F KUBE-PORTALS-HOST || true;
sudo iptables -t nat -F KUBE-PORTALS-CONTAINER || true`, host, testContext.Provider)
if err != nil || result.Code != 0 {
LogSSHResult(result)
Failf("couldn't remove iptable rules: %v", err)
}
expectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames1, svc1IP, servicePort))
expectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames2, svc2IP, servicePort))
})
It("should work after restarting apiserver [Disruptive]", func() {
// TODO: use the ServiceTestJig here
// TODO: restartApiserver doesn't work in GKE - fix it and reenable this test.
SkipUnlessProviderIs("gce")
ns := f.Namespace.Name
numPods, servicePort := 3, 80
defer func() { expectNoError(stopServeHostnameService(c, ns, "service1")) }()
podNames1, svc1IP, err := startServeHostnameService(c, ns, "service1", servicePort, numPods)
Expect(err).NotTo(HaveOccurred())
hosts, err := NodeSSHHosts(c)
Expect(err).NotTo(HaveOccurred())
if len(hosts) == 0 {
Failf("No ssh-able nodes")
}
host := hosts[0]
expectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames1, svc1IP, servicePort))
// Restart apiserver
if err := restartApiserver(); err != nil {
Failf("error restarting apiserver: %v", err)
}
if err := waitForApiserverUp(c); err != nil {
Failf("error while waiting for apiserver up: %v", err)
}
expectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames1, svc1IP, servicePort))
// Create a new service and check if it's not reusing IP.
defer func() { expectNoError(stopServeHostnameService(c, ns, "service2")) }()
podNames2, svc2IP, err := startServeHostnameService(c, ns, "service2", servicePort, numPods)
Expect(err).NotTo(HaveOccurred())
if svc1IP == svc2IP {
Failf("VIPs conflict: %v", svc1IP)
}
expectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames1, svc1IP, servicePort))
expectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames2, svc2IP, servicePort))
})
// TODO: Run this test against the userspace proxy and nodes
// configured with a default deny firewall to validate that the
// proxy whitelists NodePort traffic.
It("should be able to create a functioning NodePort service", func() {
serviceName := "nodeport-test"
ns := f.Namespace.Name
jig := NewServiceTestJig(c, serviceName)
nodeIP := pickNodeIP(jig.Client) // for later
By("creating service " + serviceName + " with type=NodePort in namespace " + ns)
service := jig.CreateTCPServiceOrFail(ns, func(svc *api.Service) {
svc.Spec.Type = api.ServiceTypeNodePort
})
jig.SanityCheckService(service, api.ServiceTypeNodePort)
nodePort := service.Spec.Ports[0].NodePort
By("creating pod to be part of service " + serviceName)
jig.RunOrFail(ns, nil)
By("hitting the pod through the service's NodePort")
jig.TestReachableHTTP(nodeIP, nodePort, kubeProxyLagTimeout)
By("verifying the node port is locked")
hostExec := LaunchHostExecPod(f.Client, f.Namespace.Name, "hostexec")
// Even if the node-ip:node-port check above passed, this hostexec pod
// might fall on a node with a laggy kube-proxy.
cmd := fmt.Sprintf(`for i in $(seq 1 300); do if ss -ant46 'sport = :%d' | grep ^LISTEN; then exit 0; fi; sleep 1; done; exit 1`, nodePort)
stdout, err := RunHostCmd(hostExec.Namespace, hostExec.Name, cmd)
if err != nil {
Failf("expected node port %d to be in use, stdout: %v", nodePort, stdout)
}
})
It("should be able to change the type and ports of a service", func() {
// requires cloud load-balancer support
SkipUnlessProviderIs("gce", "gke", "aws")
// This test is more monolithic than we'd like because LB turnup can be
// very slow, so we lumped all the tests into one LB lifecycle.
serviceName := "mutability-test"
ns1 := f.Namespace.Name // LB1 in ns1 on TCP
Logf("namespace for TCP test: %s", ns1)
By("creating a second namespace")
namespacePtr, err := createTestingNS("services", c, nil)
Expect(err).NotTo(HaveOccurred())
ns2 := namespacePtr.Name // LB2 in ns2 on UDP
Logf("namespace for UDP test: %s", ns2)
extraNamespaces = append(extraNamespaces, ns2)
jig := NewServiceTestJig(c, serviceName)
nodeIP := pickNodeIP(jig.Client) // for later
// Test TCP and UDP Services. Services with the same name in different
// namespaces should get different node ports and load balancers.
By("creating a TCP service " + serviceName + " with type=ClusterIP in namespace " + ns1)
tcpService := jig.CreateTCPServiceOrFail(ns1, nil)
jig.SanityCheckService(tcpService, api.ServiceTypeClusterIP)
By("creating a UDP service " + serviceName + " with type=ClusterIP in namespace " + ns2)
udpService := jig.CreateUDPServiceOrFail(ns2, nil)
jig.SanityCheckService(udpService, api.ServiceTypeClusterIP)
By("verifying that TCP and UDP use the same port")
if tcpService.Spec.Ports[0].Port != udpService.Spec.Ports[0].Port {
Failf("expected to use the same port for TCP and UDP")
}
svcPort := tcpService.Spec.Ports[0].Port
Logf("service port (TCP and UDP): %d", svcPort)
By("creating a pod to be part of the TCP service " + serviceName)
jig.RunOrFail(ns1, nil)
By("creating a pod to be part of the UDP service " + serviceName)
jig.RunOrFail(ns2, nil)
// Change the services to NodePort.
By("changing the TCP service " + serviceName + " to type=NodePort")
tcpService = jig.UpdateServiceOrFail(ns1, tcpService.Name, func(s *api.Service) {
s.Spec.Type = api.ServiceTypeNodePort
})
jig.SanityCheckService(tcpService, api.ServiceTypeNodePort)
tcpNodePort := tcpService.Spec.Ports[0].NodePort
Logf("TCP node port: %d", tcpNodePort)
By("changing the UDP service " + serviceName + " to type=NodePort")
udpService = jig.UpdateServiceOrFail(ns2, udpService.Name, func(s *api.Service) {
s.Spec.Type = api.ServiceTypeNodePort
})
jig.SanityCheckService(udpService, api.ServiceTypeNodePort)
udpNodePort := udpService.Spec.Ports[0].NodePort
Logf("UDP node port: %d", udpNodePort)
By("hitting the TCP service's NodePort")
jig.TestReachableHTTP(nodeIP, tcpNodePort, kubeProxyLagTimeout)
By("hitting the UDP service's NodePort")
jig.TestReachableUDP(nodeIP, udpNodePort, kubeProxyLagTimeout)
// Change the services to LoadBalancer.
requestedIP := ""
if providerIs("gce", "gke") {
By("creating a static load balancer IP")
rand.Seed(time.Now().UTC().UnixNano())
staticIPName := fmt.Sprintf("e2e-external-lb-test-%d", rand.Intn(65535))
requestedIP, err = createGCEStaticIP(staticIPName)
Expect(err).NotTo(HaveOccurred())
defer func() {
// Release GCE static IP - this is not kube-managed and will not be automatically released.
deleteGCEStaticIP(staticIPName)
}()
Logf("Allocated static load balancer IP: %s", requestedIP)
}
By("changing the TCP service " + serviceName + " to type=LoadBalancer")
tcpService = jig.UpdateServiceOrFail(ns1, tcpService.Name, func(s *api.Service) {
s.Spec.LoadBalancerIP = requestedIP // will be "" if not applicable
s.Spec.Type = api.ServiceTypeLoadBalancer
})
By("changing the UDP service " + serviceName + " to type=LoadBalancer")
udpService = jig.UpdateServiceOrFail(ns2, udpService.Name, func(s *api.Service) {
s.Spec.Type = api.ServiceTypeLoadBalancer
})
By("waiting for the TCP service " + serviceName + " to have a load balancer")
// Wait for the load balancer to be created asynchronously
tcpService = jig.WaitForLoadBalancerOrFail(ns1, tcpService.Name)
jig.SanityCheckService(tcpService, api.ServiceTypeLoadBalancer)
if tcpService.Spec.Ports[0].NodePort != tcpNodePort {
Failf("TCP Spec.Ports[0].NodePort changed (%d -> %d) when not expected", tcpNodePort, tcpService.Spec.Ports[0].NodePort)
}
if requestedIP != "" && getIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != requestedIP {
Failf("unexpected TCP Status.LoadBalancer.Ingress (expected %s, got %s)", requestedIP, getIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]))
}
tcpIngressIP := getIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])
Logf("TCP load balancer: %s", tcpIngressIP)
By("waiting for the UDP service " + serviceName + " to have a load balancer")
// 2nd one should be faster since they ran in parallel.
udpService = jig.WaitForLoadBalancerOrFail(ns2, udpService.Name)
jig.SanityCheckService(udpService, api.ServiceTypeLoadBalancer)
if udpService.Spec.Ports[0].NodePort != udpNodePort {
Failf("UDP Spec.Ports[0].NodePort changed (%d -> %d) when not expected", udpNodePort, udpService.Spec.Ports[0].NodePort)
}
udpIngressIP := getIngressPoint(&udpService.Status.LoadBalancer.Ingress[0])
Logf("UDP load balancer: %s", tcpIngressIP)
By("verifying that TCP and UDP use different load balancers")
if tcpIngressIP == udpIngressIP {
Failf("Load balancers are not different: %s", getIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]))
}
By("hitting the TCP service's NodePort")
jig.TestReachableHTTP(nodeIP, tcpNodePort, kubeProxyLagTimeout)
By("hitting the UDP service's NodePort")
jig.TestReachableUDP(nodeIP, udpNodePort, kubeProxyLagTimeout)
By("hitting the TCP service's LoadBalancer")
jig.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout)
By("hitting the UDP service's LoadBalancer")
jig.TestReachableUDP(udpIngressIP, svcPort, loadBalancerLagTimeout)
// Change the services' node ports.
By("changing the TCP service's " + serviceName + " NodePort")
tcpService = jig.ChangeServiceNodePortOrFail(ns1, tcpService.Name, tcpNodePort)
jig.SanityCheckService(tcpService, api.ServiceTypeLoadBalancer)
tcpNodePortOld := tcpNodePort
tcpNodePort = tcpService.Spec.Ports[0].NodePort
if tcpNodePort == tcpNodePortOld {
Failf("TCP Spec.Ports[0].NodePort (%d) did not change", tcpNodePort)
}
if getIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != tcpIngressIP {
Failf("TCP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", tcpIngressIP, getIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]))
}
Logf("TCP node port: %d", tcpNodePort)
By("changing the UDP service's " + serviceName + " NodePort")
udpService = jig.ChangeServiceNodePortOrFail(ns2, udpService.Name, udpNodePort)
jig.SanityCheckService(udpService, api.ServiceTypeLoadBalancer)
udpNodePortOld := udpNodePort
udpNodePort = udpService.Spec.Ports[0].NodePort
if udpNodePort == udpNodePortOld {
Failf("UDP Spec.Ports[0].NodePort (%d) did not change", udpNodePort)
}
if getIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]) != udpIngressIP {
Failf("UDP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", udpIngressIP, getIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]))
}
Logf("UDP node port: %d", udpNodePort)
By("hitting the TCP service's new NodePort")
jig.TestReachableHTTP(nodeIP, tcpNodePort, kubeProxyLagTimeout)
By("hitting the UDP service's new NodePort")
jig.TestReachableUDP(nodeIP, udpNodePort, kubeProxyLagTimeout)
By("checking the old TCP NodePort is closed")
jig.TestNotReachableHTTP(nodeIP, tcpNodePortOld, kubeProxyLagTimeout)
By("checking the old UDP NodePort is closed")
jig.TestNotReachableUDP(nodeIP, udpNodePortOld, kubeProxyLagTimeout)
By("hitting the TCP service's LoadBalancer")
jig.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout)
By("hitting the UDP service's LoadBalancer")
jig.TestReachableUDP(udpIngressIP, svcPort, loadBalancerLagTimeout)
// Change the services' main ports.
By("changing the TCP service's port")
tcpService = jig.UpdateServiceOrFail(ns1, tcpService.Name, func(s *api.Service) {
s.Spec.Ports[0].Port++
})
jig.SanityCheckService(tcpService, api.ServiceTypeLoadBalancer)
svcPortOld := svcPort
svcPort = tcpService.Spec.Ports[0].Port
if svcPort == svcPortOld {
Failf("TCP Spec.Ports[0].Port (%d) did not change", svcPort)
}
if tcpService.Spec.Ports[0].NodePort != tcpNodePort {
Failf("TCP Spec.Ports[0].NodePort (%d) changed", tcpService.Spec.Ports[0].NodePort)
}
if getIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != tcpIngressIP {
Failf("TCP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", tcpIngressIP, getIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]))
}
Logf("service port (TCP and UDP): %d", svcPort)
By("changing the UDP service's port")
udpService = jig.UpdateServiceOrFail(ns2, udpService.Name, func(s *api.Service) {
s.Spec.Ports[0].Port++
})
jig.SanityCheckService(udpService, api.ServiceTypeLoadBalancer)
if udpService.Spec.Ports[0].Port != svcPort {
Failf("UDP Spec.Ports[0].Port (%d) did not change", udpService.Spec.Ports[0].Port)
}
if udpService.Spec.Ports[0].NodePort != udpNodePort {
Failf("UDP Spec.Ports[0].NodePort (%d) changed", udpService.Spec.Ports[0].NodePort)
}
if getIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]) != udpIngressIP {
Failf("UDP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", udpIngressIP, getIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]))
}
By("hitting the TCP service's NodePort")
jig.TestReachableHTTP(nodeIP, tcpNodePort, kubeProxyLagTimeout)
By("hitting the UDP service's NodePort")
jig.TestReachableUDP(nodeIP, udpNodePort, kubeProxyLagTimeout)
By("hitting the TCP service's LoadBalancer")
jig.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerCreateTimeout) // this may actually recreate the LB
By("hitting the UDP service's LoadBalancer")
jig.TestReachableUDP(udpIngressIP, svcPort, loadBalancerCreateTimeout) // this may actually recreate the LB
// Change the services back to ClusterIP.
By("changing TCP service " + serviceName + " back to type=ClusterIP")
tcpService = jig.UpdateServiceOrFail(ns1, tcpService.Name, func(s *api.Service) {
s.Spec.Type = api.ServiceTypeClusterIP
s.Spec.Ports[0].NodePort = 0
})
// Wait for the load balancer to be destroyed asynchronously
tcpService = jig.WaitForLoadBalancerDestroyOrFail(ns1, tcpService.Name, tcpIngressIP, svcPort)
jig.SanityCheckService(tcpService, api.ServiceTypeClusterIP)
By("changing UDP service " + serviceName + " back to type=ClusterIP")
udpService = jig.UpdateServiceOrFail(ns2, udpService.Name, func(s *api.Service) {
s.Spec.Type = api.ServiceTypeClusterIP
s.Spec.Ports[0].NodePort = 0
})
// Wait for the load balancer to be destroyed asynchronously
udpService = jig.WaitForLoadBalancerDestroyOrFail(ns2, udpService.Name, udpIngressIP, svcPort)
jig.SanityCheckService(udpService, api.ServiceTypeClusterIP)
By("checking the TCP NodePort is closed")
jig.TestNotReachableHTTP(nodeIP, tcpNodePort, kubeProxyLagTimeout)
By("checking the UDP NodePort is closed")
jig.TestNotReachableUDP(nodeIP, udpNodePort, kubeProxyLagTimeout)
By("checking the TCP LoadBalancer is closed")
jig.TestNotReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout)
By("checking the UDP LoadBalancer is closed")
jig.TestNotReachableUDP(udpIngressIP, svcPort, loadBalancerLagTimeout)
})
It("should prevent NodePort collisions", func() {
// TODO: use the ServiceTestJig here
baseName := "nodeport-collision-"
serviceName1 := baseName + "1"
serviceName2 := baseName + "2"
ns := f.Namespace.Name
t := NewServerTest(c, ns, serviceName1)
defer func() {
defer GinkgoRecover()
errs := t.Cleanup()
if len(errs) != 0 {
Failf("errors in cleanup: %v", errs)
}
}()
By("creating service " + serviceName1 + " with type NodePort in namespace " + ns)
service := t.BuildServiceSpec()
service.Spec.Type = api.ServiceTypeNodePort
result, err := t.CreateService(service)
Expect(err).NotTo(HaveOccurred())
if result.Spec.Type != api.ServiceTypeNodePort {
Failf("got unexpected Spec.Type for new service: %v", result)
}
if len(result.Spec.Ports) != 1 {
Failf("got unexpected len(Spec.Ports) for new service: %v", result)
}
port := result.Spec.Ports[0]
if port.NodePort == 0 {
Failf("got unexpected Spec.Ports[0].nodePort for new service: %v", result)
}
By("creating service " + serviceName2 + " with conflicting NodePort")
service2 := t.BuildServiceSpec()
service2.Name = serviceName2
service2.Spec.Type = api.ServiceTypeNodePort
service2.Spec.Ports[0].NodePort = port.NodePort
result2, err := t.CreateService(service2)
if err == nil {
Failf("Created service with conflicting NodePort: %v", result2)
}
expectedErr := fmt.Sprintf("%d.*port is already allocated", port.NodePort)
Expect(fmt.Sprintf("%v", err)).To(MatchRegexp(expectedErr))
By("deleting service " + serviceName1 + " to release NodePort")
err = t.DeleteService(serviceName1)
Expect(err).NotTo(HaveOccurred())
By("creating service " + serviceName2 + " with no-longer-conflicting NodePort")
_, err = t.CreateService(service2)
Expect(err).NotTo(HaveOccurred())
})
It("should check NodePort out-of-range", func() {
// TODO: use the ServiceTestJig here
serviceName := "nodeport-range-test"
ns := f.Namespace.Name
t := NewServerTest(c, ns, serviceName)
defer func() {
defer GinkgoRecover()
errs := t.Cleanup()
if len(errs) != 0 {
Failf("errors in cleanup: %v", errs)
}
}()
service := t.BuildServiceSpec()
service.Spec.Type = api.ServiceTypeNodePort
By("creating service " + serviceName + " with type NodePort in namespace " + ns)
service, err := t.CreateService(service)
Expect(err).NotTo(HaveOccurred())
if service.Spec.Type != api.ServiceTypeNodePort {
Failf("got unexpected Spec.Type for new service: %v", service)
}
if len(service.Spec.Ports) != 1 {
Failf("got unexpected len(Spec.Ports) for new service: %v", service)
}
port := service.Spec.Ports[0]
if port.NodePort == 0 {
Failf("got unexpected Spec.Ports[0].nodePort for new service: %v", service)
}
if !ServiceNodePortRange.Contains(port.NodePort) {
Failf("got unexpected (out-of-range) port for new service: %v", service)
}
outOfRangeNodePort := 0
for {
outOfRangeNodePort = 1 + rand.Intn(65535)
if !ServiceNodePortRange.Contains(outOfRangeNodePort) {
break
}
}
By(fmt.Sprintf("changing service "+serviceName+" to out-of-range NodePort %d", outOfRangeNodePort))
result, err := updateService(c, ns, serviceName, func(s *api.Service) {
s.Spec.Ports[0].NodePort = outOfRangeNodePort
})
if err == nil {
Failf("failed to prevent update of service with out-of-range NodePort: %v", result)
}
expectedErr := fmt.Sprintf("%d.*port is not in the valid range", outOfRangeNodePort)
Expect(fmt.Sprintf("%v", err)).To(MatchRegexp(expectedErr))
By("deleting original service " + serviceName)
err = t.DeleteService(serviceName)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("creating service "+serviceName+" with out-of-range NodePort %d", outOfRangeNodePort))
service = t.BuildServiceSpec()
service.Spec.Type = api.ServiceTypeNodePort
service.Spec.Ports[0].NodePort = outOfRangeNodePort
service, err = t.CreateService(service)
if err == nil {
Failf("failed to prevent create of service with out-of-range NodePort (%d): %v", outOfRangeNodePort, service)
}
Expect(fmt.Sprintf("%v", err)).To(MatchRegexp(expectedErr))
})
It("should release NodePorts on delete", func() {
// TODO: use the ServiceTestJig here
serviceName := "nodeport-reuse"
ns := f.Namespace.Name
t := NewServerTest(c, ns, serviceName)
defer func() {
defer GinkgoRecover()
errs := t.Cleanup()
if len(errs) != 0 {
Failf("errors in cleanup: %v", errs)
}
}()
service := t.BuildServiceSpec()
service.Spec.Type = api.ServiceTypeNodePort
By("creating service " + serviceName + " with type NodePort in namespace " + ns)
service, err := t.CreateService(service)
Expect(err).NotTo(HaveOccurred())
if service.Spec.Type != api.ServiceTypeNodePort {
Failf("got unexpected Spec.Type for new service: %v", service)
}
if len(service.Spec.Ports) != 1 {
Failf("got unexpected len(Spec.Ports) for new service: %v", service)
}
port := service.Spec.Ports[0]
if port.NodePort == 0 {
Failf("got unexpected Spec.Ports[0].nodePort for new service: %v", service)
}
if !ServiceNodePortRange.Contains(port.NodePort) {
Failf("got unexpected (out-of-range) port for new service: %v", service)
}
nodePort := port.NodePort
By("deleting original service " + serviceName)
err = t.DeleteService(serviceName)
Expect(err).NotTo(HaveOccurred())
hostExec := LaunchHostExecPod(f.Client, f.Namespace.Name, "hostexec")
cmd := fmt.Sprintf(`! ss -ant46 'sport = :%d' | tail -n +2 | grep LISTEN`, nodePort)
var stdout string
if pollErr := wait.PollImmediate(poll, kubeProxyLagTimeout, func() (bool, error) {
var err error
stdout, err = RunHostCmd(hostExec.Namespace, hostExec.Name, cmd)
if err != nil {
Logf("expected node port (%d) to not be in use, stdout: %v", nodePort, stdout)
return false, nil
}
return true, nil
}); pollErr != nil {
Failf("expected node port (%d) to not be in use in %v, stdout: %v", nodePort, kubeProxyLagTimeout, stdout)
}
By(fmt.Sprintf("creating service "+serviceName+" with same NodePort %d", nodePort))
service = t.BuildServiceSpec()
service.Spec.Type = api.ServiceTypeNodePort
service.Spec.Ports[0].NodePort = nodePort
service, err = t.CreateService(service)
Expect(err).NotTo(HaveOccurred())
})
})
// updateService fetches a service, calls the update function on it,
// and then attempts to send the updated service. It retries up to 2
// times in the face of timeouts and conflicts.
func updateService(c *client.Client, namespace, serviceName string, update func(*api.Service)) (*api.Service, error) {
var service *api.Service
var err error
for i := 0; i < 3; i++ {
service, err = c.Services(namespace).Get(serviceName)
if err != nil {
return service, err
}
update(service)
service, err = c.Services(namespace).Update(service)
if !errors.IsConflict(err) && !errors.IsServerTimeout(err) {
return service, err
}
}
return service, err
}
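// getContainerPortsByPodUID maps each endpoint address' pod UID to the list of
// container ports exposed through the Endpoints object, translating Mesos
// containerPort annotations where they are present.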
func getContainerPortsByPodUID(endpoints *api.Endpoints) PortsByPodUID {
m := PortsByPodUID{}
for _, ss := range endpoints.Subsets {
for _, port := range ss.Ports {
for _, addr := range ss.Addresses {
containerPort := port.Port
hostPort := port.Port
// use endpoint annotations to recover the container port in a Mesos setup
// compare contrib/mesos/pkg/service/endpoints_controller.syncService
key := fmt.Sprintf("k8s.mesosphere.io/containerPort_%s_%s_%d", port.Protocol, addr.IP, hostPort)
mesosContainerPortString := endpoints.Annotations[key]
if mesosContainerPortString != "" {
var err error
containerPort, err = strconv.Atoi(mesosContainerPortString)
if err != nil {
continue
}
Logf("Mapped mesos host port %d to container port %d via annotation %s=%s", hostPort, containerPort, key, mesosContainerPortString)
}
// Logf("Found pod %v, host port %d and container port %d", addr.TargetRef.UID, hostPort, containerPort)
if _, ok := m[addr.TargetRef.UID]; !ok {
m[addr.TargetRef.UID] = make([]int, 0)
}
m[addr.TargetRef.UID] = append(m[addr.TargetRef.UID], containerPort)
}
}
}
return m
}
type PortsByPodName map[string][]int
type PortsByPodUID map[types.UID][]int
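// translatePodNameToUIDOrFail converts a PortsByPodName map into a
// PortsByPodUID map by looking up each named pod in the given namespace,
// failing the test if a pod cannot be fetched.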
func translatePodNameToUIDOrFail(c *client.Client, ns string, expectedEndpoints PortsByPodName) PortsByPodUID {
portsByUID := make(PortsByPodUID)
for name, portList := range expectedEndpoints {
pod, err := c.Pods(ns).Get(name)
if err != nil {
Failf("failed to get pod %s, that's pretty weird. validation failed: %s", name, err)
}
portsByUID[pod.ObjectMeta.UID] = portList
}
// Logf("successfully translated pod names to UIDs: %v -> %v on namespace %s", expectedEndpoints, portsByUID, ns)
return portsByUID
}
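// validatePortsOrFail fails the test unless the observed endpoint ports match
// the expected ports exactly for every pod UID.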
func validatePortsOrFail(endpoints PortsByPodUID, expectedEndpoints PortsByPodUID) {
if len(endpoints) != len(expectedEndpoints) {
// should not happen because we check this condition before
Failf("invalid number of endpoints got %v, expected %v", endpoints, expectedEndpoints)
}
for podUID := range expectedEndpoints {
if _, ok := endpoints[podUID]; !ok {
Failf("endpoint %v not found", podUID)
}
if len(endpoints[podUID]) != len(expectedEndpoints[podUID]) {
Failf("invalid list of ports for uid %v. Got %v, expected %v", podUID, endpoints[podUID], expectedEndpoints[podUID])
}
sort.Ints(endpoints[podUID])
sort.Ints(expectedEndpoints[podUID])
for index := range endpoints[podUID] {
if endpoints[podUID][index] != expectedEndpoints[podUID][index] {
Failf("invalid list of ports for uid %v. Got %v, expected %v", podUID, endpoints[podUID], expectedEndpoints[podUID])
}
}
}
}
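// validateEndpointsOrFail polls the service's Endpoints object until it
// exposes exactly the expected pod/port pairs, failing the test after
// serviceStartTimeout.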
func validateEndpointsOrFail(c *client.Client, namespace, serviceName string, expectedEndpoints PortsByPodName) {
By(fmt.Sprintf("waiting up to %v for service %s in namespace %s to expose endpoints %v", serviceStartTimeout, serviceName, namespace, expectedEndpoints))
i := 1
for start := time.Now(); time.Since(start) < serviceStartTimeout; time.Sleep(1 * time.Second) {
endpoints, err := c.Endpoints(namespace).Get(serviceName)
if err != nil {
Logf("Get endpoints failed (%v elapsed, ignoring for 5s): %v", time.Since(start), err)
continue
}
// Logf("Found endpoints %v", endpoints)
portsByPodUID := getContainerPortsByPodUID(endpoints)
// Logf("Found port by pod UID %v", portsByPodUID)
expectedPortsByPodUID := translatePodNameToUIDOrFail(c, namespace, expectedEndpoints)
if len(portsByPodUID) == len(expectedEndpoints) {
validatePortsOrFail(portsByPodUID, expectedPortsByPodUID)
Logf("successfully validated that service %s in namespace %s exposes endpoints %v (%v elapsed)",
serviceName, namespace, expectedEndpoints, time.Since(start))
return
}
if i%5 == 0 {
Logf("Unexpected endpoints: found %v, expected %v (%v elapsed, will retry)", portsByPodUID, expectedEndpoints, time.Since(start))
}
i++
}
if pods, err := c.Pods(api.NamespaceAll).List(api.ListOptions{}); err == nil {
for _, pod := range pods.Items {
Logf("Pod %s\t%s\t%s\t%s", pod.Namespace, pod.Name, pod.Spec.NodeName, pod.DeletionTimestamp)
}
} else {
Logf("Can't list pod debug info: %v", err)
}
Failf("Timed out waiting for service %s in namespace %s to expose endpoints %v (%v elapsed)", serviceName, namespace, expectedEndpoints, serviceStartTimeout)
}
// createExecPodOrFail creates a simple busybox pod in a sleep loop used as a
// vessel for kubectl exec commands.
func createExecPodOrFail(c *client.Client, ns, name string) {
Logf("Creating new exec pod")
immediate := int64(0)
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
Name: name,
Namespace: ns,
},
Spec: api.PodSpec{
TerminationGracePeriodSeconds: &immediate,
Containers: []api.Container{
{
Name: "exec",
Image: "gcr.io/google_containers/busybox",
Command: []string{"sh", "-c", "while true; do sleep 5; done"},
},
},
},
}
_, err := c.Pods(ns).Create(pod)
Expect(err).NotTo(HaveOccurred())
err = wait.PollImmediate(poll, 5*time.Minute, func() (bool, error) {
retrievedPod, err := c.Pods(pod.Namespace).Get(pod.Name)
if err != nil {
return false, nil
}
return retrievedPod.Status.Phase == api.PodRunning, nil
})
Expect(err).NotTo(HaveOccurred())
}
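// createPodOrFail creates a pod running the pause container with the given
// labels and container ports, failing the test on any error.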
func createPodOrFail(c *client.Client, ns, name string, labels map[string]string, containerPorts []api.ContainerPort) {
By(fmt.Sprintf("creating pod %s in namespace %s", name, ns))
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
Name: name,
Labels: labels,
},
Spec: api.PodSpec{
Containers: []api.Container{
{
Name: "test",
Image: "gcr.io/google_containers/pause:2.0",
Ports: containerPorts,
},
},
},
}
_, err := c.Pods(ns).Create(pod)
Expect(err).NotTo(HaveOccurred())
}
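// deletePodOrFail deletes the named pod and fails the test on error.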
func deletePodOrFail(c *client.Client, ns, name string) {
By(fmt.Sprintf("deleting pod %s in namespace %s", name, ns))
err := c.Pods(ns).Delete(name, nil)
Expect(err).NotTo(HaveOccurred())
}
func collectAddresses(nodes *api.NodeList, addressType api.NodeAddressType) []string {
ips := []string{}
for i := range nodes.Items {
item := &nodes.Items[i]
for j := range item.Status.Addresses {
nodeAddress := &item.Status.Addresses[j]
if nodeAddress.Type == addressType {
ips = append(ips, nodeAddress.Address)
}
}
}
return ips
}
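// getNodePublicIps returns the external IPs of all schedulable nodes, falling
// back to legacy host IPs when no external addresses are reported.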
func getNodePublicIps(c *client.Client) ([]string, error) {
nodes := ListSchedulableNodesOrDie(c)
ips := collectAddresses(nodes, api.NodeExternalIP)
if len(ips) == 0 {
ips = collectAddresses(nodes, api.NodeLegacyHostIP)
}
return ips, nil
}
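// pickNodeIP returns the first public IP found among the schedulable nodes and
// fails the test if none are available.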
func pickNodeIP(c *client.Client) string {
publicIps, err := getNodePublicIps(c)
Expect(err).NotTo(HaveOccurred())
if len(publicIps) == 0 {
Failf("got unexpected number (%d) of public IPs", len(publicIps))
}
ip := publicIps[0]
return ip
}
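// testReachableHTTP performs a single HTTP GET against ip:port+request and
// reports whether the response is a 200 containing the expected substring.
// Transient failures are logged and returned as (false, nil) so callers can poll.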
func testReachableHTTP(ip string, port int, request string, expect string) (bool, error) {
url := fmt.Sprintf("http://%s:%d%s", ip, port, request)
if ip == "" {
Failf("Got empty IP for reachability check (%s)", url)
return false, nil
}
if port == 0 {
Failf("Got port==0 for reachability check (%s)", url)
return false, nil
}
Logf("Testing HTTP reachability of %v", url)
resp, err := httpGetNoConnectionPool(url)
if err != nil {
Logf("Got error testing for reachability of %s: %v", url, err)
return false, nil
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
Logf("Got error reading response from %s: %v", url, err)
return false, nil
}
if resp.StatusCode != 200 {
return false, fmt.Errorf("received non-success return status %q trying to access %s; got body: %s", resp.Status, url, string(body))
}
if !strings.Contains(string(body), expect) {
return false, fmt.Errorf("received response body without expected substring %q: %s", expect, string(body))
}
Logf("Successfully reached %v", url)
return true, nil
}
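// testNotReachableHTTP reports whether a single HTTP GET against ip:port fails,
// i.e. the endpoint is no longer serving.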
func testNotReachableHTTP(ip string, port int) (bool, error) {
url := fmt.Sprintf("http://%s:%d", ip, port)
if ip == "" {
Failf("Got empty IP for non-reachability check (%s)", url)
return false, nil
}
if port == 0 {
Failf("Got port==0 for non-reachability check (%s)", url)
return false, nil
}
Logf("Testing HTTP non-reachability of %v", url)
resp, err := httpGetNoConnectionPool(url)
if err != nil {
Logf("Confirmed that %s is not reachable", url)
return true, nil
}
resp.Body.Close()
return false, nil
}
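// testReachableUDP sends the request over UDP to ip:port and reports whether a
// reply containing the expected substring arrives within the 3 second deadline.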
func testReachableUDP(ip string, port int, request string, expect string) (bool, error) {
uri := fmt.Sprintf("udp://%s:%d", ip, port)
if ip == "" {
Failf("Got empty IP for reachability check (%s)", uri)
return false, nil
}
if port == 0 {
Failf("Got port==0 for reachability check (%s)", uri)
return false, nil
}
Logf("Testing UDP reachability of %v", uri)
con, err := net.Dial("udp", ip+":"+strconv.Itoa(port))
if err != nil {
return false, fmt.Errorf("Failed to dial %s:%d: %v", ip, port, err)
}
_, err = con.Write([]byte(fmt.Sprintf("%s\n", request)))
if err != nil {
return false, fmt.Errorf("Failed to send request: %v", err)
}
var buf []byte = make([]byte, len(expect)+1)
err = con.SetDeadline(time.Now().Add(3 * time.Second))
if err != nil {
return false, fmt.Errorf("Failed to set deadline: %v", err)
}
_, err = con.Read(buf)
if err != nil {
return false, nil
}
if !strings.Contains(string(buf), expect) {
return false, fmt.Errorf("Failed to retrieve %q, got %q", expect, string(buf))
}
Logf("Successfully reached %v", uri)
return true, nil
}
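// testNotReachableUDP reports whether a UDP request to ip:port goes unanswered,
// i.e. the endpoint is no longer serving.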
func testNotReachableUDP(ip string, port int, request string) (bool, error) {
uri := fmt.Sprintf("udp://%s:%d", ip, port)
if ip == "" {
Failf("Got empty IP for reachability check (%s)", uri)
return false, nil
}
if port == 0 {
Failf("Got port==0 for reachability check (%s)", uri)
return false, nil
}
Logf("Testing UDP non-reachability of %v", uri)
con, err := net.Dial("udp", ip+":"+strconv.Itoa(port))
if err != nil {
Logf("Confirmed that %s is not reachable", uri)
return true, nil
}
_, err = con.Write([]byte(fmt.Sprintf("%s\n", request)))
if err != nil {
Logf("Confirmed that %s is not reachable", uri)
return true, nil
}
var buf []byte = make([]byte, 1)
err = con.SetDeadline(time.Now().Add(3 * time.Second))
if err != nil {
return false, fmt.Errorf("Failed to set deadline: %v", err)
}
_, err = con.Read(buf)
if err != nil {
Logf("Confirmed that %s is not reachable", uri)
return true, nil
}
return false, nil
}
// Creates a replication controller that serves its hostname and a service on top of it.
func startServeHostnameService(c *client.Client, ns, name string, port, replicas int) ([]string, string, error) {
podNames := make([]string, replicas)
By("creating service " + name + " in namespace " + ns)
_, err := c.Services(ns).Create(&api.Service{
ObjectMeta: api.ObjectMeta{
Name: name,
},
Spec: api.ServiceSpec{
Ports: []api.ServicePort{{
Port: port,
TargetPort: intstr.FromInt(9376),
Protocol: "TCP",
}},
Selector: map[string]string{
"name": name,
},
},
})
if err != nil {
return podNames, "", err
}
var createdPods []*api.Pod
maxContainerFailures := 0
config := RCConfig{
Client: c,
Image: "gcr.io/google_containers/serve_hostname:1.1",
Name: name,
Namespace: ns,
PollInterval: 3 * time.Second,
Timeout: podReadyBeforeTimeout,
Replicas: replicas,
CreatedPods: &createdPods,
MaxContainerFailures: &maxContainerFailures,
}
err = RunRC(config)
if err != nil {
return podNames, "", err
}
if len(createdPods) != replicas {
return podNames, "", fmt.Errorf("Incorrect number of running pods: %v", len(createdPods))
}
for i := range createdPods {
podNames[i] = createdPods[i].ObjectMeta.Name
}
sort.StringSlice(podNames).Sort()
service, err := c.Services(ns).Get(name)
if err != nil {
return podNames, "", err
}
if service.Spec.ClusterIP == "" {
return podNames, "", fmt.Errorf("Service IP is blank for %v", name)
}
serviceIP := service.Spec.ClusterIP
return podNames, serviceIP, nil
}
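// stopServeHostnameService deletes the replication controller and service
// created by startServeHostnameService.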
func stopServeHostnameService(c *client.Client, ns, name string) error {
if err := DeleteRC(c, ns, name); err != nil {
return err
}
if err := c.Services(ns).Delete(name); err != nil {
return err
}
return nil
}
// verifyServeHostnameServiceUp wgets the given serviceIP:servicePort from the
// given host and from within a pod. The host is expected to be an SSH-able node
// in the cluster. Each pod in the service is expected to echo its name. These
// names are compared with the given expectedPods list after a sort | uniq.
func verifyServeHostnameServiceUp(c *client.Client, ns, host string, expectedPods []string, serviceIP string, servicePort int) error {
execPodName := "execpod"
createExecPodOrFail(c, ns, execPodName)
defer func() {
deletePodOrFail(c, ns, execPodName)
}()
// Loop a bunch of times - the proxy is randomized, so we want a good
// chance of hitting each backend at least once.
buildCommand := func(wget string) string {
return fmt.Sprintf("for i in $(seq 1 %d); do %s http://%s:%d 2>&1 || true; echo; done",
50*len(expectedPods), wget, serviceIP, servicePort)
}
commands := []func() string{
// verify service from node
func() string {
cmd := "set -e; " + buildCommand("wget -q --timeout=0.2 --tries=1 -O -")
Logf("Executing cmd %q on host %v", cmd, host)
result, err := SSH(cmd, host, testContext.Provider)
if err != nil || result.Code != 0 {
LogSSHResult(result)
Logf("error while SSH-ing to node: %v", err)
}
return result.Stdout
},
// verify service from pod
func() string {
cmd := buildCommand("wget -q -T 1 -O -")
Logf("Executing cmd %q in pod %v/%v", cmd, ns, execPodName)
// TODO: Use exec-over-http via the netexec pod instead of kubectl exec.
output, err := RunHostCmd(ns, execPodName, cmd)
if err != nil {
Logf("error while kubectl execing %q in pod %v/%v: %v\nOutput: %v", cmd, ns, execPodName, err, output)
}
return output
},
}
sort.StringSlice(expectedPods).Sort()
By(fmt.Sprintf("verifying service has %d reachable backends", len(expectedPods)))
for _, cmdFunc := range commands {
passed := false
gotPods := []string{}
// Retry cmdFunc for a while
for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) {
pods := strings.Split(strings.TrimSpace(cmdFunc()), "\n")
// Uniq pods before the sort because inserting them into a set
// (which is implemented using dicts) can re-order them.
gotPods = sets.NewString(pods...).List()
if api.Semantic.DeepEqual(gotPods, expectedPods) {
passed = true
break
}
Logf("Waiting for expected pods for %s: %v, got: %v", serviceIP, expectedPods, gotPods)
}
if !passed {
return fmt.Errorf("service verification failed for: %s, expected %v, got %v", serviceIP, expectedPods, gotPods)
}
}
return nil
}
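// verifyServeHostnameServiceDown curls serviceIP:servicePort from the given
// SSH-able host and returns nil once the service stops responding, or an error
// after a minute of retries.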
func verifyServeHostnameServiceDown(c *client.Client, host string, serviceIP string, servicePort int) error {
command := fmt.Sprintf(
"curl -s --connect-timeout 2 http://%s:%d && exit 99", serviceIP, servicePort)
for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) {
result, err := SSH(command, host, testContext.Provider)
if err != nil {
LogSSHResult(result)
Logf("error while SSH-ing to node: %v", err)
}
if result.Code != 99 {
return nil
}
Logf("service still alive - still waiting")
}
return fmt.Errorf("waiting for service to be down timed out")
}
// Does an HTTP GET, but does not reuse TCP connections
// This masks problems where the iptables rule has changed, but we don't see it
// This is intended for relatively quick requests (status checks), so we set a short (5 seconds) timeout
func httpGetNoConnectionPool(url string) (*http.Response, error) {
tr := &http.Transport{
DisableKeepAlives: true,
}
client := &http.Client{
Transport: tr,
Timeout: 5 * time.Second,
}
return client.Get(url)
}
// ServiceTestJig is a test jig to help with exercising Services in e2e tests.
type ServiceTestJig struct {
ID string
Name string
Client *client.Client
Labels map[string]string
}
// NewServiceTestJig allocates and inits a new ServiceTestJig.
func NewServiceTestJig(client *client.Client, name string) *ServiceTestJig {
j := &ServiceTestJig{}
j.Client = client
j.Name = name
j.ID = j.Name + "-" + string(util.NewUUID())
j.Labels = map[string]string{"testid": j.ID}
return j
}
// newServiceTemplate returns the default api.Service template for this jig, but
// does not actually create the Service. The default Service has the same name
// as the jig and exposes port 80.
func (j *ServiceTestJig) newServiceTemplate(namespace string, proto api.Protocol) *api.Service {
service := &api.Service{
ObjectMeta: api.ObjectMeta{
Namespace: namespace,
Name: j.Name,
Labels: j.Labels,
},
Spec: api.ServiceSpec{
Selector: j.Labels,
Ports: []api.ServicePort{
{
Protocol: proto,
Port: 80,
},
},
},
}
return service
}
// CreateTCPServiceOrFail creates a new TCP Service based on the jig's
// defaults. Callers can provide a function to tweak the Service object before
// it is created.
func (j *ServiceTestJig) CreateTCPServiceOrFail(namespace string, tweak func(svc *api.Service)) *api.Service {
svc := j.newServiceTemplate(namespace, api.ProtocolTCP)
if tweak != nil {
tweak(svc)
}
result, err := j.Client.Services(namespace).Create(svc)
if err != nil {
Failf("Failed to create TCP Service %q: %v", svc.Name, err)
}
return result
}
// CreateUDPServiceOrFail creates a new UDP Service based on the jig's
// defaults. Callers can provide a function to tweak the Service object before
// it is created.
func (j *ServiceTestJig) CreateUDPServiceOrFail(namespace string, tweak func(svc *api.Service)) *api.Service {
svc := j.newServiceTemplate(namespace, api.ProtocolUDP)
if tweak != nil {
tweak(svc)
}
result, err := j.Client.Services(namespace).Create(svc)
if err != nil {
Failf("Failed to create UDP Service %q: %v", svc.Name, err)
}
return result
}
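// SanityCheckService verifies that the service has the expected type, that node
// ports are present (and in range) exactly when the type calls for them, and
// that load balancer ingress is populated only for LoadBalancer services.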
func (j *ServiceTestJig) SanityCheckService(svc *api.Service, svcType api.ServiceType) {
if svc.Spec.Type != svcType {
Failf("unexpected Spec.Type (%s) for service, expected %s", svc.Spec.Type, svcType)
}
expectNodePorts := false
if svcType != api.ServiceTypeClusterIP {
expectNodePorts = true
}
for i, port := range svc.Spec.Ports {
hasNodePort := (port.NodePort != 0)
if hasNodePort != expectNodePorts {
Failf("unexpected Spec.Ports[%d].NodePort (%d) for service", i, port.NodePort)
}
if hasNodePort {
if !ServiceNodePortRange.Contains(port.NodePort) {
Failf("out-of-range nodePort (%d) for service", port.NodePort)
}
}
}
expectIngress := false
if svcType == api.ServiceTypeLoadBalancer {
expectIngress = true
}
hasIngress := len(svc.Status.LoadBalancer.Ingress) != 0
if hasIngress != expectIngress {
Failf("unexpected number of Status.LoadBalancer.Ingress (%d) for service", len(svc.Status.LoadBalancer.Ingress))
}
if hasIngress {
for i, ing := range svc.Status.LoadBalancer.Ingress {
if ing.IP == "" && ing.Hostname == "" {
Failf("unexpected Status.LoadBalancer.Ingress[%d] for service: %#v", i, ing)
}
}
}
}
// UpdateService fetches a service, calls the update function on it, and
// then attempts to send the updated service. It tries up to 3 times in the
// face of timeouts and conflicts.
func (j *ServiceTestJig) UpdateService(namespace, name string, update func(*api.Service)) (*api.Service, error) {
for i := 0; i < 3; i++ {
service, err := j.Client.Services(namespace).Get(name)
if err != nil {
return nil, fmt.Errorf("Failed to get Service %q: %v", name, err)
}
update(service)
service, err = j.Client.Services(namespace).Update(service)
if err == nil {
return service, nil
}
if !errors.IsConflict(err) && !errors.IsServerTimeout(err) {
return nil, fmt.Errorf("Failed to update Service %q: %v", name, err)
}
}
return nil, fmt.Errorf("Too many retries updating Service %q", name)
}
// UpdateServiceOrFail fetches a service, calls the update function on it, and
// then attempts to send the updated service. It tries up to 3 times in the
// face of timeouts and conflicts.
func (j *ServiceTestJig) UpdateServiceOrFail(namespace, name string, update func(*api.Service)) *api.Service {
svc, err := j.UpdateService(namespace, name, update)
if err != nil {
Failf(err.Error())
}
return svc
}
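// ChangeServiceNodePortOrFail walks the NodePort range starting just after the
// initial port until it finds a port the service can be updated to use, failing
// the test if no free port is found.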
func (j *ServiceTestJig) ChangeServiceNodePortOrFail(namespace, name string, initial int) *api.Service {
var err error
var service *api.Service
for i := 1; i < ServiceNodePortRange.Size; i++ {
offs1 := initial - ServiceNodePortRange.Base
offs2 := (offs1 + i) % ServiceNodePortRange.Size
newPort := ServiceNodePortRange.Base + offs2
service, err = j.UpdateService(namespace, name, func(s *api.Service) {
s.Spec.Ports[0].NodePort = newPort
})
if err != nil && strings.Contains(err.Error(), "provided port is already allocated") {
Logf("tried nodePort %d, but it is in use, will try another", newPort)
continue
}
// Otherwise err was nil or err was a real error
break
}
if err != nil {
Failf("Could not change the nodePort: %v", err)
}
return service
}
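// WaitForLoadBalancerOrFail polls the named Service until it reports at least
// one LoadBalancer ingress point, failing the test after
// loadBalancerCreateTimeout.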
func (j *ServiceTestJig) WaitForLoadBalancerOrFail(namespace, name string) *api.Service {
var service *api.Service
Logf("Waiting up to %v for service %q to have a LoadBalancer", loadBalancerCreateTimeout, name)
pollFunc := func() (bool, error) {
svc, err := j.Client.Services(namespace).Get(name)
if err != nil {
return false, err
}
if len(svc.Status.LoadBalancer.Ingress) > 0 {
service = svc
return true, nil
}
return false, nil
}
if err := wait.PollImmediate(poll, loadBalancerCreateTimeout, pollFunc); err != nil {
Failf("Timeout waiting for service %q to have a load balancer", name)
}
return service
}
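// The returned Service is typically exercised end to end (sketch only; assumes
// svc was created earlier with Spec.Type set to LoadBalancer, a single port,
// and a single ingress entry):
//
//	svc = j.WaitForLoadBalancerOrFail(ns, svc.Name)
//	ingress := getIngressPoint(&svc.Status.LoadBalancer.Ingress[0])
//	j.TestReachableHTTP(ingress, svc.Spec.Ports[0].Port, loadBalancerLagTimeout)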
func (j *ServiceTestJig) WaitForLoadBalancerDestroyOrFail(namespace, name string, ip string, port int) *api.Service {
// TODO: once support ticket 21807001 is resolved, reduce this timeout back to something reasonable
defer func() {
if err := EnsureLoadBalancerResourcesDeleted(ip, strconv.Itoa(port)); err != nil {
Logf("Failed to delete cloud resources for service: %s %d (%v)", ip, port, err)
}
}()
var service *api.Service
Logf("Waiting up to %v for service %q to have no LoadBalancer", loadBalancerCreateTimeout, name)
pollFunc := func() (bool, error) {
svc, err := j.Client.Services(namespace).Get(name)
if err != nil {
return false, err
}
if len(svc.Status.LoadBalancer.Ingress) == 0 {
service = svc
return true, nil
}
return false, nil
}
if err := wait.PollImmediate(poll, loadBalancerCreateTimeout, pollFunc); err != nil {
Failf("Timeout waiting for service %q to have no load balancer", name)
}
return service
}
func (j *ServiceTestJig) TestReachableHTTP(host string, port int, timeout time.Duration) {
if err := wait.PollImmediate(poll, timeout, func() (bool, error) { return testReachableHTTP(host, port, "/echo?msg=hello", "hello") }); err != nil {
Failf("Could not reach HTTP service through %v:%v after %v: %v", host, port, timeout, err)
}
}
func (j *ServiceTestJig) TestNotReachableHTTP(host string, port int, timeout time.Duration) {
if err := wait.PollImmediate(poll, timeout, func() (bool, error) { return testNotReachableHTTP(host, port) }); err != nil {
Failf("Could still reach HTTP service through %v:%v after %v: %v", host, port, timeout, err)
}
}
func (j *ServiceTestJig) TestReachableUDP(host string, port int, timeout time.Duration) {
if err := wait.PollImmediate(poll, timeout, func() (bool, error) { return testReachableUDP(host, port, "echo hello", "hello") }); err != nil {
Failf("Could not reach UDP service through %v:%v after %v: %v", host, port, timeout, err)
}
}
func (j *ServiceTestJig) TestNotReachableUDP(host string, port int, timeout time.Duration) {
if err := wait.PollImmediate(poll, timeout, func() (bool, error) { return testNotReachableUDP(host, port, "echo hello") }); err != nil {
Failf("Could still reach UDP service through %v:%v after %v: %v", host, port, timeout, err)
}
}
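// getIngressPoint returns the IP of a LoadBalancerIngress entry, falling back
// to its Hostname when no IP is set.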
func getIngressPoint(ing *api.LoadBalancerIngress) string {
host := ing.IP
if host == "" {
host = ing.Hostname
}
return host
}
// newRCTemplate returns the default api.ReplicationController object for
// this jig, but does not actually create the RC. The default RC has the same
// name as the jig and runs the "netexec" container.
func (j *ServiceTestJig) newRCTemplate(namespace string) *api.ReplicationController {
rc := &api.ReplicationController{
ObjectMeta: api.ObjectMeta{
Namespace: namespace,
Name: j.Name,
Labels: j.Labels,
},
Spec: api.ReplicationControllerSpec{
Replicas: 1,
Selector: j.Labels,
Template: &api.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{
Labels: j.Labels,
},
Spec: api.PodSpec{
Containers: []api.Container{
{
Name: "netexec",
Image: "gcr.io/google_containers/netexec:1.4",
Args: []string{"--http-port=80", "--udp-port=80"},
ReadinessProbe: &api.Probe{
PeriodSeconds: 3,
Handler: api.Handler{
HTTPGet: &api.HTTPGetAction{
Port: intstr.FromInt(80),
Path: "/hostName",
},
},
},
},
},
TerminationGracePeriodSeconds: new(int64),
},
},
},
}
return rc
}
// RunOrFail creates a ReplicationController and Pod(s) and waits for the
// Pod(s) to be running. Callers can provide a function to tweak the RC object
// before it is created.
func (j *ServiceTestJig) RunOrFail(namespace string, tweak func(rc *api.ReplicationController)) *api.ReplicationController {
rc := j.newRCTemplate(namespace)
if tweak != nil {
tweak(rc)
}
result, err := j.Client.ReplicationControllers(namespace).Create(rc)
if err != nil {
Failf("Failed to created RC %q: %v", rc.Name, err)
}
pods, err := j.waitForPodsCreated(namespace, rc.Spec.Replicas)
if err != nil {
Failf("Failed to create pods: %v", err)
}
if err := j.waitForPodsReady(namespace, pods); err != nil {
Failf("Failed waiting for pods to be running: %v", err)
}
return result
}
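// A minimal sketch of pairing the RC with a Service (assumes the jig and
// namespace from the surrounding tests):
//
//	j.RunOrFail(ns, func(rc *api.ReplicationController) {
//		rc.Spec.Replicas = 2
//	})
//	svc := j.CreateTCPServiceOrFail(ns, nil)
//	j.SanityCheckService(svc, api.ServiceTypeClusterIP)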
func (j *ServiceTestJig) waitForPodsCreated(namespace string, replicas int) ([]string, error) {
timeout := 2 * time.Minute
// List the pods, making sure we observe all the replicas.
label := labels.SelectorFromSet(labels.Set(j.Labels))
Logf("Waiting up to %v for %d pods to be created", timeout, replicas)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(2 * time.Second) {
options := api.ListOptions{LabelSelector: label}
pods, err := j.Client.Pods(namespace).List(options)
if err != nil {
return nil, err
}
found := []string{}
for _, pod := range pods.Items {
if pod.DeletionTimestamp != nil {
continue
}
found = append(found, pod.Name)
}
if len(found) == replicas {
Logf("Found all %d pods", replicas)
return found, nil
}
Logf("Found %d/%d pods - will retry", len(found), replicas)
}
return nil, fmt.Errorf("Timeout waiting for %d pods to be created", replicas)
}
func (j *ServiceTestJig) waitForPodsReady(namespace string, pods []string) error {
timeout := 2 * time.Minute
if !checkPodsRunningReady(j.Client, namespace, pods, timeout) {
return fmt.Errorf("Timeout waiting for %d pods to be ready")
}
return nil
}
// ServiceTestFixture is a simple helper to avoid boilerplate in the service tests.
type ServiceTestFixture struct {
ServiceName string
Namespace string
Client *client.Client
TestId string
Labels map[string]string
rcs map[string]bool
services map[string]bool
name string
image string
}
func NewServerTest(client *client.Client, namespace string, serviceName string) *ServiceTestFixture {
t := &ServiceTestFixture{}
t.Client = client
t.Namespace = namespace
t.ServiceName = serviceName
t.TestId = t.ServiceName + "-" + string(util.NewUUID())
t.Labels = map[string]string{
"testid": t.TestId,
}
t.rcs = make(map[string]bool)
t.services = make(map[string]bool)
t.name = "webserver"
t.image = "gcr.io/google_containers/test-webserver"
return t
}
// BuildServiceSpec builds the default config for a Service, which callers can then modify.
func (t *ServiceTestFixture) BuildServiceSpec() *api.Service {
service := &api.Service{
ObjectMeta: api.ObjectMeta{
Name: t.ServiceName,
Namespace: t.Namespace,
},
Spec: api.ServiceSpec{
Selector: t.Labels,
Ports: []api.ServicePort{{
Port: 80,
TargetPort: intstr.FromInt(80),
}},
},
}
return service
}
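// Callers usually tweak the returned spec before creating it (sketch only;
// error handling elided):
//
//	svc := t.BuildServiceSpec()
//	svc.Spec.Type = api.ServiceTypeNodePort
//	svc, err := t.CreateService(svc)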
// CreateWebserverRC creates rc-backed pods with the well-known webserver
// configuration and records it for cleanup.
func (t *ServiceTestFixture) CreateWebserverRC(replicas int) *api.ReplicationController {
rcSpec := rcByNamePort(t.name, replicas, t.image, 80, api.ProtocolTCP, t.Labels)
rcAct, err := t.createRC(rcSpec)
if err != nil {
Failf("Failed to create rc %s: %v", rcSpec.Name, err)
}
if err := verifyPods(t.Client, t.Namespace, t.name, false, replicas); err != nil {
Failf("Failed to create %d pods with name %s: %v", replicas, t.name, err)
}
return rcAct
}
// createRC creates a replication controller and records it for cleanup.
func (t *ServiceTestFixture) createRC(rc *api.ReplicationController) (*api.ReplicationController, error) {
rc, err := t.Client.ReplicationControllers(t.Namespace).Create(rc)
if err == nil {
t.rcs[rc.Name] = true
}
return rc, err
}
// CreateService creates a Service and records it for cleanup.
func (t *ServiceTestFixture) CreateService(service *api.Service) (*api.Service, error) {
result, err := t.Client.Services(t.Namespace).Create(service)
if err == nil {
t.services[service.Name] = true
}
return result, err
}
// DeleteService deletes a Service and removes it from the cleanup list.
func (t *ServiceTestFixture) DeleteService(serviceName string) error {
err := t.Client.Services(t.Namespace).Delete(serviceName)
if err == nil {
delete(t.services, serviceName)
}
return err
}
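// Cleanup scales every recorded RC to zero and deletes it, deletes every
// recorded Service, and returns the errors encountered along the way.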
func (t *ServiceTestFixture) Cleanup() []error {
var errs []error
for rcName := range t.rcs {
By("stopping RC " + rcName + " in namespace " + t.Namespace)
// First, resize the RC to 0.
old, err := t.Client.ReplicationControllers(t.Namespace).Get(rcName)
if err != nil {
errs = append(errs, err)
}
old.Spec.Replicas = 0
if _, err := t.Client.ReplicationControllers(t.Namespace).Update(old); err != nil {
errs = append(errs, err)
}
// TODO(mikedanese): Wait.
// Then, delete the RC altogether.
if err := t.Client.ReplicationControllers(t.Namespace).Delete(rcName); err != nil {
errs = append(errs, err)
}
}
for serviceName := range t.services {
By("deleting service " + serviceName + " in namespace " + t.Namespace)
err := t.Client.Services(t.Namespace).Delete(serviceName)
if err != nil {
errs = append(errs, err)
}
}
return errs
}
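// Typical fixture lifecycle (sketch only; mirrors how the tests above use the
// fixture, with error handling elided):
//
//	t := NewServerTest(c, ns, "my-service")
//	defer func() {
//		if errs := t.Cleanup(); len(errs) != 0 {
//			Failf("errors during cleanup: %v", errs)
//		}
//	}()
//	t.CreateWebserverRC(1)
//	svc, err := t.CreateService(t.BuildServiceSpec())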