mirror of https://github.com/k3s-io/k3s
commit 201d60b426
@@ -1,7 +1,7 @@
 apiVersion: v1
 kind: ReplicationController
 metadata:
-  name: l7-lb-controller
+  name: l7-lb-controller-v0.5.2
   namespace: kube-system
   labels:
     k8s-app: glbc
@@ -70,6 +70,10 @@ var (
 	// Name of the loadbalancer controller within the cluster addon
 	lbContainerName = "l7-lb-controller"
 
+	// Labels used to identify existing loadbalancer controllers.
+	// TODO: Pull this out of the RC manifest.
+	clusterAddonLBLabels = map[string]string{"k8s-app": "glbc"}
+
 	// If set, the test tries to perform an HTTP GET on each url endpoint of
 	// the Ingress. Only set to false to short-circuit test runs in debugging.
 	verifyHTTPGET = true
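The manifest hunk above renames the addon RC to a versioned name (l7-lb-controller-v0.5.2), so a fixed-name Get can no longer find it; every lookup in this change keys on the k8s-app=glbc label instead. Below is a minimal, self-contained sketch of how such a label map becomes a selector via the same SelectorFromSet call the diff uses. It is written against the modern k8s.io/apimachinery/pkg/labels import path as an assumption; the labels package vendored in the tree at the time exposed the same functions.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// Same label map as clusterAddonLBLabels in the hunk above.
	lbLabels := map[string]string{"k8s-app": "glbc"}
	sel := labels.SelectorFromSet(labels.Set(lbLabels))
	fmt.Println(sel.String()) // k8s-app=glbc

	// A set-based selector matches any object that carries at least
	// these key=value pairs, regardless of the object's name.
	fmt.Println(sel.Matches(labels.Set{"k8s-app": "glbc"}))     // true
	fmt.Println(sel.Matches(labels.Set{"k8s-app": "heapster"})) // false
}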
@@ -274,13 +278,19 @@ func (cont *IngressController) create() {
 		Expect(err).NotTo(HaveOccurred())
 	}
 	rc := rcFromManifest(cont.rcPath)
-	existingRc, err := cont.c.ReplicationControllers(api.NamespaceSystem).Get(lbContainerName)
+
+	listOpts := api.ListOptions{LabelSelector: labels.SelectorFromSet(labels.Set(clusterAddonLBLabels))}
+	existingRCs, err := cont.c.ReplicationControllers(api.NamespaceSystem).List(listOpts)
 	Expect(err).NotTo(HaveOccurred())
+	if len(existingRCs.Items) != 1 {
+		Failf("Unexpected number of lb cluster addons %v with label %v in kube-system namespace", len(existingRCs.Items), clusterAddonLBLabels)
+	}
+
 	// Merge the existing spec and new spec. The modifications should not
 	// manifest as functional changes to the controller. Most importantly, the
 	// podTemplate shouldn't change (but for the additional test cmd line flags)
 	// to ensure we test actual cluster functionality across upgrades.
-	rc.Spec = existingRc.Spec
+	rc.Spec = existingRCs.Items[0].Spec
 	rc.Name = "glbc"
 	rc.Namespace = cont.ns
 	rc.Labels = controllerLabels
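The create() hunk keeps the discovered addon's Spec and overrides only the metadata, so the test runs a faithful copy of the production controller inside the test namespace. A hedged sketch of that copy step in isolation; cloneRCForTest is a hypothetical helper, not part of the test code, and api is the internal Kubernetes API package the test already imports.

// cloneRCForTest builds a test RC from an existing cluster-addon RC,
// reusing its pod template so the test exercises the real controller
// rather than a possibly drifted copy from the manifest.
func cloneRCForTest(existing *api.ReplicationController, testNS string, testLabels map[string]string) *api.ReplicationController {
	rc := &api.ReplicationController{}
	rc.Spec = existing.Spec // keep the addon's podTemplate and selector
	rc.Name = "glbc"        // fixed, unversioned name inside the test namespace
	rc.Namespace = testNS
	rc.Labels = testLabels
	return rc
}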
@@ -413,7 +423,7 @@ var _ = Describe("GCE L7 LoadBalancer Controller [Feature:Ingress]", func() {
 		client = framework.Client
 		ns = framework.Namespace.Name
 		// Scale down the existing Ingress controller so it doesn't interfere with the test.
-		Expect(scaleRCByName(client, api.NamespaceSystem, lbContainerName, 0)).NotTo(HaveOccurred())
+		Expect(scaleRCByLabels(client, api.NamespaceSystem, clusterAddonLBLabels, 0)).NotTo(HaveOccurred())
 		addonDir = filepath.Join(
 			testContext.RepoRoot, "cluster", "addons", "cluster-loadbalancing", "glbc")
 
@@ -464,7 +474,7 @@ var _ = Describe("GCE L7 LoadBalancer Controller [Feature:Ingress]", func() {
 			return true, nil
 		})
 		// TODO: Remove this once issue #17802 is fixed
-		Expect(scaleRCByName(client, ingController.rc.Namespace, ingController.rc.Name, 0)).NotTo(HaveOccurred())
+		Expect(scaleRCByLabels(client, ingController.rc.Namespace, ingController.rc.Labels, 0)).NotTo(HaveOccurred())
 
 		// If the controller failed to cleanup the test will fail, but we want to cleanup
 		// resources before that.
@@ -475,7 +485,7 @@ var _ = Describe("GCE L7 LoadBalancer Controller [Feature:Ingress]", func() {
 			Failf("Failed to cleanup GCE L7 resources.")
 		}
 		// Restore the cluster Addon.
-		Expect(scaleRCByName(client, api.NamespaceSystem, lbContainerName, 1)).NotTo(HaveOccurred())
+		Expect(scaleRCByLabels(client, api.NamespaceSystem, clusterAddonLBLabels, 1)).NotTo(HaveOccurred())
 		framework.afterEach()
 		Logf("Successfully verified GCE L7 loadbalancer via Ingress.")
 	})
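Taken together, the setup hunk earlier and the two hunks above park the cluster addon for the duration of the test and restore it afterwards. The same pattern condensed into one wrapper, as a sketch; withAddonParked is hypothetical and not part of the test code.

// withAddonParked scales every RC matching l in kube-system down to zero,
// runs body, then scales the matched RCs back up to one replica each.
func withAddonParked(c *client.Client, l map[string]string, body func()) {
	Expect(scaleRCByLabels(c, api.NamespaceSystem, l, 0)).NotTo(HaveOccurred())
	defer func() {
		Expect(scaleRCByLabels(c, api.NamespaceSystem, l, 1)).NotTo(HaveOccurred())
	}()
	body()
}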
@@ -2911,22 +2911,39 @@ func getNodePortURL(client *client.Client, ns, name string, svcPort int) (string, error) {
 	return "", fmt.Errorf("Failed to find external address for service %v", name)
 }
 
-// scaleRCByName scales an RC via ns/name lookup. If replicas == 0 it waits till
+// scaleRCByLabels scales an RC via ns/label lookup. If replicas == 0 it waits till
 // none are running, otherwise it does what a synchronous scale operation would do.
-func scaleRCByName(client *client.Client, ns, name string, replicas uint) error {
-	if err := ScaleRC(client, ns, name, replicas, false); err != nil {
-		return err
-	}
-	rc, err := client.ReplicationControllers(ns).Get(name)
+func scaleRCByLabels(client *client.Client, ns string, l map[string]string, replicas uint) error {
+	listOpts := api.ListOptions{LabelSelector: labels.SelectorFromSet(labels.Set(l))}
+	rcs, err := client.ReplicationControllers(ns).List(listOpts)
 	if err != nil {
 		return err
 	}
-	if replicas == 0 {
-		return waitForRCPodsGone(client, rc)
-	} else {
-		return waitForPodsWithLabelRunning(
-			client, ns, labels.SelectorFromSet(labels.Set(rc.Spec.Selector)))
+	if len(rcs.Items) == 0 {
+		return fmt.Errorf("RC with labels %v not found in ns %v", l, ns)
+	}
+	Logf("Scaling %v RCs with labels %v in ns %v to %v replicas.", len(rcs.Items), l, ns, replicas)
+	for _, labelRC := range rcs.Items {
+		name := labelRC.Name
+		if err := ScaleRC(client, ns, name, replicas, false); err != nil {
+			return err
+		}
+		rc, err := client.ReplicationControllers(ns).Get(name)
+		if err != nil {
+			return err
+		}
+		if replicas == 0 {
+			if err := waitForRCPodsGone(client, rc); err != nil {
+				return err
+			}
+		} else {
+			if err := waitForPodsWithLabelRunning(
+				client, ns, labels.SelectorFromSet(labels.Set(rc.Spec.Selector))); err != nil {
+				return err
+			}
+		}
 	}
+	return nil
 }
 
 func getPodLogs(c *client.Client, namespace, podName, containerName string) (string, error) {
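For reference, an illustrative call mirroring the setup hunk earlier: scale every RC labeled k8s-app=glbc in kube-system down to zero, which per the replicas == 0 branch above also blocks until the pods of each matched RC are gone.

if err := scaleRCByLabels(client, api.NamespaceSystem, clusterAddonLBLabels, 0); err != nil {
	Failf("failed to scale down the cluster addon: %v", err)
}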