Merge pull request #61379 from nikhiljindal/kubemciTest
Automatic merge from submit-queue (batch tested with PRs 61453, 61393, 61379, 61373, 61494). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Fixing kubemci conformance tests

Ref https://github.com/GoogleCloudPlatform/k8s-multicluster-ingress/issues/131

Changes:
* Add a static IP annotation while running the tests for kubemci, which requires the IP to be preallocated.
* Add a default backend service. I have added it in the spec directly, so the change applies to the in-cluster ingress controller as well, which should be fine. kubemci requires users to specify a default backend service that they must ensure exists in all clusters.
* Disable the update-SSL-certificate test for kubemci, since it does not support that operation.
* Minor logging fixes.

Verified by running the tests locally that they now pass:

```
$ KUBECONFIG=~/.kube/config KUBE_MASTER_IP="<IP>" go run hack/e2e.go -- --test --test_args="--ginkgo.focus=kubemci"
• [SLOW TEST:629.179 seconds]
Loadbalancing: L7
/usr/local/google/home/nikhiljindal/code/src/github.com/kubernetes/kubernetes/_output/local/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:22
  GCE [Slow] [Feature:kubemci]
  /usr/local/google/home/nikhiljindal/code/src/github.com/kubernetes/kubernetes/_output/local/go/src/k8s.io/kubernetes/test/e2e/network/ingress.go:604
    should conform to Ingress spec
    /usr/local/google/home/nikhiljindal/code/src/github.com/kubernetes/kubernetes/_output/local/go/src/k8s.io/kubernetes/test/e2e/network/ingress.go:637
------------------------------
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
Mar 19 19:45:59.242: INFO: Running AfterSuite actions on all node
Mar 19 19:45:59.242: INFO: Running AfterSuite actions on node 1

Ran 1 of 820 Specs in 631.245 seconds
SUCCESS! -- 1 Passed | 0 Failed | 0 Pending | 819 Skipped
PASS

Ginkgo ran 1 suite in 10m32.602848558s
Test Suite Passed
```

cc @G-Harmon @MrHohn

```release-note
NONE
```
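For concreteness, here is a minimal sketch of what an Ingress satisfying both kubemci prerequisites could look like. This is illustrative only: it assumes `framework.IngressStaticIPKey` resolves to the GCE `kubernetes.io/ingress.global-static-ip-name` annotation, and it reuses the `echoheadersx` service and `foo.bar.com` host from the test manifest touched below; the address name `test-static-ip` is a placeholder.

```yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: echomap
  annotations:
    # kubemci requires the named global static IP to exist before the
    # Ingress is created, e.g.:
    #   gcloud compute addresses create test-static-ip --global
    kubernetes.io/ingress.global-static-ip-name: test-static-ip
spec:
  # kubemci also requires an explicit default backend; the referenced
  # service must exist in every cluster the multicluster ingress spans.
  backend:
    serviceName: echoheadersx
    servicePort: 80
  rules:
  - host: foo.bar.com
    http:
      paths:
      - path: /foo
        backend:
          serviceName: echoheadersx
          servicePort: 80
```

In the e2e test changed below, the address is allocated through `gceController.CreateStaticIP` rather than `gcloud`, and is released together with the rest of the load-balancer resources during cleanup.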
commit 85afad33eb
@@ -165,7 +165,7 @@ func CreateIngressComformanceTests(jig *IngressTestJig, ns string, annotations m
 	updateURLMapHost := "bar.baz.com"
 	updateURLMapPath := "/testurl"
 	// Platform agnostic list of tests that must be satisfied by all controllers
-	return []IngressConformanceTests{
+	tests := []IngressConformanceTests{
 		{
 			fmt.Sprintf("should create a basic HTTP ingress"),
 			func() { jig.CreateIngress(manifestPath, ns, annotations, annotations) },
@@ -176,27 +176,6 @@ func CreateIngressComformanceTests(jig *IngressTestJig, ns string, annotations m
 			func() { jig.AddHTTPS(tlsSecretName, tlsHost) },
 			fmt.Sprintf("waiting for HTTPS updates to reflect in ingress"),
 		},
-		{
-			fmt.Sprintf("should update SSL certificate with modified hostname %v", updatedTLSHost),
-			func() {
-				jig.Update(func(ing *extensions.Ingress) {
-					newRules := []extensions.IngressRule{}
-					for _, rule := range ing.Spec.Rules {
-						if rule.Host != tlsHost {
-							newRules = append(newRules, rule)
-							continue
-						}
-						newRules = append(newRules, extensions.IngressRule{
-							Host:             updatedTLSHost,
-							IngressRuleValue: rule.IngressRuleValue,
-						})
-					}
-					ing.Spec.Rules = newRules
-				})
-				jig.AddHTTPS(tlsSecretName, updatedTLSHost)
-			},
-			fmt.Sprintf("Waiting for updated certificates to accept requests for host %v", updatedTLSHost),
-		},
 		{
 			fmt.Sprintf("should update url map for host %v to expose a single url: %v", updateURLMapHost, updateURLMapPath),
 			func() {
@@ -233,6 +212,31 @@ func CreateIngressComformanceTests(jig *IngressTestJig, ns string, annotations m
 			fmt.Sprintf("Waiting for path updates to reflect in L7"),
 		},
 	}
+	// Skip the Update TLS cert test for kubemci: https://github.com/GoogleCloudPlatform/k8s-multicluster-ingress/issues/141.
+	if jig.Class != MulticlusterIngressClassValue {
+		tests = append(tests, IngressConformanceTests{
+			fmt.Sprintf("should update SSL certificate with modified hostname %v", updatedTLSHost),
+			func() {
+				jig.Update(func(ing *extensions.Ingress) {
+					newRules := []extensions.IngressRule{}
+					for _, rule := range ing.Spec.Rules {
+						if rule.Host != tlsHost {
+							newRules = append(newRules, rule)
+							continue
+						}
+						newRules = append(newRules, extensions.IngressRule{
+							Host:             updatedTLSHost,
+							IngressRuleValue: rule.IngressRuleValue,
+						})
+					}
+					ing.Spec.Rules = newRules
+				})
+				jig.AddHTTPS(tlsSecretName, updatedTLSHost)
+			},
+			fmt.Sprintf("Waiting for updated certificates to accept requests for host %v", updatedTLSHost),
+		})
+	}
+	return tests
 }
 
 // GenerateRSACerts generates a basic self signed certificate using a key length
@@ -1131,7 +1135,7 @@ func (j *IngressTestJig) CreateIngress(manifestPath, ns string, ingAnnotations m
 	for k, v := range ingAnnotations {
 		j.Ingress.Annotations[k] = v
 	}
-	j.Logger.Infof(fmt.Sprintf("creating" + j.Ingress.Name + " ingress"))
+	j.Logger.Infof(fmt.Sprintf("creating " + j.Ingress.Name + " ingress"))
 	j.Ingress, err = j.runCreate(j.Ingress)
 	ExpectNoError(err)
 }
@@ -1334,7 +1338,9 @@ func (j *IngressTestJig) pollIngressWithCert(ing *extensions.Ingress, address st
 		}
 		for _, p := range rules.IngressRuleValue.HTTP.Paths {
 			if waitForNodePort {
-				if err := j.pollServiceNodePort(ing.Namespace, p.Backend.ServiceName, int(p.Backend.ServicePort.IntVal)); err != nil {
+				nodePort := int(p.Backend.ServicePort.IntVal)
+				if err := j.pollServiceNodePort(ing.Namespace, p.Backend.ServiceName, nodePort); err != nil {
+					j.Logger.Infof("Error in waiting for nodeport %d on service %v/%v: %s", nodePort, ing.Namespace, p.Backend.ServiceName, err)
 					return err
 				}
 			}
@@ -2157,7 +2157,6 @@ func (b kubectlBuilder) WithStdinReader(reader io.Reader) *kubectlBuilder {
 
 func (b kubectlBuilder) ExecOrDie() string {
 	str, err := b.Exec()
-	Logf("stdout: %q", str)
 	// In case of i/o timeout error, try talking to the apiserver again after 2s before dying.
 	// Note that we're still dying after retrying so that we can get visibility to triage it further.
 	if isTimeout(err) {
@@ -2216,6 +2215,7 @@ func (b kubectlBuilder) Exec() (string, error) {
 		return "", fmt.Errorf("timed out waiting for command %v:\nCommand stdout:\n%v\nstderr:\n%v\n", cmd, cmd.Stdout, cmd.Stderr)
 	}
 	Logf("stderr: %q", stderr.String())
+	Logf("stdout: %q", stdout.String())
 	return stdout.String(), nil
 }
 
@@ -602,10 +602,20 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
 	})
 
 	Describe("GCE [Slow] [Feature:kubemci]", func() {
+		var gceController *framework.GCEIngressController
+
 		// Platform specific setup
 		BeforeEach(func() {
 			framework.SkipUnlessProviderIs("gce", "gke")
 			jig.Class = framework.MulticlusterIngressClassValue
+			By("Initializing gce controller")
+			gceController = &framework.GCEIngressController{
+				Ns:     ns,
+				Client: jig.Client,
+				Cloud:  framework.TestContext.CloudConfig,
+			}
+			err := gceController.Init()
+			Expect(err).NotTo(HaveOccurred())
 		})
 
 		// Platform specific cleanup
@@ -619,16 +629,26 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
 			}
 			By("Deleting ingress")
 			jig.TryDeleteIngress()
+
+			By("Cleaning up cloud resources")
+			Expect(gceController.CleanupGCEIngressController()).NotTo(HaveOccurred())
 		})
 
 		It("should conform to Ingress spec", func() {
 			jig.PollInterval = 5 * time.Second
-			conformanceTests = framework.CreateIngressComformanceTests(jig, ns, map[string]string{})
+			// Use the randomly generated namespace name as the ip address name.
+			ipName := ns
+			// ip released when the rest of lb resources are deleted in CleanupGCEIngressController
+			ipAddress := gceController.CreateStaticIP(ipName)
+			By(fmt.Sprintf("allocated static ip %v: %v through the GCE cloud provider", ipName, ipAddress))
+			conformanceTests = framework.CreateIngressComformanceTests(jig, ns, map[string]string{
+				framework.IngressStaticIPKey: ipName,
+			})
 			for _, t := range conformanceTests {
 				By(t.EntryLog)
 				t.Execute()
 				By(t.ExitLog)
-				jig.WaitForIngress(true /*waitForNodePort*/)
+				jig.WaitForIngress(false /*waitForNodePort*/)
 			}
 		})
 	})
@@ -3,6 +3,10 @@ kind: Ingress
 metadata:
   name: echomap
 spec:
+  # kubemci requires a default backend.
+  backend:
+    serviceName: echoheadersx
+    servicePort: 80
   rules:
   - host: foo.bar.com
     http:
@@ -22,4 +26,3 @@ spec:
       backend:
         serviceName: echoheadersx
        servicePort: 80
-