Browse Source

Enable E2E testing with local k3s binary

Signed-off-by: Derek Nola <derek.nola@suse.com>
pull/6131/head
Derek Nola 2 years ago
parent
commit
1972fb7cd6
  1. 4
      tests/e2e/amd64_resource_files/local-path-provisioner.yaml
  2. 49
      tests/e2e/testutils.go
  3. 19
      tests/e2e/validatecluster/validatecluster_test.go

4
tests/e2e/amd64_resource_files/local-path-provisioner.yaml

@ -26,6 +26,10 @@ spec:
mountPath: /data
ports:
- containerPort: 80
# This is only used if the regcred secret is created
# which increases the Docker Hub pull rate limit
imagePullSecrets:
- name: regcred
volumes:
- name: volv
persistentVolumeClaim:

49
tests/e2e/testutils.go

@ -128,6 +128,8 @@ func CreateLocalCluster(nodeOS string, serverCount, agentCount int) ([]string, [
serverNodeNames, agentNodeNames, nodeEnvs := genNodeEnvs(nodeOS, serverCount, agentCount)
var testOptions string
var cmd string
for _, env := range os.Environ() {
if strings.HasPrefix(env, "E2E_") {
testOptions += " " + env
@ -135,14 +137,27 @@ func CreateLocalCluster(nodeOS string, serverCount, agentCount int) ([]string, [
}
testOptions += " E2E_RELEASE_VERSION=skip"
cmd := fmt.Sprintf(`%s vagrant up --no-provision &> vagrant.log`, nodeEnvs)
if _, err := RunCommand(cmd); err != nil {
return nil, nil, fmt.Errorf("failed creating nodes: %s: %v", cmd, err)
// Bring up all of the nodes in parallel
errg, _ := errgroup.WithContext(context.Background())
for i, node := range append(serverNodeNames, agentNodeNames...) {
if i == 0 {
cmd = fmt.Sprintf(`%s %s vagrant up --no-provision %s &> vagrant.log`, nodeEnvs, testOptions, node)
} else {
cmd = fmt.Sprintf(`%s %s vagrant up --no-provision %s &>> vagrant.log`, nodeEnvs, testOptions, node)
}
errg.Go(func() error {
if _, err := RunCommand(cmd); err != nil {
return fmt.Errorf("failed creating cluster: %s: %v", cmd, err)
}
return nil
})
// libVirt/Virtualbox needs some time between provisioning nodes
time.Sleep(10 * time.Second)
}
nodeRoles := append(serverNodeNames, agentNodeNames...)
for _, node := range nodeRoles {
if err := errg.Wait(); err != nil {
return nil, nil, err
}
for _, node := range append(serverNodeNames, agentNodeNames...) {
cmd = fmt.Sprintf(`vagrant scp ../../../dist/artifacts/k3s %s:/tmp/`, node)
if _, err := RunCommand(cmd); err != nil {
return nil, nil, fmt.Errorf("failed to scp k3s binary to %s: %v", node, err)
@ -152,9 +167,21 @@ func CreateLocalCluster(nodeOS string, serverCount, agentCount int) ([]string, [
}
}
cmd = fmt.Sprintf(`%s %s vagrant provision &>> vagrant.log`, nodeEnvs, testOptions)
if _, err := RunCommand(cmd); err != nil {
return nil, nil, fmt.Errorf("failed creating cluster: %s: %v", cmd, err)
// Install K3s on all nodes in parallel
errg, _ = errgroup.WithContext(context.Background())
for _, node := range append(serverNodeNames, agentNodeNames...) {
cmd = fmt.Sprintf(`%s %s vagrant provision %s &>> vagrant.log`, nodeEnvs, testOptions, node)
errg.Go(func() error {
if _, err := RunCommand(cmd); err != nil {
return fmt.Errorf("failed creating cluster: %s: %v", cmd, err)
}
return nil
})
// K3s needs some time between joining nodes to avoid learner issues
time.Sleep(20 * time.Second)
}
if err := errg.Wait(); err != nil {
return nil, nil, err
}
return serverNodeNames, agentNodeNames, nil
@ -334,7 +361,7 @@ func DockerLogin(kubeConfig string, ci bool) error {
return nil
}
// Authenticate to Docker Hub to increase the pull rate limit
cmd := fmt.Sprintf("kubectl create secret docker-registry regcred --from-file=%s --type=kubernetes.io/dockerconfigjson --kubeconfig=%s",
cmd := fmt.Sprintf("kubectl create secret docker-registry regcred --from-file=.dockerconfigjson=%s --kubeconfig=%s",
"../amd64_resource_files/docker_cred.json", kubeConfig)
res, err := RunCommand(cmd)
if err != nil {

19
tests/e2e/validatecluster/validatecluster_test.go

@ -20,6 +20,7 @@ var serverCount = flag.Int("serverCount", 3, "number of server nodes")
var agentCount = flag.Int("agentCount", 2, "number of agent nodes")
var hardened = flag.Bool("hardened", false, "true or false")
var ci = flag.Bool("ci", false, "running on CI")
var local = flag.Bool("local", false, "deploy a locally built K3s binary")
// Environment Variables Info:
// E2E_EXTERNAL_DB: mysql, postgres, etcd (default: etcd)
@ -41,7 +42,11 @@ var _ = Describe("Verify Create", Ordered, func() {
Context("Cluster :", func() {
It("Starts up with no issues", func() {
var err error
serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
if *local {
serverNodeNames, agentNodeNames, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount)
} else {
serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
}
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog())
fmt.Println("CLUSTER CONFIG")
fmt.Println("OS:", *nodeOS)
@ -79,8 +84,8 @@ var _ = Describe("Verify Create", Ordered, func() {
})
It("Verifies ClusterIP Service", func() {
_, err := e2e.DeployWorkload("clusterip.yaml", kubeConfigFile, *hardened)
Expect(err).NotTo(HaveOccurred(), "Cluster IP manifest not deployed")
res, err := e2e.DeployWorkload("clusterip.yaml", kubeConfigFile, *hardened)
Expect(err).NotTo(HaveOccurred(), "Cluster IP manifest not deployed: "+res)
Eventually(func(g Gomega) {
cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-clusterip --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
@ -209,8 +214,8 @@ var _ = Describe("Verify Create", Ordered, func() {
})
It("Verifies Local Path Provisioner storage ", func() {
_, err := e2e.DeployWorkload("local-path-provisioner.yaml", kubeConfigFile, *hardened)
Expect(err).NotTo(HaveOccurred(), "local-path-provisioner manifest not deployed")
res, err := e2e.DeployWorkload("local-path-provisioner.yaml", kubeConfigFile, *hardened)
Expect(err).NotTo(HaveOccurred(), "local-path-provisioner manifest not deployed: "+res)
Eventually(func(g Gomega) {
cmd := "kubectl get pvc local-path-pvc --kubeconfig=" + kubeConfigFile
@ -233,7 +238,7 @@ var _ = Describe("Verify Create", Ordered, func() {
Expect(err).NotTo(HaveOccurred())
cmd = "kubectl delete pod volume-test --kubeconfig=" + kubeConfigFile
res, err := e2e.RunCommand(cmd)
res, err = e2e.RunCommand(cmd)
Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd+" result: "+res)
_, err = e2e.DeployWorkload("local-path-provisioner.yaml", kubeConfigFile, *hardened)
@ -267,7 +272,7 @@ var _ = Describe("Verify Create", Ordered, func() {
var failed = false
var _ = AfterEach(func() {
failed = failed || CurrentGinkgoTestDescription().Failed
failed = failed || CurrentSpecReport().Failed()
})
var _ = AfterSuite(func() {

Loading…
Cancel
Save