
E2E: Startup test cleanup + RunCommand Enhancement (#7388)

* Add BeforeSuite to startup
* Reduce timeouts for startup
* Fix cleanup + set kubeconfig

Signed-off-by: Derek Nola <derek.nola@suse.com>
Branch: pull/7414/head
Author: Derek Nola <derek.nola@suse.com>, committed 2 years ago via GitHub
Commit: 7175ebe2be
Changed files:
  .drone.yml (1 line changed)
  tests/e2e/startup/startup_test.go (44 lines changed)
  tests/e2e/testutils.go (11 lines changed)

.drone.yml

@@ -752,6 +752,7 @@ steps:
   - |
     if [ "$DRONE_BUILD_EVENT" = "pull_request" ]; then
       cd ../upgradecluster
+      vagrant destroy -f
       E2E_RELEASE_CHANNEL="latest" go test -v -timeout=45m ./upgradecluster_test.go -ci -local
     fi
   - docker stop registry && docker rm registry
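
The new `vagrant destroy -f` forcibly tears down any upgradecluster VMs left over from a previous run before `go test` brings them up again, so stale machine state cannot leak into a PR build.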

tests/e2e/startup/startup_test.go

@@ -71,23 +71,29 @@ func KillK3sCluster(nodes []string) error {
 		if _, err := e2e.RunCmdOnNode("sudo k3s-killall.sh", node); err != nil {
 			return err
 		}
+		if strings.Contains(node, "server") {
+			if _, err := e2e.RunCmdOnNode("sudo rm -rf /var/lib/rancher/k3s/server/db", node); err != nil {
+				return err
+			}
+		}
 	}
 	return nil
 }
 
 var _ = ReportAfterEach(e2e.GenReport)
 
+var _ = BeforeSuite(func() {
+	var err error
+	if *local {
+		serverNodeNames, agentNodeNames, err = e2e.CreateLocalCluster(*nodeOS, 1, 1)
+	} else {
+		serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, 1, 1)
+	}
+	Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
+})
+
 var _ = Describe("Various Startup Configurations", Ordered, func() {
 	Context("Verify CRI-Dockerd :", func() {
-		It("Stands up the nodes", func() {
-			var err error
-			if *local {
-				serverNodeNames, agentNodeNames, err = e2e.CreateLocalCluster(*nodeOS, 1, 1)
-			} else {
-				serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, 1, 1)
-			}
-			Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
-		})
 		It("Starts K3s with no issues", func() {
			dockerYAML := "docker: true"
			err := StartK3sCluster(append(serverNodeNames, agentNodeNames...), dockerYAML, dockerYAML)
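
Two cleanup-related changes land in this hunk. KillK3sCluster now also wipes the server's datastore (/var/lib/rancher/k3s/server/db), so each startup scenario begins from a clean slate. More structurally, node creation moves out of the first It block and into BeforeSuite, Ginkgo's hook for one-time setup that runs before any spec. A minimal sketch of that pattern, using a hypothetical flag in place of the real cluster helpers:

package startup_test

import (
	"testing"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

func TestStartup(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Startup Suite")
}

// clusterUp stands in for the serverNodeNames/agentNodeNames state that
// CreateCluster or CreateLocalCluster would populate in the real suite.
var clusterUp bool

// BeforeSuite runs exactly once, before the first spec. If it fails,
// Ginkgo reports the failure and skips every spec, instead of letting
// later tests run against a half-built cluster.
var _ = BeforeSuite(func() {
	clusterUp = true // hypothetical stand-in for e2e.CreateCluster(...)
	Expect(clusterUp).To(BeTrue())
})

var _ = Describe("Startup", Ordered, func() {
	It("can assume the cluster already exists", func() {
		Expect(clusterUp).To(BeTrue())
	})
})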
@@ -109,7 +115,7 @@ var _ = Describe("Various Startup Configurations", Ordered, func() {
 				for _, node := range nodes {
 					g.Expect(node.Status).Should(Equal("Ready"))
 				}
-			}, "620s", "5s").Should(Succeed())
+			}, "360s", "5s").Should(Succeed())
 			_, _ = e2e.ParseNodes(kubeConfigFile, true)
 
 			fmt.Printf("\nFetching pods status\n")
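
The readiness polls here and in the hunks below drop from 620s to 360s, matching the commit's stated goal of reducing startup timeouts; only the pod-metrics poll keeps a longer 600s window.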
@@ -123,7 +129,7 @@ var _ = Describe("Various Startup Configurations", Ordered, func() {
 						g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
 					}
 				}
-			}, "620s", "5s").Should(Succeed())
+			}, "360s", "5s").Should(Succeed())
 			_, _ = e2e.ParsePods(kubeConfigFile, true)
 		})
 		It("Kills the cluster", func() {
@@ -153,7 +159,7 @@ var _ = Describe("Various Startup Configurations", Ordered, func() {
 				for _, node := range nodes {
 					g.Expect(node.Status).Should(Equal("Ready"))
 				}
-			}, "620s", "5s").Should(Succeed())
+			}, "360s", "5s").Should(Succeed())
 			_, _ = e2e.ParseNodes(kubeConfigFile, true)
 
 			fmt.Printf("\nFetching pods status\n")
@@ -167,7 +173,7 @@ var _ = Describe("Various Startup Configurations", Ordered, func() {
 						g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
 					}
 				}
-			}, "620s", "5s").Should(Succeed())
+			}, "360s", "5s").Should(Succeed())
 			_, _ = e2e.ParsePods(kubeConfigFile, true)
 		})
 		It("Kills the cluster", func() {
@@ -197,7 +203,7 @@ var _ = Describe("Various Startup Configurations", Ordered, func() {
 				for _, node := range nodes {
 					g.Expect(node.Status).Should(Equal("Ready"))
 				}
-			}, "620s", "5s").Should(Succeed())
+			}, "360s", "5s").Should(Succeed())
 			_, _ = e2e.ParseNodes(kubeConfigFile, true)
 
 			fmt.Printf("\nFetching pods status\n")
@@ -211,21 +217,21 @@ var _ = Describe("Various Startup Configurations", Ordered, func() {
 						g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
 					}
 				}
-			}, "620s", "5s").Should(Succeed())
+			}, "360s", "5s").Should(Succeed())
 			_, _ = e2e.ParsePods(kubeConfigFile, true)
 		})
 		It("Returns pod metrics", func() {
 			cmd := "kubectl top pod -A"
 			Eventually(func() error {
-				_, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0])
+				_, err := e2e.RunCommand(cmd)
 				return err
-			}, "620s", "5s").Should(Succeed())
+			}, "600s", "5s").Should(Succeed())
 		})
 		It("Returns node metrics", func() {
 			cmd := "kubectl top node"
-			_, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0])
+			_, err := e2e.RunCommand(cmd)
 			Expect(err).NotTo(HaveOccurred())
 		})
@@ -237,7 +243,7 @@ var _ = Describe("Various Startup Configurations", Ordered, func() {
 		It("Collects logs from a pod", func() {
 			cmd := "kubectl logs -n kube-system -l app.kubernetes.io/name=traefik -c traefik"
-			_, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0])
+			_, err := e2e.RunCommand(cmd)
 			Expect(err).NotTo(HaveOccurred())
 		})
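
Note the pattern change in the last two hunks: kubectl checks that previously ran inside a server node via RunCmdOnNode now run on the host through RunCommand. The testutils.go changes below make that work by exporting the generated kubeconfig path to the environment.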

tests/e2e/testutils.go

@@ -303,6 +303,9 @@ func GenKubeConfigFile(serverName string) (string, error) {
 	if err := os.WriteFile(kubeConfigFile, []byte(kubeConfig), 0644); err != nil {
 		return "", err
 	}
+	if err := os.Setenv("E2E_KUBECONFIG", kubeConfigFile); err != nil {
+		return "", err
+	}
 	return kubeConfigFile, nil
 }
@@ -437,17 +440,19 @@ func RestartClusterAgent(nodeNames []string) error {
 // RunCmdOnNode executes a command from within the given node
 func RunCmdOnNode(cmd string, nodename string) (string, error) {
-	runcmd := "vagrant ssh -c \"" + cmd + "\" " + nodename
+	runcmd := "vagrant ssh " + nodename + " -c \"" + cmd + "\""
 	out, err := RunCommand(runcmd)
 	if err != nil {
-		return out, fmt.Errorf("failed to run command %s on node %s: %v", cmd, nodename, err)
+		return out, fmt.Errorf("failed to run command: %s on node %s: %s, %v", cmd, nodename, out, err)
 	}
 	return out, nil
 }
 
+// RunCommand executes a command on the host
 func RunCommand(cmd string) (string, error) {
 	c := exec.Command("bash", "-c", cmd)
+	if kc, ok := os.LookupEnv("E2E_KUBECONFIG"); ok {
+		c.Env = append(os.Environ(), "KUBECONFIG="+kc)
+	}
 	out, err := c.CombinedOutput()
 	return string(out), err
 }
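
Taken together, the two testutils.go hunks thread the kubeconfig through the environment: GenKubeConfigFile exports E2E_KUBECONFIG after writing the file, and RunCommand translates it into KUBECONFIG for the child process, so host-side kubectl calls hit the cluster under test without an explicit flag. (The RunCmdOnNode change also reorders the arguments to the more conventional `vagrant ssh <name> -c <command>` form and folds the command output into the error message.) A self-contained sketch of the environment plumbing, with a hypothetical kubeconfig path:

package main

import (
	"fmt"
	"os"
	"os/exec"
)

// runCommand mirrors the RunCommand shown above: it runs cmd through bash
// on the host and, when the suite has exported E2E_KUBECONFIG, forwards it
// to the child process as KUBECONFIG.
func runCommand(cmd string) (string, error) {
	c := exec.Command("bash", "-c", cmd)
	if kc, ok := os.LookupEnv("E2E_KUBECONFIG"); ok {
		c.Env = append(os.Environ(), "KUBECONFIG="+kc)
	}
	out, err := c.CombinedOutput()
	return string(out), err
}

func main() {
	// Hypothetical path; in the real suite GenKubeConfigFile sets this
	// right after writing the kubeconfig fetched from the server node.
	os.Setenv("E2E_KUBECONFIG", "/tmp/startup_kubeconfig")

	// The child sees KUBECONFIG=/tmp/startup_kubeconfig, so a plain
	// kubectl invocation would target the test cluster automatically.
	out, _ := runCommand("echo KUBECONFIG is $KUBECONFIG")
	fmt.Print(out) // KUBECONFIG is /tmp/startup_kubeconfig
}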
