Use framework.ExpectNoError() for e2e/lifecycle

The e2e test framework provides ExpectNoError() to make test code more readable.
This change replaces gomega.Expect(err).NotTo(gomega.HaveOccurred()) with framework.ExpectNoError(err) throughout the e2e/lifecycle tests.
k3s-v1.15.3
Akihito INOH 2019-05-09 16:56:45 +09:00 committed by atoato88
parent e9af72c6e9
commit be4af8f83f
5 changed files with 30 additions and 32 deletions
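
For context, a minimal sketch of the pattern this commit applies, shown outside the diff. The Describe/It block and the doSomething helper below are illustrative placeholders, not code from the changed files; framework.ExpectNoError(err, explain...) is essentially a shorter, framework-standard wrapper around the same gomega assertion.

package lifecycle

import (
	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"

	"k8s.io/kubernetes/test/e2e/framework"
)

// doSomething stands in for any test step that returns an error.
func doSomething() error { return nil }

var _ = ginkgo.Describe("ExpectNoError example", func() {
	ginkgo.It("fails the spec when an unexpected error occurs", func() {
		err := doSomething()

		// Before: assert directly with gomega.
		gomega.Expect(err).NotTo(gomega.HaveOccurred(), "doSomething failed")

		// After: the framework helper expresses the same check more readably.
		framework.ExpectNoError(err, "doSomething failed")
	})
})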

@@ -227,7 +227,7 @@ var _ = SIGDescribe("Addon update", func() {
 var err error
 sshClient, err = getMasterSSHClient()
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to get the master SSH client.")
+framework.ExpectNoError(err, "Failed to get the master SSH client.")
 })
 ginkgo.AfterEach(func() {
@@ -275,7 +275,7 @@ var _ = SIGDescribe("Addon update", func() {
 for _, p := range remoteFiles {
 err := writeRemoteFile(sshClient, p.data, temporaryRemotePath, p.fileName, 0644)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to write file %q at remote path %q with ssh client %+v", p.fileName, temporaryRemotePath, sshClient)
+framework.ExpectNoError(err, "Failed to write file %q at remote path %q with ssh client %+v", p.fileName, temporaryRemotePath, sshClient)
 }
 // directory on kubernetes-master
@@ -284,7 +284,7 @@ var _ = SIGDescribe("Addon update", func() {
 // cleanup from previous tests
 _, _, _, err := sshExec(sshClient, fmt.Sprintf("sudo rm -rf %s", destinationDirPrefix))
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to remove remote dir %q with ssh client %+v", destinationDirPrefix, sshClient)
+framework.ExpectNoError(err, "Failed to remove remote dir %q with ssh client %+v", destinationDirPrefix, sshClient)
 defer sshExec(sshClient, fmt.Sprintf("sudo rm -rf %s", destinationDirPrefix)) // ignore result in cleanup
 sshExecAndVerify(sshClient, fmt.Sprintf("sudo mkdir -p %s", destinationDir))
@@ -300,7 +300,8 @@ var _ = SIGDescribe("Addon update", func() {
 // Delete the "ensure exist class" addon at the end.
 defer func() {
 e2elog.Logf("Cleaning up ensure exist class addon.")
-gomega.Expect(f.ClientSet.CoreV1().Services(addonNsName).Delete("addon-ensure-exists-test", nil)).NotTo(gomega.HaveOccurred())
+err := f.ClientSet.CoreV1().Services(addonNsName).Delete("addon-ensure-exists-test", nil)
+framework.ExpectNoError(err)
 }()
 waitForReplicationControllerInAddonTest(f.ClientSet, addonNsName, "addon-reconcile-test", true)
@@ -386,7 +387,7 @@ func getMasterSSHClient() (*ssh.Client, error) {
 func sshExecAndVerify(client *ssh.Client, cmd string) {
 _, _, rc, err := sshExec(client, cmd)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to execute %q with ssh client %+v", cmd, client)
+framework.ExpectNoError(err, "Failed to execute %q with ssh client %+v", cmd, client)
 gomega.Expect(rc).To(gomega.Equal(0), "error return code from executing command on the cluster: %s", cmd)
 }

@@ -46,7 +46,7 @@ var _ = SIGDescribe("Ports Security Check [Feature:KubeletSecurity]", func() {
 // make sure kubelet readonly (10255) and cadvisor (4194) ports are disabled via API server proxy
 ginkgo.It(fmt.Sprintf("should not be able to proxy to the readonly kubelet port %v using proxy subresource", ports.KubeletReadOnlyPort), func() {
 result, err := framework.NodeProxyRequest(f.ClientSet, nodeName, "pods/", ports.KubeletReadOnlyPort)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 var statusCode int
 result.StatusCode(&statusCode)
@@ -54,7 +54,7 @@ var _ = SIGDescribe("Ports Security Check [Feature:KubeletSecurity]", func() {
 })
 ginkgo.It("should not be able to proxy to cadvisor port 4194 using proxy subresource", func() {
 result, err := framework.NodeProxyRequest(f.ClientSet, nodeName, "containers/", 4194)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 var statusCode int
 result.StatusCode(&statusCode)

@@ -35,7 +35,6 @@ import (
 testutils "k8s.io/kubernetes/test/utils"
 "github.com/onsi/ginkgo"
-"github.com/onsi/gomega"
 )
 const (
@@ -70,7 +69,7 @@ var _ = SIGDescribe("Reboot [Disruptive] [Feature:Reboot]", func() {
 namespaceName := metav1.NamespaceSystem
 ginkgo.By(fmt.Sprintf("Collecting events from namespace %q.", namespaceName))
 events, err := f.ClientSet.CoreV1().Events(namespaceName).List(metav1.ListOptions{})
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 for _, e := range events.Items {
 e2elog.Logf("event for %v: %v %v: %v", e.InvolvedObject.Name, e.Source, e.Reason, e.Message)

@@ -27,7 +27,6 @@ import (
 "k8s.io/kubernetes/test/e2e/framework"
 "github.com/onsi/ginkgo"
-"github.com/onsi/gomega"
 )
 func resizeRC(c clientset.Interface, ns, name string, replicas int32) error {
@@ -51,7 +50,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
 c = f.ClientSet
 ns = f.Namespace.Name
 systemPods, err := framework.GetPodsInNamespace(c, ns, map[string]string{})
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 systemPodsNo = int32(len(systemPods))
 if strings.Index(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") >= 0 {
 framework.Failf("Test dose not support cluster setup with more than one MIG: %s", framework.TestContext.CloudConfig.NodeInstanceGroup)
@@ -104,7 +103,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
 // the cluster is restored to health.
 ginkgo.By("waiting for system pods to successfully restart")
 err := framework.WaitForPodsRunningReady(c, metav1.NamespaceSystem, systemPodsNo, 0, framework.PodReadyBeforeTimeout, map[string]string{})
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 })
 ginkgo.It("should be able to delete nodes", func() {
@@ -112,20 +111,20 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
 // The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname
 name := "my-hostname-delete-node"
 numNodes, err := framework.NumberOfRegisteredNodes(c)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 originalNodeCount = int32(numNodes)
 common.NewRCByName(c, ns, name, originalNodeCount, nil)
 err = framework.VerifyPods(c, ns, name, true, originalNodeCount)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 targetNumNodes := int32(framework.TestContext.CloudConfig.NumNodes - 1)
 ginkgo.By(fmt.Sprintf("decreasing cluster size to %d", targetNumNodes))
 err = framework.ResizeGroup(group, targetNumNodes)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 err = framework.WaitForGroupSize(group, targetNumNodes)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 err = framework.WaitForReadyNodes(c, int(originalNodeCount-1), 10*time.Minute)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 ginkgo.By("waiting 1 minute for the watch in the podGC to catch up, remove any pods scheduled on " +
 "the now non-existent node and the RC to recreate it")
@@ -133,7 +132,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
 ginkgo.By("verifying whether the pods from the removed node are recreated")
 err = framework.VerifyPods(c, ns, name, true, originalNodeCount)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 })
 // TODO: Bug here - testName is not correct
@@ -143,26 +142,26 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
 name := "my-hostname-add-node"
 common.NewSVCByName(c, ns, name)
 numNodes, err := framework.NumberOfRegisteredNodes(c)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 originalNodeCount = int32(numNodes)
 common.NewRCByName(c, ns, name, originalNodeCount, nil)
 err = framework.VerifyPods(c, ns, name, true, originalNodeCount)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 targetNumNodes := int32(framework.TestContext.CloudConfig.NumNodes + 1)
 ginkgo.By(fmt.Sprintf("increasing cluster size to %d", targetNumNodes))
 err = framework.ResizeGroup(group, targetNumNodes)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 err = framework.WaitForGroupSize(group, targetNumNodes)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 err = framework.WaitForReadyNodes(c, int(originalNodeCount+1), 10*time.Minute)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 ginkgo.By(fmt.Sprintf("increasing size of the replication controller to %d and verifying all pods are running", originalNodeCount+1))
 err = resizeRC(c, ns, name, originalNodeCount+1)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 err = framework.VerifyPods(c, ns, name, true, originalNodeCount+1)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 })
 })
 })

@@ -29,7 +29,6 @@ import (
 testutils "k8s.io/kubernetes/test/utils"
 "github.com/onsi/ginkgo"
-"github.com/onsi/gomega"
 )
 func nodeNames(nodes []v1.Node) []string {
@@ -54,14 +53,14 @@ var _ = SIGDescribe("Restart [Disruptive]", func() {
 framework.SkipUnlessProviderIs("gce", "gke")
 var err error
 ps, err = testutils.NewPodStore(f.ClientSet, metav1.NamespaceSystem, labels.Everything(), fields.Everything())
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 numNodes, err = framework.NumberOfRegisteredNodes(f.ClientSet)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 systemNamespace = metav1.NamespaceSystem
 ginkgo.By("ensuring all nodes are ready")
 originalNodes, err = framework.CheckNodesReady(f.ClientSet, numNodes, framework.NodeReadyInitialTimeout)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 e2elog.Logf("Got the following nodes before restart: %v", nodeNames(originalNodes))
 ginkgo.By("ensuring all pods are running and ready")
@@ -87,11 +86,11 @@ var _ = SIGDescribe("Restart [Disruptive]", func() {
 ginkgo.It("should restart all nodes and ensure all nodes and pods recover", func() {
 ginkgo.By("restarting all of the nodes")
 err := common.RestartNodes(f.ClientSet, originalNodes)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 ginkgo.By("ensuring all nodes are ready after the restart")
 nodesAfter, err := framework.CheckNodesReady(f.ClientSet, numNodes, framework.RestartNodeReadyAgainTimeout)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 e2elog.Logf("Got the following nodes after restart: %v", nodeNames(nodesAfter))
 // Make sure that we have the same number of nodes. We're not checking
@@ -108,7 +107,7 @@ var _ = SIGDescribe("Restart [Disruptive]", func() {
 ginkgo.By("ensuring the same number of pods are running and ready after restart")
 podCheckStart := time.Now()
 podNamesAfter, err := framework.WaitForNRestartablePods(ps, len(originalPodNames), framework.RestartPodReadyAgainTimeout)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 remaining := framework.RestartPodReadyAgainTimeout - time.Since(podCheckStart)
 if !framework.CheckPodsRunningReadyOrSucceeded(f.ClientSet, systemNamespace, podNamesAfter, remaining) {
 pods := ps.List()