mirror of https://github.com/k3s-io/k3s
Use framework.ExpectNoError() on e2e/common
parent 314264aeaf
commit b17384acbe
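The change is a mechanical sweep: every bare Gomega error assertion in test/e2e/common is replaced by the e2e framework helper. A minimal before/after sketch of the pattern, assuming the upstream Kubernetes test framework (the wrapper function and the message text are illustrative, taken from the hunks below):

```go
package common

import (
	// Only the old style needs the gomega dot-import.
	. "github.com/onsi/gomega"

	"k8s.io/kubernetes/test/e2e/framework"
)

func assertStyles(err error) {
	// Old style: a direct Gomega assertion at each call site.
	Expect(err).NotTo(HaveOccurred(), "Failed to update configmap")

	// New style: one framework call; a non-nil err still fails the spec,
	// and the trailing arguments become the formatted explanation.
	framework.ExpectNoError(err, "Failed to update configmap %q", "name")
}
```

Behaviorally the two are equivalent on failure; the helper centralizes the assertion so individual test files need fewer Gomega dot-imports.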
@@ -182,7 +182,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
 	configMap.ResourceVersion = "" // to force update
 	configMap.Data["data-1"] = "value-2"
 	_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(configMap)
-	Expect(err).NotTo(HaveOccurred(), "Failed to update configmap %q in namespace %q", configMap.Name, f.Namespace.Name)
+	framework.ExpectNoError(err, "Failed to update configmap %q in namespace %q", configMap.Name, f.Namespace.Name)

 	By("waiting to observe update in volume")
 	Eventually(pollLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-2"))
@@ -446,14 +446,14 @@ var _ = Describe("[sig-storage] ConfigMap", func() {

 	By(fmt.Sprintf("Deleting configmap %v", deleteConfigMap.Name))
 	err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(deleteConfigMap.Name, &metav1.DeleteOptions{})
-	Expect(err).NotTo(HaveOccurred(), "Failed to delete configmap %q in namespace %q", deleteConfigMap.Name, f.Namespace.Name)
+	framework.ExpectNoError(err, "Failed to delete configmap %q in namespace %q", deleteConfigMap.Name, f.Namespace.Name)

 	By(fmt.Sprintf("Updating configmap %v", updateConfigMap.Name))
 	updateConfigMap.ResourceVersion = "" // to force update
 	delete(updateConfigMap.Data, "data-1")
 	updateConfigMap.Data["data-3"] = "value-3"
 	_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(updateConfigMap)
-	Expect(err).NotTo(HaveOccurred(), "Failed to update configmap %q in namespace %q", updateConfigMap.Name, f.Namespace.Name)
+	framework.ExpectNoError(err, "Failed to update configmap %q in namespace %q", updateConfigMap.Name, f.Namespace.Name)

 	By(fmt.Sprintf("Creating configMap with name %s", createConfigMap.Name))
 	if createConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(createConfigMap); err != nil {

@@ -163,7 +163,7 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
 	podClient.CreateSync(pod)

 	pod, err := podClient.Get(pod.Name, metav1.GetOptions{})
-	Expect(err).NotTo(HaveOccurred(), "Failed to get pod %q", pod.Name)
+	framework.ExpectNoError(err, "Failed to get pod %q", pod.Name)

 	Eventually(func() (string, error) {
 		return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)

@@ -392,11 +392,11 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {

 	By("waiting for pod running")
 	err = framework.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, framework.PodStartShortTimeout)
-	Expect(err).NotTo(HaveOccurred(), "while waiting for pod to be running")
+	framework.ExpectNoError(err, "while waiting for pod to be running")

 	By("deleting the pod gracefully")
 	err = framework.DeletePodWithWait(f, f.ClientSet, pod)
-	Expect(err).NotTo(HaveOccurred(), "failed to delete pod")
+	framework.ExpectNoError(err, "failed to delete pod")
 })

 /*
@@ -476,7 +476,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {

 	By("waiting for pod running")
 	err := framework.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, framework.PodStartShortTimeout)
-	Expect(err).NotTo(HaveOccurred(), "while waiting for pod to be running")
+	framework.ExpectNoError(err, "while waiting for pod to be running")

 	By("creating a file in subpath")
 	cmd := "touch /volume_mount/mypath/foo/test.log"
@@ -499,11 +499,11 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {

 	By("waiting for annotated pod running")
 	err = framework.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, framework.PodStartShortTimeout)
-	Expect(err).NotTo(HaveOccurred(), "while waiting for annotated pod to be running")
+	framework.ExpectNoError(err, "while waiting for annotated pod to be running")

 	By("deleting the pod gracefully")
 	err = framework.DeletePodWithWait(f, f.ClientSet, pod)
-	Expect(err).NotTo(HaveOccurred(), "failed to delete pod")
+	framework.ExpectNoError(err, "failed to delete pod")
 })

 /*

@@ -91,7 +91,7 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
 	e2elog.Logf("PodSpec: initContainers in spec.initContainers")
 	startedPod := podClient.Create(pod)
 	w, err := podClient.Watch(metav1.SingleObject(startedPod.ObjectMeta))
-	Expect(err).NotTo(HaveOccurred(), "error watching a pod")
+	framework.ExpectNoError(err, "error watching a pod")
 	wr := watch.NewRecorder(w)
 	ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.PodStartTimeout)
 	defer cancel()
@@ -162,7 +162,7 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
 	e2elog.Logf("PodSpec: initContainers in spec.initContainers")
 	startedPod := podClient.Create(pod)
 	w, err := podClient.Watch(metav1.SingleObject(startedPod.ObjectMeta))
-	Expect(err).NotTo(HaveOccurred(), "error watching a pod")
+	framework.ExpectNoError(err, "error watching a pod")
 	wr := watch.NewRecorder(w)
 	ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.PodStartTimeout)
 	defer cancel()
@@ -234,7 +234,7 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
 	e2elog.Logf("PodSpec: initContainers in spec.initContainers")
 	startedPod := podClient.Create(pod)
 	w, err := podClient.Watch(metav1.SingleObject(startedPod.ObjectMeta))
-	Expect(err).NotTo(HaveOccurred(), "error watching a pod")
+	framework.ExpectNoError(err, "error watching a pod")

 	wr := watch.NewRecorder(w)
 	ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.PodStartTimeout)
@@ -352,7 +352,7 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
 	startedPod := podClient.Create(pod)

 	w, err := podClient.Watch(metav1.SingleObject(startedPod.ObjectMeta))
-	Expect(err).NotTo(HaveOccurred(), "error watching a pod")
+	framework.ExpectNoError(err, "error watching a pod")

 	wr := watch.NewRecorder(w)
 	ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.PodStartTimeout)

@@ -144,7 +144,7 @@ var _ = framework.KubeDescribe("NodeLease", func() {
 	})
 	// a timeout is acceptable, since it means we waited 5 minutes and didn't see any unwarranted node status updates
 	if err != nil && err != wait.ErrWaitTimeout {
-		Expect(err).NotTo(HaveOccurred(), "error waiting for infrequent nodestatus update")
+		framework.ExpectNoError(err, "error waiting for infrequent nodestatus update")
 	}

 	By("verify node is still in ready status even though node status report is infrequent")

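The NodeLease hunk is the one swap that sits inside an error guard, because a poll timeout is the expected, acceptable outcome there. A sketch of that pattern, with `interval`, `timeout`, and `condition` as hypothetical placeholders for the test's real polling arguments (`wait` is k8s.io/apimachinery/pkg/util/wait):

```go
// A timeout means the full window passed with no unwarranted node status
// update, which is what the test wants; only other errors fail the spec.
err := wait.Poll(interval, timeout, condition)
if err != nil && err != wait.ErrWaitTimeout {
	// err is known non-nil in this branch, so ExpectNoError always fails
	// here; the helper is used for its formatted failure message.
	framework.ExpectNoError(err, "error waiting for infrequent nodestatus update")
}
```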
@@ -67,12 +67,12 @@ func (c *PrivilegedPodTestConfig) run(containerName string, expectSuccess bool)
 	msg := fmt.Sprintf("cmd %v, stdout %q, stderr %q", cmd, stdout, stderr)

 	if expectSuccess {
-		Expect(err).NotTo(HaveOccurred(), msg)
+		framework.ExpectNoError(err, msg)
 		// We need to clean up the dummy link that was created, as it
 		// leaks out into the node level -- yuck.
 		_, _, err := c.f.ExecCommandInContainerWithFullOutput(
 			c.privilegedPod, containerName, reverseCmd...)
-		Expect(err).NotTo(HaveOccurred(),
+		framework.ExpectNoError(err,
 			fmt.Sprintf("could not remove dummy1 link: %v", err))
 	} else {
 		Expect(err).To(HaveOccurred(), msg)
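In the privileged-pod hunk above, the old assertion spanned two lines, so the replacement keeps the explicit fmt.Sprintf argument. Assuming framework.ExpectNoError formats its trailing explain arguments itself (the behavior of the upstream helper; worth confirming against the vendored version), an equivalent and slightly simpler spelling would be:

```go
// The leading string acts as a format string for the remaining arguments,
// making the separate fmt.Sprintf unnecessary.
framework.ExpectNoError(err, "could not remove dummy1 link: %v", err)
```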
@@ -188,7 +188,7 @@ var _ = Describe("[sig-storage] Projected configMap", func() {
 	configMap.ResourceVersion = "" // to force update
 	configMap.Data["data-1"] = "value-2"
 	_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(configMap)
-	Expect(err).NotTo(HaveOccurred(), "Failed to update configmap %q in namespace %q", configMap.Name, f.Namespace.Name)
+	framework.ExpectNoError(err, "Failed to update configmap %q in namespace %q", configMap.Name, f.Namespace.Name)

 	By("waiting to observe update in volume")
 	Eventually(pollLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-2"))
@@ -374,14 +374,14 @@ var _ = Describe("[sig-storage] Projected configMap", func() {

 	By(fmt.Sprintf("Deleting configmap %v", deleteConfigMap.Name))
 	err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(deleteConfigMap.Name, &metav1.DeleteOptions{})
-	Expect(err).NotTo(HaveOccurred(), "Failed to delete configmap %q in namespace %q", deleteConfigMap.Name, f.Namespace.Name)
+	framework.ExpectNoError(err, "Failed to delete configmap %q in namespace %q", deleteConfigMap.Name, f.Namespace.Name)

 	By(fmt.Sprintf("Updating configmap %v", updateConfigMap.Name))
 	updateConfigMap.ResourceVersion = "" // to force update
 	delete(updateConfigMap.Data, "data-1")
 	updateConfigMap.Data["data-3"] = "value-3"
 	_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(updateConfigMap)
-	Expect(err).NotTo(HaveOccurred(), "Failed to update configmap %q in namespace %q", updateConfigMap.Name, f.Namespace.Name)
+	framework.ExpectNoError(err, "Failed to update configmap %q in namespace %q", updateConfigMap.Name, f.Namespace.Name)

 	By(fmt.Sprintf("Creating configMap with name %s", createConfigMap.Name))
 	if createConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(createConfigMap); err != nil {

@@ -163,7 +163,7 @@ var _ = Describe("[sig-storage] Projected downwardAPI", func() {
 	podClient.CreateSync(pod)

 	pod, err := podClient.Get(pod.Name, metav1.GetOptions{})
-	Expect(err).NotTo(HaveOccurred(), "Failed to get pod %q", pod.Name)
+	framework.ExpectNoError(err, "Failed to get pod %q", pod.Name)

 	Eventually(func() (string, error) {
 		return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)

@@ -382,14 +382,14 @@ var _ = Describe("[sig-storage] Projected secret", func() {

 	By(fmt.Sprintf("Deleting secret %v", deleteSecret.Name))
 	err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(deleteSecret.Name, &metav1.DeleteOptions{})
-	Expect(err).NotTo(HaveOccurred(), "Failed to delete secret %q in namespace %q", deleteSecret.Name, f.Namespace.Name)
+	framework.ExpectNoError(err, "Failed to delete secret %q in namespace %q", deleteSecret.Name, f.Namespace.Name)

 	By(fmt.Sprintf("Updating secret %v", updateSecret.Name))
 	updateSecret.ResourceVersion = "" // to force update
 	delete(updateSecret.Data, "data-1")
 	updateSecret.Data["data-3"] = []byte("value-3")
 	_, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(updateSecret)
-	Expect(err).NotTo(HaveOccurred(), "Failed to update secret %q in namespace %q", updateSecret.Name, f.Namespace.Name)
+	framework.ExpectNoError(err, "Failed to update secret %q in namespace %q", updateSecret.Name, f.Namespace.Name)

 	By(fmt.Sprintf("Creating secret with name %s", createSecret.Name))
 	if createSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(createSecret); err != nil {

@@ -347,14 +347,14 @@ var _ = Describe("[sig-storage] Secrets", func() {

 	By(fmt.Sprintf("Deleting secret %v", deleteSecret.Name))
 	err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(deleteSecret.Name, &metav1.DeleteOptions{})
-	Expect(err).NotTo(HaveOccurred(), "Failed to delete secret %q in namespace %q", deleteSecret.Name, f.Namespace.Name)
+	framework.ExpectNoError(err, "Failed to delete secret %q in namespace %q", deleteSecret.Name, f.Namespace.Name)

 	By(fmt.Sprintf("Updating secret %v", updateSecret.Name))
 	updateSecret.ResourceVersion = "" // to force update
 	delete(updateSecret.Data, "data-1")
 	updateSecret.Data["data-3"] = []byte("value-3")
 	_, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(updateSecret)
-	Expect(err).NotTo(HaveOccurred(), "Failed to update secret %q in namespace %q", updateSecret.Name, f.Namespace.Name)
+	framework.ExpectNoError(err, "Failed to update secret %q in namespace %q", updateSecret.Name, f.Namespace.Name)

 	By(fmt.Sprintf("Creating secret with name %s", createSecret.Name))
 	if createSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(createSecret); err != nil {

@@ -49,7 +49,6 @@ import (
 	"k8s.io/kubernetes/test/e2e/framework/volume"

 	. "github.com/onsi/ginkgo"
-	. "github.com/onsi/gomega"
 )

 // These tests need privileged containers, which are disabled by default. Run
@@ -130,7 +129,7 @@ var _ = Describe("[sig-storage] GCP Volumes", func() {
 	defer func() {
 		volume.TestCleanup(f, config)
 		err := c.CoreV1().Endpoints(namespace.Name).Delete(name, nil)
-		Expect(err).NotTo(HaveOccurred(), "defer: Gluster delete endpoints failed")
+		framework.ExpectNoError(err, "defer: Gluster delete endpoints failed")
 	}()

 	tests := []volume.Test{
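The two GCP Volumes hunks go together: the second replaces what is presumably the file's last direct Expect call, which is why the first can drop the gomega dot-import entirely. The resulting import block would look roughly like this, assuming the file already imports the framework package (which the new framework.ExpectNoError call requires):

```go
import (
	"k8s.io/kubernetes/test/e2e/framework"
	"k8s.io/kubernetes/test/e2e/framework/volume"

	// gomega is no longer dot-imported; ginkgo is still needed for
	// Describe, By, and friends.
	. "github.com/onsi/ginkgo"
)
```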