mirror of https://github.com/k3s-io/k3s
feat: use framework.ExpectNoError instead of gomega.Expect in e2e tests
parent ef9e794a36
commit c045046e5b
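The commit applies one mechanical substitution across the touched e2e test files: error assertions written with gomega are replaced by the framework helper, and the "github.com/onsi/gomega" import is dropped from files that no longer need it. A minimal sketch of the two equivalent forms, as a Go fragment only (err, podName and ns stand in for whatever the surrounding test code provides):

// old style: assert on err with gomega, failing the spec with a formatted message
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create pod %s in namespace: %s", podName, ns)

// new style: the e2e framework helper performs the same nil-error check and fails with the same message
framework.ExpectNoError(err, "failed to create pod %s in namespace: %s", podName, ns)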
@@ -31,7 +31,6 @@ import (
"k8s.io/kubernetes/test/e2e/framework"

"github.com/onsi/ginkgo"
-"github.com/onsi/gomega"
)

var _ = SIGDescribe("CustomResourceDefinition Watch", func() {
@@ -80,35 +79,35 @@ var _ = SIGDescribe("CustomResourceDefinition Watch", func() {
noxuResourceClient := newNamespacedCustomResourceClient(ns, f.DynamicClient, noxuDefinition)

watchA, err := watchCRWithName(noxuResourceClient, watchCRNameA)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to watch custom resource: %s", watchCRNameA)
+framework.ExpectNoError(err, "failed to watch custom resource: %s", watchCRNameA)

watchB, err := watchCRWithName(noxuResourceClient, watchCRNameB)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to watch custom resource: %s", watchCRNameB)
+framework.ExpectNoError(err, "failed to watch custom resource: %s", watchCRNameB)

testCrA := fixtures.NewNoxuInstance(ns, watchCRNameA)
testCrB := fixtures.NewNoxuInstance(ns, watchCRNameB)

ginkgo.By("Creating first CR ")
testCrA, err = instantiateCustomResource(testCrA, noxuResourceClient, noxuDefinition)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to instantiate custom resource: %+v", testCrA)
+framework.ExpectNoError(err, "failed to instantiate custom resource: %+v", testCrA)
expectEvent(watchA, watch.Added, testCrA)
expectNoEvent(watchB, watch.Added, testCrA)

ginkgo.By("Creating second CR")
testCrB, err = instantiateCustomResource(testCrB, noxuResourceClient, noxuDefinition)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to instantiate custom resource: %+v", testCrB)
+framework.ExpectNoError(err, "failed to instantiate custom resource: %+v", testCrB)
expectEvent(watchB, watch.Added, testCrB)
expectNoEvent(watchA, watch.Added, testCrB)

ginkgo.By("Deleting first CR")
err = deleteCustomResource(noxuResourceClient, watchCRNameA)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete custom resource: %s", watchCRNameA)
+framework.ExpectNoError(err, "failed to delete custom resource: %s", watchCRNameA)
expectEvent(watchA, watch.Deleted, nil)
expectNoEvent(watchB, watch.Deleted, nil)

ginkgo.By("Deleting second CR")
err = deleteCustomResource(noxuResourceClient, watchCRNameB)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete custom resource: %s", watchCRNameB)
+framework.ExpectNoError(err, "failed to delete custom resource: %s", watchCRNameB)
expectEvent(watchB, watch.Deleted, nil)
expectNoEvent(watchA, watch.Deleted, nil)
})

@@ -31,7 +31,6 @@ import (
imageutils "k8s.io/kubernetes/test/utils/image"

"github.com/onsi/ginkgo"
-"github.com/onsi/gomega"
)

var _ = SIGDescribe("Etcd failure [Disruptive]", func() {
@@ -46,13 +45,14 @@ var _ = SIGDescribe("Etcd failure [Disruptive]", func() {
// providers that provide those capabilities.
framework.SkipUnlessProviderIs("gce")

-gomega.Expect(framework.RunRC(testutils.RCConfig{
+err := framework.RunRC(testutils.RCConfig{
Client: f.ClientSet,
Name: "baz",
Namespace: f.Namespace.Name,
Image: imageutils.GetPauseImageName(),
Replicas: 1,
-})).NotTo(gomega.HaveOccurred())
+})
+framework.ExpectNoError(err)
})

ginkgo.It("should recover from network partition with master", func() {
@@ -98,7 +98,7 @@ func doEtcdFailure(failCommand, fixCommand string) {
func masterExec(cmd string) {
host := framework.GetMasterHost() + ":22"
result, err := e2essh.SSH(cmd, host, framework.TestContext.Provider)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to SSH to host %s on provider %s and run command: %q", host, framework.TestContext.Provider, cmd)
+framework.ExpectNoError(err, "failed to SSH to host %s on provider %s and run command: %q", host, framework.TestContext.Provider, cmd)
if result.Code != 0 {
e2essh.LogResult(result)
framework.Failf("master exec command returned non-zero")
@@ -123,7 +123,7 @@ func checkExistingRCRecovers(f *framework.Framework) {
}
for _, pod := range pods.Items {
err = podClient.Delete(pod.Name, metav1.NewDeleteOptions(0))
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete pod %s in namespace: %s", pod.Name, f.Namespace.Name)
+framework.ExpectNoError(err, "failed to delete pod %s in namespace: %s", pod.Name, f.Namespace.Name)
}
e2elog.Logf("apiserver has recovered")
return true, nil
@@ -133,7 +133,7 @@ func checkExistingRCRecovers(f *framework.Framework) {
framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) {
options := metav1.ListOptions{LabelSelector: rcSelector.String()}
pods, err := podClient.List(options)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to list pods in namespace: %s, that match label selector: %s", f.Namespace.Name, rcSelector.String())
+framework.ExpectNoError(err, "failed to list pods in namespace: %s, that match label selector: %s", f.Namespace.Name, rcSelector.String())
for _, pod := range pods.Items {
if pod.DeletionTimestamp == nil && podutil.IsPodReady(&pod) {
return true, nil

@@ -42,7 +42,6 @@ import (
"k8s.io/kubernetes/test/e2e/framework/metrics"

"github.com/onsi/ginkgo"
-"github.com/onsi/gomega"
imageutils "k8s.io/kubernetes/test/utils/image"
)

@@ -735,12 +734,12 @@ var _ = SIGDescribe("Garbage collector", func() {
}
ginkgo.By(fmt.Sprintf("set half of pods created by rc %s to have rc %s as owner as well", rc1Name, rc2Name))
pods, err := podClient.List(metav1.ListOptions{})
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to list pods in namespace: %s", f.Namespace.Name)
+framework.ExpectNoError(err, "failed to list pods in namespace: %s", f.Namespace.Name)
patch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"apiVersion":"v1","kind":"ReplicationController","name":"%s","uid":"%s"}]}}`, rc2.ObjectMeta.Name, rc2.ObjectMeta.UID)
for i := 0; i < halfReplicas; i++ {
pod := pods.Items[i]
_, err := podClient.Patch(pod.Name, types.StrategicMergePatchType, []byte(patch))
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod.Name, f.Namespace.Name, patch)
+framework.ExpectNoError(err, "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod.Name, f.Namespace.Name, patch)
}

ginkgo.By(fmt.Sprintf("delete the rc %s", rc1Name))
@@ -815,36 +814,36 @@ var _ = SIGDescribe("Garbage collector", func() {
pod1Name := "pod1"
pod1 := newGCPod(pod1Name)
pod1, err := podClient.Create(pod1)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create pod %s in namespace: %s", pod1Name, f.Namespace.Name)
+framework.ExpectNoError(err, "failed to create pod %s in namespace: %s", pod1Name, f.Namespace.Name)
pod2Name := "pod2"
pod2 := newGCPod(pod2Name)
pod2, err = podClient.Create(pod2)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create pod %s in namespace: %s", pod2Name, f.Namespace.Name)
+framework.ExpectNoError(err, "failed to create pod %s in namespace: %s", pod2Name, f.Namespace.Name)
pod3Name := "pod3"
pod3 := newGCPod(pod3Name)
pod3, err = podClient.Create(pod3)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create pod %s in namespace: %s", pod3Name, f.Namespace.Name)
+framework.ExpectNoError(err, "failed to create pod %s in namespace: %s", pod3Name, f.Namespace.Name)
// create circular dependency
addRefPatch := func(name string, uid types.UID) []byte {
return []byte(fmt.Sprintf(`{"metadata":{"ownerReferences":[{"apiVersion":"v1","kind":"Pod","name":"%s","uid":"%s","controller":true,"blockOwnerDeletion":true}]}}`, name, uid))
}
patch1 := addRefPatch(pod3.Name, pod3.UID)
pod1, err = podClient.Patch(pod1.Name, types.StrategicMergePatchType, patch1)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod1.Name, f.Namespace.Name, patch1)
+framework.ExpectNoError(err, "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod1.Name, f.Namespace.Name, patch1)
e2elog.Logf("pod1.ObjectMeta.OwnerReferences=%#v", pod1.ObjectMeta.OwnerReferences)
patch2 := addRefPatch(pod1.Name, pod1.UID)
pod2, err = podClient.Patch(pod2.Name, types.StrategicMergePatchType, patch2)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod2.Name, f.Namespace.Name, patch2)
+framework.ExpectNoError(err, "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod2.Name, f.Namespace.Name, patch2)
e2elog.Logf("pod2.ObjectMeta.OwnerReferences=%#v", pod2.ObjectMeta.OwnerReferences)
patch3 := addRefPatch(pod2.Name, pod2.UID)
pod3, err = podClient.Patch(pod3.Name, types.StrategicMergePatchType, patch3)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod3.Name, f.Namespace.Name, patch3)
+framework.ExpectNoError(err, "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod3.Name, f.Namespace.Name, patch3)
e2elog.Logf("pod3.ObjectMeta.OwnerReferences=%#v", pod3.ObjectMeta.OwnerReferences)
// delete one pod, should result in the deletion of all pods
deleteOptions := getForegroundOptions()
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(pod1.UID))
err = podClient.Delete(pod1.ObjectMeta.Name, deleteOptions)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete pod %s in namespace: %s", pod1.Name, f.Namespace.Name)
+framework.ExpectNoError(err, "failed to delete pod %s in namespace: %s", pod1.Name, f.Namespace.Name)
var pods *v1.PodList
var err2 error
// TODO: shorten the timeout when we make GC's periodic API rediscovery more efficient.
@@ -1074,7 +1073,7 @@ var _ = SIGDescribe("Garbage collector", func() {
ginkgo.By("Create the cronjob")
cronJob := newCronJob("simple", "*/1 * * * ?")
cronJob, err := f.ClientSet.BatchV1beta1().CronJobs(f.Namespace.Name).Create(cronJob)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create cronjob: %+v, in namespace: %s", cronJob, f.Namespace.Name)
+framework.ExpectNoError(err, "failed to create cronjob: %+v, in namespace: %s", cronJob, f.Namespace.Name)

ginkgo.By("Wait for the CronJob to create new Job")
err = wait.PollImmediate(500*time.Millisecond, 2*time.Minute, func() (bool, error) {

@@ -47,7 +47,7 @@ func extinguish(f *framework.Framework, totalNS int, maxAllowedAfterDel int, max
defer ginkgo.GinkgoRecover()
ns := fmt.Sprintf("nslifetest-%v", n)
_, err = f.CreateNamespace(ns, nil)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create namespace: %s", ns)
+framework.ExpectNoError(err, "failed to create namespace: %s", ns)
}(n)
}
wg.Wait()
@@ -57,7 +57,7 @@ func extinguish(f *framework.Framework, totalNS int, maxAllowedAfterDel int, max
time.Sleep(time.Duration(10 * time.Second))
deleteFilter := []string{"nslifetest"}
deleted, err := framework.DeleteNamespaces(f.ClientSet, deleteFilter, nil /* skipFilter */)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete namespace(s) containing: %s", deleteFilter)
+framework.ExpectNoError(err, "failed to delete namespace(s) containing: %s", deleteFilter)
gomega.Expect(len(deleted)).To(gomega.Equal(totalNS))

ginkgo.By("Waiting for namespaces to vanish")
@@ -86,11 +86,11 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
ginkgo.By("Creating a test namespace")
namespaceName := "nsdeletetest"
namespace, err := f.CreateNamespace(namespaceName, nil)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create namespace: %s", namespaceName)
+framework.ExpectNoError(err, "failed to create namespace: %s", namespaceName)

ginkgo.By("Waiting for a default service account to be provisioned in namespace")
err = framework.WaitForDefaultServiceAccountInNamespace(f.ClientSet, namespace.Name)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failure while waiting for a default service account to be provisioned in namespace: %s", namespace.Name)
+framework.ExpectNoError(err, "failure while waiting for a default service account to be provisioned in namespace: %s", namespace.Name)

ginkgo.By("Creating a pod in the namespace")
podName := "test-pod"
@@ -108,14 +108,14 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
},
}
pod, err = f.ClientSet.CoreV1().Pods(namespace.Name).Create(pod)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create pod %s in namespace: %s", podName, namespace.Name)
+framework.ExpectNoError(err, "failed to create pod %s in namespace: %s", podName, namespace.Name)

ginkgo.By("Waiting for the pod to have running status")
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.ClientSet, pod))

ginkgo.By("Deleting the namespace")
err = f.ClientSet.CoreV1().Namespaces().Delete(namespace.Name, nil)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete namespace: %s", namespace.Name)
+framework.ExpectNoError(err, "failed to delete namespace: %s", namespace.Name)

ginkgo.By("Waiting for the namespace to be removed.")
maxWaitSeconds := int64(60) + *pod.Spec.TerminationGracePeriodSeconds
@@ -130,7 +130,7 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {

ginkgo.By("Recreating the namespace")
namespace, err = f.CreateNamespace(namespaceName, nil)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create namespace: %s", namespaceName)
+framework.ExpectNoError(err, "failed to create namespace: %s", namespaceName)

ginkgo.By("Verifying there are no pods in the namespace")
_, err = f.ClientSet.CoreV1().Pods(namespace.Name).Get(pod.Name, metav1.GetOptions{})
@@ -143,11 +143,11 @@ func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
ginkgo.By("Creating a test namespace")
namespaceName := "nsdeletetest"
namespace, err := f.CreateNamespace(namespaceName, nil)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create namespace: %s", namespaceName)
+framework.ExpectNoError(err, "failed to create namespace: %s", namespaceName)

ginkgo.By("Waiting for a default service account to be provisioned in namespace")
err = framework.WaitForDefaultServiceAccountInNamespace(f.ClientSet, namespace.Name)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failure while waiting for a default service account to be provisioned in namespace: %s", namespace.Name)
+framework.ExpectNoError(err, "failure while waiting for a default service account to be provisioned in namespace: %s", namespace.Name)

ginkgo.By("Creating a service in the namespace")
serviceName := "test-service"
@@ -168,11 +168,11 @@ func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
},
}
service, err = f.ClientSet.CoreV1().Services(namespace.Name).Create(service)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create service %s in namespace %s", serviceName, namespace.Name)
+framework.ExpectNoError(err, "failed to create service %s in namespace %s", serviceName, namespace.Name)

ginkgo.By("Deleting the namespace")
err = f.ClientSet.CoreV1().Namespaces().Delete(namespace.Name, nil)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete namespace: %s", namespace.Name)
+framework.ExpectNoError(err, "failed to delete namespace: %s", namespace.Name)

ginkgo.By("Waiting for the namespace to be removed.")
maxWaitSeconds := int64(60)
@@ -187,7 +187,7 @@ func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {

ginkgo.By("Recreating the namespace")
namespace, err = f.CreateNamespace(namespaceName, nil)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create namespace: %s", namespaceName)
+framework.ExpectNoError(err, "failed to create namespace: %s", namespaceName)

ginkgo.By("Verifying there is no service in the namespace")
_, err = f.ClientSet.CoreV1().Services(namespace.Name).Get(service.Name, metav1.GetOptions{})

@@ -56,11 +56,11 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {
e2elog.Logf("Creating pod %s", podName)

_, err := c.CoreV1().Pods(ns).Create(newTablePod(podName))
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create pod %s in namespace: %s", podName, ns)
+framework.ExpectNoError(err, "failed to create pod %s in namespace: %s", podName, ns)

table := &metav1beta1.Table{}
err = c.CoreV1().RESTClient().Get().Resource("pods").Namespace(ns).Name(podName).SetHeader("Accept", "application/json;as=Table;v=v1beta1;g=meta.k8s.io").Do().Into(table)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get pod %s in Table form in namespace: %s", podName, ns)
+framework.ExpectNoError(err, "failed to get pod %s in Table form in namespace: %s", podName, ns)
e2elog.Logf("Table: %#v", table)

gomega.Expect(len(table.ColumnDefinitions)).To(gomega.BeNumerically(">", 2))
@@ -108,7 +108,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {
VersionedParams(&metav1.ListOptions{Limit: 2}, metav1.ParameterCodec).
SetHeader("Accept", "application/json;as=Table;v=v1beta1;g=meta.k8s.io").
Do().Into(pagedTable)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get pod templates in Table form in namespace: %s", ns)
+framework.ExpectNoError(err, "failed to get pod templates in Table form in namespace: %s", ns)
gomega.Expect(len(pagedTable.Rows)).To(gomega.Equal(2))
gomega.Expect(pagedTable.ResourceVersion).ToNot(gomega.Equal(""))
gomega.Expect(pagedTable.SelfLink).ToNot(gomega.Equal(""))
@@ -120,7 +120,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {
VersionedParams(&metav1.ListOptions{Continue: pagedTable.Continue}, metav1.ParameterCodec).
SetHeader("Accept", "application/json;as=Table;v=v1beta1;g=meta.k8s.io").
Do().Into(pagedTable)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get pod templates in Table form in namespace: %s", ns)
+framework.ExpectNoError(err, "failed to get pod templates in Table form in namespace: %s", ns)
gomega.Expect(len(pagedTable.Rows)).To(gomega.BeNumerically(">", 0))
gomega.Expect(pagedTable.Rows[0].Cells[0]).To(gomega.Equal("template-0002"))
})
@@ -130,7 +130,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {

table := &metav1beta1.Table{}
err := c.CoreV1().RESTClient().Get().Resource("nodes").SetHeader("Accept", "application/json;as=Table;v=v1beta1;g=meta.k8s.io").Do().Into(table)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get nodes in Table form across all namespaces")
+framework.ExpectNoError(err, "failed to get nodes in Table form across all namespaces")
e2elog.Logf("Table: %#v", table)

gomega.Expect(len(table.ColumnDefinitions)).To(gomega.BeNumerically(">=", 2))
@@ -168,7 +168,7 @@ func printTable(table *metav1beta1.Table) string {
tw := tabwriter.NewWriter(buf, 5, 8, 1, ' ', 0)
printer := printers.NewTablePrinter(printers.PrintOptions{})
err := printer.PrintObj(table, tw)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to print table: %+v", table)
+framework.ExpectNoError(err, "failed to print table: %+v", table)
tw.Flush()
return buf.String()
}

@@ -30,7 +30,6 @@ import (
e2elog "k8s.io/kubernetes/test/e2e/framework/log"

"github.com/onsi/ginkgo"
-"github.com/onsi/gomega"
)

const (
@@ -58,15 +57,15 @@ var _ = SIGDescribe("Watchers", func() {

ginkgo.By("creating a watch on configmaps with label A")
watchA, err := watchConfigMaps(f, "", multipleWatchersLabelValueA)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create a watch on configmaps with label: %s", multipleWatchersLabelValueA)
+framework.ExpectNoError(err, "failed to create a watch on configmaps with label: %s", multipleWatchersLabelValueA)

ginkgo.By("creating a watch on configmaps with label B")
watchB, err := watchConfigMaps(f, "", multipleWatchersLabelValueB)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create a watch on configmaps with label: %s", multipleWatchersLabelValueB)
+framework.ExpectNoError(err, "failed to create a watch on configmaps with label: %s", multipleWatchersLabelValueB)

ginkgo.By("creating a watch on configmaps with label A or B")
watchAB, err := watchConfigMaps(f, "", multipleWatchersLabelValueA, multipleWatchersLabelValueB)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create a watch on configmaps with label %s or %s", multipleWatchersLabelValueA, multipleWatchersLabelValueB)
+framework.ExpectNoError(err, "failed to create a watch on configmaps with label %s or %s", multipleWatchersLabelValueA, multipleWatchersLabelValueB)

testConfigMapA := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
@@ -87,7 +86,7 @@ var _ = SIGDescribe("Watchers", func() {

ginkgo.By("creating a configmap with label A and ensuring the correct watchers observe the notification")
testConfigMapA, err = c.CoreV1().ConfigMaps(ns).Create(testConfigMapA)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create a configmap with label %s in namespace: %s", multipleWatchersLabelValueA, ns)
+framework.ExpectNoError(err, "failed to create a configmap with label %s in namespace: %s", multipleWatchersLabelValueA, ns)
expectEvent(watchA, watch.Added, testConfigMapA)
expectEvent(watchAB, watch.Added, testConfigMapA)
expectNoEvent(watchB, watch.Added, testConfigMapA)
@@ -96,7 +95,7 @@ var _ = SIGDescribe("Watchers", func() {
testConfigMapA, err = updateConfigMap(c, ns, testConfigMapA.GetName(), func(cm *v1.ConfigMap) {
setConfigMapData(cm, "mutation", "1")
})
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to update configmap %s in namespace: %s", testConfigMapA.GetName(), ns)
+framework.ExpectNoError(err, "failed to update configmap %s in namespace: %s", testConfigMapA.GetName(), ns)
expectEvent(watchA, watch.Modified, testConfigMapA)
expectEvent(watchAB, watch.Modified, testConfigMapA)
expectNoEvent(watchB, watch.Modified, testConfigMapA)
@@ -105,28 +104,28 @@ var _ = SIGDescribe("Watchers", func() {
testConfigMapA, err = updateConfigMap(c, ns, testConfigMapA.GetName(), func(cm *v1.ConfigMap) {
setConfigMapData(cm, "mutation", "2")
})
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to update configmap %s in namespace: %s", testConfigMapA.GetName(), ns)
+framework.ExpectNoError(err, "failed to update configmap %s in namespace: %s", testConfigMapA.GetName(), ns)
expectEvent(watchA, watch.Modified, testConfigMapA)
expectEvent(watchAB, watch.Modified, testConfigMapA)
expectNoEvent(watchB, watch.Modified, testConfigMapA)

ginkgo.By("deleting configmap A and ensuring the correct watchers observe the notification")
err = c.CoreV1().ConfigMaps(ns).Delete(testConfigMapA.GetName(), nil)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete configmap %s in namespace: %s", testConfigMapA.GetName(), ns)
+framework.ExpectNoError(err, "failed to delete configmap %s in namespace: %s", testConfigMapA.GetName(), ns)
expectEvent(watchA, watch.Deleted, nil)
expectEvent(watchAB, watch.Deleted, nil)
expectNoEvent(watchB, watch.Deleted, nil)

ginkgo.By("creating a configmap with label B and ensuring the correct watchers observe the notification")
testConfigMapB, err = c.CoreV1().ConfigMaps(ns).Create(testConfigMapB)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create configmap %s in namespace: %s", testConfigMapB, ns)
+framework.ExpectNoError(err, "failed to create configmap %s in namespace: %s", testConfigMapB, ns)
expectEvent(watchB, watch.Added, testConfigMapB)
expectEvent(watchAB, watch.Added, testConfigMapB)
expectNoEvent(watchA, watch.Added, testConfigMapB)

ginkgo.By("deleting configmap B and ensuring the correct watchers observe the notification")
err = c.CoreV1().ConfigMaps(ns).Delete(testConfigMapB.GetName(), nil)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete configmap %s in namespace: %s", testConfigMapB.GetName(), ns)
+framework.ExpectNoError(err, "failed to delete configmap %s in namespace: %s", testConfigMapB.GetName(), ns)
expectEvent(watchB, watch.Deleted, nil)
expectEvent(watchAB, watch.Deleted, nil)
expectNoEvent(watchA, watch.Deleted, nil)
@@ -152,27 +151,27 @@ var _ = SIGDescribe("Watchers", func() {

ginkgo.By("creating a new configmap")
testConfigMap, err := c.CoreV1().ConfigMaps(ns).Create(testConfigMap)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create configmap %s in namespace: %s", testConfigMap.GetName(), ns)
+framework.ExpectNoError(err, "failed to create configmap %s in namespace: %s", testConfigMap.GetName(), ns)

ginkgo.By("modifying the configmap once")
testConfigMapFirstUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
setConfigMapData(cm, "mutation", "1")
})
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to update configmap %s in namespace: %s", testConfigMap.GetName(), ns)
+framework.ExpectNoError(err, "failed to update configmap %s in namespace: %s", testConfigMap.GetName(), ns)

ginkgo.By("modifying the configmap a second time")
testConfigMapSecondUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
setConfigMapData(cm, "mutation", "2")
})
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to update configmap %s in namespace %s a second time", testConfigMap.GetName(), ns)
+framework.ExpectNoError(err, "failed to update configmap %s in namespace %s a second time", testConfigMap.GetName(), ns)

ginkgo.By("deleting the configmap")
err = c.CoreV1().ConfigMaps(ns).Delete(testConfigMap.GetName(), nil)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete configmap %s in namespace: %s", testConfigMap.GetName(), ns)
+framework.ExpectNoError(err, "failed to delete configmap %s in namespace: %s", testConfigMap.GetName(), ns)

ginkgo.By("creating a watch on configmaps from the resource version returned by the first update")
testWatch, err := watchConfigMaps(f, testConfigMapFirstUpdate.ObjectMeta.ResourceVersion, fromResourceVersionLabelValue)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create a watch on configmaps from the resource version %s returned by the first update", testConfigMapFirstUpdate.ObjectMeta.ResourceVersion)
+framework.ExpectNoError(err, "failed to create a watch on configmaps from the resource version %s returned by the first update", testConfigMapFirstUpdate.ObjectMeta.ResourceVersion)

ginkgo.By("Expecting to observe notifications for all changes to the configmap after the first update")
expectEvent(testWatch, watch.Modified, testConfigMapSecondUpdate)
@@ -201,17 +200,17 @@ var _ = SIGDescribe("Watchers", func() {

ginkgo.By("creating a watch on configmaps")
testWatchBroken, err := watchConfigMaps(f, "", watchRestartedLabelValue)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create a watch on configmap with label: %s", watchRestartedLabelValue)
+framework.ExpectNoError(err, "failed to create a watch on configmap with label: %s", watchRestartedLabelValue)

ginkgo.By("creating a new configmap")
testConfigMap, err = c.CoreV1().ConfigMaps(ns).Create(testConfigMap)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create configmap %s in namespace: %s", configMapName, ns)
+framework.ExpectNoError(err, "failed to create configmap %s in namespace: %s", configMapName, ns)

ginkgo.By("modifying the configmap once")
_, err = updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
setConfigMapData(cm, "mutation", "1")
})
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to update configmap %s in namespace: %s", configMapName, ns)
+framework.ExpectNoError(err, "failed to update configmap %s in namespace: %s", configMapName, ns)

ginkgo.By("closing the watch once it receives two notifications")
expectEvent(testWatchBroken, watch.Added, testConfigMap)
@@ -225,7 +224,7 @@ var _ = SIGDescribe("Watchers", func() {
testConfigMapSecondUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
setConfigMapData(cm, "mutation", "2")
})
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to update configmap %s in namespace %s a second time", configMapName, ns)
+framework.ExpectNoError(err, "failed to update configmap %s in namespace %s a second time", configMapName, ns)

ginkgo.By("creating a new watch on configmaps from the last resource version observed by the first watch")
lastEventConfigMap, ok := lastEvent.Object.(*v1.ConfigMap)
@@ -233,11 +232,11 @@ var _ = SIGDescribe("Watchers", func() {
framework.Failf("Expected last notification to refer to a configmap but got: %v", lastEvent)
}
testWatchRestarted, err := watchConfigMaps(f, lastEventConfigMap.ObjectMeta.ResourceVersion, watchRestartedLabelValue)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create a new watch on configmaps from the last resource version %s observed by the first watch", lastEventConfigMap.ObjectMeta.ResourceVersion)
+framework.ExpectNoError(err, "failed to create a new watch on configmaps from the last resource version %s observed by the first watch", lastEventConfigMap.ObjectMeta.ResourceVersion)

ginkgo.By("deleting the configmap")
err = c.CoreV1().ConfigMaps(ns).Delete(testConfigMap.GetName(), nil)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete configmap %s in namespace: %s", configMapName, ns)
+framework.ExpectNoError(err, "failed to delete configmap %s in namespace: %s", configMapName, ns)

ginkgo.By("Expecting to observe notifications for all changes to the configmap since the first watch closed")
expectEvent(testWatchRestarted, watch.Modified, testConfigMapSecondUpdate)
@@ -266,23 +265,23 @@ var _ = SIGDescribe("Watchers", func() {

ginkgo.By("creating a watch on configmaps with a certain label")
testWatch, err := watchConfigMaps(f, "", toBeChangedLabelValue)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create a watch on configmap with label: %s", toBeChangedLabelValue)
+framework.ExpectNoError(err, "failed to create a watch on configmap with label: %s", toBeChangedLabelValue)

ginkgo.By("creating a new configmap")
testConfigMap, err = c.CoreV1().ConfigMaps(ns).Create(testConfigMap)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create configmap %s in namespace: %s", configMapName, ns)
+framework.ExpectNoError(err, "failed to create configmap %s in namespace: %s", configMapName, ns)

ginkgo.By("modifying the configmap once")
testConfigMapFirstUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
setConfigMapData(cm, "mutation", "1")
})
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to update configmap %s in namespace: %s", configMapName, ns)
+framework.ExpectNoError(err, "failed to update configmap %s in namespace: %s", configMapName, ns)

ginkgo.By("changing the label value of the configmap")
_, err = updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
cm.ObjectMeta.Labels[watchConfigMapLabelKey] = "wrong-value"
})
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to update configmap %s in namespace %s by changing label value", configMapName, ns)
+framework.ExpectNoError(err, "failed to update configmap %s in namespace %s by changing label value", configMapName, ns)

ginkgo.By("Expecting to observe a delete notification for the watched object")
expectEvent(testWatch, watch.Added, testConfigMap)
@@ -293,7 +292,7 @@ var _ = SIGDescribe("Watchers", func() {
testConfigMapSecondUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
setConfigMapData(cm, "mutation", "2")
})
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to update configmap %s in namespace %s a second time", configMapName, ns)
+framework.ExpectNoError(err, "failed to update configmap %s in namespace %s a second time", configMapName, ns)

ginkgo.By("Expecting not to observe a notification because the object no longer meets the selector's requirements")
expectNoEvent(testWatch, watch.Modified, testConfigMapSecondUpdate)
@@ -302,17 +301,17 @@ var _ = SIGDescribe("Watchers", func() {
testConfigMapLabelRestored, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
cm.ObjectMeta.Labels[watchConfigMapLabelKey] = toBeChangedLabelValue
})
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to update configmap %s in namespace %s by changing label value back", configMapName, ns)
+framework.ExpectNoError(err, "failed to update configmap %s in namespace %s by changing label value back", configMapName, ns)

ginkgo.By("modifying the configmap a third time")
testConfigMapThirdUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
setConfigMapData(cm, "mutation", "3")
})
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to update configmap %s in namespace %s a third time", configMapName, ns)
+framework.ExpectNoError(err, "failed to update configmap %s in namespace %s a third time", configMapName, ns)

ginkgo.By("deleting the configmap")
err = c.CoreV1().ConfigMaps(ns).Delete(testConfigMap.GetName(), nil)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete configmap %s in namespace: %s", configMapName, ns)
+framework.ExpectNoError(err, "failed to delete configmap %s in namespace: %s", configMapName, ns)

ginkgo.By("Expecting to observe an add notification for the watched object when the label value was restored")
expectEvent(testWatch, watch.Added, testConfigMapLabelRestored)
@@ -347,7 +346,7 @@ var _ = SIGDescribe("Watchers", func() {
resourceVersion := "0"
for i := 0; i < iterations; i++ {
wc, err := c.CoreV1().ConfigMaps(ns).Watch(metav1.ListOptions{ResourceVersion: resourceVersion})
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to watch configmaps in the namespace %s", ns)
+framework.ExpectNoError(err, "Failed to watch configmaps in the namespace %s", ns)
wcs = append(wcs, wc)
resourceVersion = waitForNextConfigMapEvent(wcs[0]).ResourceVersion
for _, wc := range wcs[1:] {
@@ -473,18 +472,18 @@ func produceConfigMapEvents(f *framework.Framework, stopc <-chan struct{}, minWa
case createEvent:
cm.Name = name(i)
_, err := c.CoreV1().ConfigMaps(ns).Create(cm)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to create configmap %s in namespace %s", cm.Name, ns)
+framework.ExpectNoError(err, "Failed to create configmap %s in namespace %s", cm.Name, ns)
existing = append(existing, i)
i++
case updateEvent:
idx := rand.Intn(len(existing))
cm.Name = name(existing[idx])
_, err := c.CoreV1().ConfigMaps(ns).Update(cm)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to update configmap %s in namespace %s", cm.Name, ns)
+framework.ExpectNoError(err, "Failed to update configmap %s in namespace %s", cm.Name, ns)
case deleteEvent:
idx := rand.Intn(len(existing))
err := c.CoreV1().ConfigMaps(ns).Delete(name(existing[idx]), &metav1.DeleteOptions{})
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to delete configmap %s in namespace %s", name(existing[idx]), ns)
+framework.ExpectNoError(err, "Failed to delete configmap %s in namespace %s", name(existing[idx]), ns)
existing = append(existing[:idx], existing[idx+1:]...)
default:
framework.Failf("Unsupported event operation: %d", op)

@@ -718,7 +718,7 @@ func testWebhook(f *framework.Framework) {
},
}
_, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(configmap)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create configmap %s in namespace: %s", configmap.Name, f.Namespace.Name)
+framework.ExpectNoError(err, "failed to create configmap %s in namespace: %s", configmap.Name, f.Namespace.Name)

ginkgo.By("update (PUT) the admitted configmap to a non-compliant one should be rejected by the webhook")
toNonCompliantFn := func(cm *v1.ConfigMap) {
@@ -755,7 +755,7 @@ func testWebhook(f *framework.Framework) {
ginkgo.By("create a configmap that violates the webhook policy but is in a whitelisted namespace")
configmap = nonCompliantConfigMap(f)
_, err = client.CoreV1().ConfigMaps(skippedNamespaceName).Create(configmap)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create configmap %s in namespace: %s", configmap.Name, skippedNamespaceName)
+framework.ExpectNoError(err, "failed to create configmap %s in namespace: %s", configmap.Name, skippedNamespaceName)
}

func testAttachingPodWebhook(f *framework.Framework) {
@@ -763,9 +763,9 @@ func testAttachingPodWebhook(f *framework.Framework) {
client := f.ClientSet
pod := toBeAttachedPod(f)
_, err := client.CoreV1().Pods(f.Namespace.Name).Create(pod)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create pod %s in namespace: %s", pod.Name, f.Namespace.Name)
+framework.ExpectNoError(err, "failed to create pod %s in namespace: %s", pod.Name, f.Namespace.Name)
err = framework.WaitForPodNameRunningInNamespace(client, pod.Name, f.Namespace.Name)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error while waiting for pod %s to go to Running phase in namespace: %s", pod.Name, f.Namespace.Name)
+framework.ExpectNoError(err, "error while waiting for pod %s to go to Running phase in namespace: %s", pod.Name, f.Namespace.Name)

ginkgo.By("'kubectl attach' the pod, should be denied by the webhook")
timer := time.NewTimer(30 * time.Second)
@@ -1345,7 +1345,7 @@ func testMutatingCustomResourceWebhook(f *framework.Framework, crd *apiextension
},
}
mutatedCR, err := customResourceClient.Create(cr, metav1.CreateOptions{})
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create custom resource %s in namespace: %s", crName, f.Namespace.Name)
+framework.ExpectNoError(err, "failed to create custom resource %s in namespace: %s", crName, f.Namespace.Name)
expectedCRData := map[string]interface{}{
"mutation-start": "yes",
"mutation-stage-1": "yes",
@@ -1374,17 +1374,17 @@ func testMultiVersionCustomResourceWebhook(f *framework.Framework, testcrd *crd.
},
}
_, err := customResourceClient.Create(cr, metav1.CreateOptions{})
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create custom resource %s in namespace: %s", crName, f.Namespace.Name)
+framework.ExpectNoError(err, "failed to create custom resource %s in namespace: %s", crName, f.Namespace.Name)

ginkgo.By("Patching Custom Resource Definition to set v2 as storage")
apiVersionWithV2StoragePatch := fmt.Sprint(`{"spec": {"versions": [{"name": "v1", "storage": false, "served": true},{"name": "v2", "storage": true, "served": true}]}}`)
_, err = testcrd.APIExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Patch(testcrd.Crd.Name, types.StrategicMergePatchType, []byte(apiVersionWithV2StoragePatch))
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to patch custom resource definition %s in namespace: %s", testcrd.Crd.Name, f.Namespace.Name)
+framework.ExpectNoError(err, "failed to patch custom resource definition %s in namespace: %s", testcrd.Crd.Name, f.Namespace.Name)

ginkgo.By("Patching the custom resource while v2 is storage version")
crDummyPatch := fmt.Sprint(`[{ "op": "add", "path": "/dummy", "value": "test" }]`)
_, err = testcrd.DynamicClients["v2"].Patch(crName, types.JSONPatchType, []byte(crDummyPatch), metav1.PatchOptions{})
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to patch custom resource %s in namespace: %s", crName, f.Namespace.Name)
+framework.ExpectNoError(err, "failed to patch custom resource %s in namespace: %s", crName, f.Namespace.Name)
}

func registerValidatingWebhookForCRD(f *framework.Framework, context *certContext) func() {

@@ -69,13 +69,13 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
ginkgo.AfterEach(func() {
// Clean up
daemonsets, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).List(metav1.ListOptions{})
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unable to dump DaemonSets")
+framework.ExpectNoError(err, "unable to dump DaemonSets")
if daemonsets != nil && len(daemonsets.Items) > 0 {
for _, ds := range daemonsets.Items {
ginkgo.By(fmt.Sprintf("Deleting DaemonSet %q", ds.Name))
framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(f.ClientSet, extensionsinternal.Kind("DaemonSet"), f.Namespace.Name, ds.Name))
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, &ds))
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to be reaped")
+framework.ExpectNoError(err, "error waiting for daemon pod to be reaped")
}
}
if daemonsets, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).List(metav1.ListOptions{}); err == nil {
@@ -128,7 +128,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {

ginkgo.By("Check that daemon pods launch on every node of the cluster.")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to start")
+framework.ExpectNoError(err, "error waiting for daemon pod to start")
err = checkDaemonStatus(f, dsName)
gomega.Expect(err).NotTo(gomega.HaveOccurred())

@@ -138,7 +138,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
err = c.CoreV1().Pods(ns).Delete(pod.Name, nil)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to revive")
+framework.ExpectNoError(err, "error waiting for daemon pod to revive")
})

/*
@@ -157,24 +157,24 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {

ginkgo.By("Initially, daemon pods should not be running on any nodes.")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pods to be running on no nodes")
+framework.ExpectNoError(err, "error waiting for daemon pods to be running on no nodes")

ginkgo.By("Change node label to blue, check that daemon pod is launched.")
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
gomega.Expect(len(nodeList.Items)).To(gomega.BeNumerically(">", 0))
newNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error setting labels on node")
+framework.ExpectNoError(err, "error setting labels on node")
daemonSetLabels, _ := separateDaemonSetNodeLabels(newNode.Labels)
gomega.Expect(len(daemonSetLabels)).To(gomega.Equal(1))
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodOnNodes(f, ds, []string{newNode.Name}))
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pods to be running on new nodes")
+framework.ExpectNoError(err, "error waiting for daemon pods to be running on new nodes")
err = checkDaemonStatus(f, dsName)
gomega.Expect(err).NotTo(gomega.HaveOccurred())

ginkgo.By("Update the node label to green, and wait for daemons to be unscheduled")
nodeSelector[daemonsetColorLabel] = "green"
greenNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error removing labels on node")
+framework.ExpectNoError(err, "error removing labels on node")
gomega.Expect(wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))).
NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to not be running on nodes")

@@ -182,11 +182,11 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
patch := fmt.Sprintf(`{"spec":{"template":{"spec":{"nodeSelector":{"%s":"%s"}}},"updateStrategy":{"type":"RollingUpdate"}}}`,
daemonsetColorLabel, greenNode.Labels[daemonsetColorLabel])
ds, err = c.AppsV1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch))
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error patching daemon set")
+framework.ExpectNoError(err, "error patching daemon set")
daemonSetLabels, _ = separateDaemonSetNodeLabels(greenNode.Labels)
gomega.Expect(len(daemonSetLabels)).To(gomega.Equal(1))
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodOnNodes(f, ds, []string{greenNode.Name}))
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pods to be running on new nodes")
+framework.ExpectNoError(err, "error waiting for daemon pods to be running on new nodes")
err = checkDaemonStatus(f, dsName)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
})
@@ -220,23 +220,23 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {

ginkgo.By("Initially, daemon pods should not be running on any nodes.")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pods to be running on no nodes")
+framework.ExpectNoError(err, "error waiting for daemon pods to be running on no nodes")

ginkgo.By("Change node label to blue, check that daemon pod is launched.")
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
gomega.Expect(len(nodeList.Items)).To(gomega.BeNumerically(">", 0))
newNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error setting labels on node")
+framework.ExpectNoError(err, "error setting labels on node")
daemonSetLabels, _ := separateDaemonSetNodeLabels(newNode.Labels)
gomega.Expect(len(daemonSetLabels)).To(gomega.Equal(1))
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodOnNodes(f, ds, []string{newNode.Name}))
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pods to be running on new nodes")
+framework.ExpectNoError(err, "error waiting for daemon pods to be running on new nodes")
err = checkDaemonStatus(f, dsName)
gomega.Expect(err).NotTo(gomega.HaveOccurred())

ginkgo.By("Remove the node label and wait for daemons to be unscheduled")
_, err = setDaemonSetNodeLabels(c, nodeList.Items[0].Name, map[string]string{})
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error removing labels on node")
+framework.ExpectNoError(err, "error removing labels on node")
gomega.Expect(wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))).
NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to not be running on nodes")
})
@@ -254,7 +254,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {

ginkgo.By("Check that daemon pods launch on every node of the cluster.")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to start")
+framework.ExpectNoError(err, "error waiting for daemon pod to start")
err = checkDaemonStatus(f, dsName)
gomega.Expect(err).NotTo(gomega.HaveOccurred())

@@ -264,13 +264,13 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
pod.ResourceVersion = ""
pod.Status.Phase = v1.PodFailed
_, err = c.CoreV1().Pods(ns).UpdateStatus(&pod)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error failing a daemon pod")
+framework.ExpectNoError(err, "error failing a daemon pod")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to revive")
+framework.ExpectNoError(err, "error waiting for daemon pod to revive")

ginkgo.By("Wait for the failed daemon pod to be completely deleted.")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, waitFailedDaemonPodDeleted(c, &pod))
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for the failed daemon pod to be completely deleted")
+framework.ExpectNoError(err, "error waiting for the failed daemon pod to be completely deleted")
})

// This test should not be added to conformance. We will consider deprecating OnDelete when the
@@ -286,7 +286,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {

ginkgo.By("Check that daemon pods launch on every node of the cluster.")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to start")
+framework.ExpectNoError(err, "error waiting for daemon pod to start")

// Check history and labels
ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
@@ -308,7 +308,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {

ginkgo.By("Check that daemon pods are still running on every node of the cluster.")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to start")
+framework.ExpectNoError(err, "error waiting for daemon pod to start")

// Check history and labels
ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
@@ -335,7 +335,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {

ginkgo.By("Check that daemon pods launch on every node of the cluster.")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to start")
+framework.ExpectNoError(err, "error waiting for daemon pod to start")

// Check history and labels
ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
@@ -364,7 +364,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {

ginkgo.By("Check that daemon pods are still running on every node of the cluster.")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to start")
+framework.ExpectNoError(err, "error waiting for daemon pod to start")

// Check history and labels
ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
@@ -393,7 +393,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {

e2elog.Logf("Check that daemon pods launch on every node of the cluster")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to start")
+framework.ExpectNoError(err, "error waiting for daemon pod to start")

e2elog.Logf("Update the DaemonSet to trigger a rollout")
// We use a nonexistent image here, so that we make sure it won't finish
@@ -741,7 +741,7 @@ func waitForHistoryCreated(c clientset.Interface, ns string, label map[string]st
return false, nil
}
err := wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, listHistoryFn)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for controllerrevisions to be created")
+framework.ExpectNoError(err, "error waiting for controllerrevisions to be created")
}

func listDaemonHistories(c clientset.Interface, ns string, label map[string]string) *apps.ControllerRevisionList {

@@ -274,7 +274,7 @@ func testRollingUpdateDeployment(f *framework.Framework) {
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// Verify that the required pods have come up.
err = framework.VerifyPodsRunning(c, ns, "sample-pod", false, replicas)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error in waiting for pods to come up: %s", err)
framework.ExpectNoError(err, "error in waiting for pods to come up: %s", err)

// Create a deployment to delete nginx pods and instead bring up redis pods.
deploymentName := "test-rolling-update-deployment"

@@ -350,14 +350,14 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) {

// Verify that the required pods have come up.
err = framework.VerifyPodsRunning(c, ns, "cleanup-pod", false, replicas)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error in waiting for pods to come up: %v", err)
framework.ExpectNoError(err, "error in waiting for pods to come up: %v", err)

// Create a deployment to delete nginx pods and instead bring up redis pods.
deploymentName := "test-cleanup-deployment"
e2elog.Logf("Creating deployment %s", deploymentName)

pods, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to query for pods: %v", err)
framework.ExpectNoError(err, "Failed to query for pods: %v", err)

options := metav1.ListOptions{
ResourceVersion: pods.ListMeta.ResourceVersion,

@@ -420,7 +420,7 @@ func testRolloverDeployment(f *framework.Framework) {
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// Verify that the required pods have come up.
err = framework.VerifyPodsRunning(c, ns, podName, false, rsReplicas)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error in waiting for pods to come up: %v", err)
framework.ExpectNoError(err, "error in waiting for pods to come up: %v", err)

// Wait for replica set to become ready before adopting it.
e2elog.Logf("Waiting for pods owned by replica set %q to become ready", rsName)

@@ -803,7 +803,7 @@ func testDeploymentsControllerRef(f *framework.Framework) {

ginkgo.By("Wait for the ReplicaSet to be orphaned")
err = wait.Poll(dRetryPeriod, dRetryTimeout, waitDeploymentReplicaSetsOrphaned(c, ns, podLabels))
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for Deployment ReplicaSet to be orphaned")
framework.ExpectNoError(err, "error waiting for Deployment ReplicaSet to be orphaned")

deploymentName = "test-adopt-deployment"
e2elog.Logf("Creating Deployment %q to adopt the ReplicaSet", deploymentName)

@@ -852,7 +852,7 @@ func testProportionalScalingDeployment(f *framework.Framework) {
// Verify that the required pods have come up.
e2elog.Logf("Waiting for all required pods to come up")
err = framework.VerifyPodsRunning(c, ns, NginxImageName, false, *(deployment.Spec.Replicas))
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error in waiting for pods to come up: %v", err)
framework.ExpectNoError(err, "error in waiting for pods to come up: %v", err)

e2elog.Logf("Waiting for deployment %q to complete", deployment.Name)
gomega.Expect(e2edeploy.WaitForDeploymentComplete(c, deployment)).NotTo(gomega.HaveOccurred())
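
In the deployment hunks the asserted call is framework.VerifyPodsRunning, and the original messages are kept as-is, so err is both the value being asserted on and a %v/%s argument in the explanation. A compact sketch of that pattern, assuming the framework API used above; the wrapper and package names are hypothetical:

package apps // placeholder package name for the sketch

import (
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
)

// expectPodsRunning waits for `replicas` running pods selected by name, then
// fails the spec with the formatted message if the wait returned an error.
func expectPodsRunning(c clientset.Interface, ns, name string, replicas int32) {
	err := framework.VerifyPodsRunning(c, ns, name, false, replicas)
	framework.ExpectNoError(err, "error in waiting for pods to come up: %v", err)
}
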
@@ -346,7 +346,7 @@ func doTestMustConnectSendDisconnect(bindAddress string, f *framework.Framework)

func doTestOverWebSockets(bindAddress string, f *framework.Framework) {
config, err := framework.LoadConfig()
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unable to get base config")
framework.ExpectNoError(err, "unable to get base config")

ginkgo.By("Creating the pod")
pod := pfPod("def", "10", "10", "100", fmt.Sprintf("%s", bindAddress))
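
The port-forward hunk applies the same substitution to config loading. A short sketch, assuming framework.LoadConfig returns (*rest.Config, error) as in this era of the framework; the helper and package names are hypothetical:

package network // placeholder package name for the sketch

import (
	restclient "k8s.io/client-go/rest"

	"k8s.io/kubernetes/test/e2e/framework"
)

// mustLoadConfig loads the base REST config and fails the spec on error.
func mustLoadConfig() *restclient.Config {
	config, err := framework.LoadConfig()
	framework.ExpectNoError(err, "unable to get base config")
	return config
}
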
@@ -113,14 +113,14 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() {
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options := metav1.ListOptions{LabelSelector: selector.String()}
pods, err := podClient.List(options)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pod")
framework.ExpectNoError(err, "failed to query for pod")
gomega.Expect(len(pods.Items)).To(gomega.Equal(0))
options = metav1.ListOptions{
LabelSelector: selector.String(),
ResourceVersion: pods.ListMeta.ResourceVersion,
}
w, err := podClient.Watch(options)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to set up watch")
framework.ExpectNoError(err, "failed to set up watch")

ginkgo.By("submitting the pod to kubernetes")
podClient.Create(pod)

@@ -129,7 +129,7 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() {
selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options = metav1.ListOptions{LabelSelector: selector.String()}
pods, err = podClient.List(options)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pod")
framework.ExpectNoError(err, "failed to query for pod")
gomega.Expect(len(pods.Items)).To(gomega.Equal(1))

ginkgo.By("verifying pod creation was observed")

@@ -149,7 +149,7 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() {
ginkgo.By("ensuring pod is modified")
// save the running pod
pod, err = podClient.Get(pod.Name, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to GET scheduled pod")
framework.ExpectNoError(err, "failed to GET scheduled pod")

// check the annotation is there
if _, ok := pod.Annotations["podpreset.admission.kubernetes.io/podpreset-hello"]; !ok {

@@ -233,14 +233,14 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() {
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options := metav1.ListOptions{LabelSelector: selector.String()}
pods, err := podClient.List(options)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pod")
framework.ExpectNoError(err, "failed to query for pod")
gomega.Expect(len(pods.Items)).To(gomega.Equal(0))
options = metav1.ListOptions{
LabelSelector: selector.String(),
ResourceVersion: pods.ListMeta.ResourceVersion,
}
w, err := podClient.Watch(options)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to set up watch")
framework.ExpectNoError(err, "failed to set up watch")

ginkgo.By("submitting the pod to kubernetes")
podClient.Create(originalPod)

@@ -249,7 +249,7 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() {
selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options = metav1.ListOptions{LabelSelector: selector.String()}
pods, err = podClient.List(options)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pod")
framework.ExpectNoError(err, "failed to query for pod")
gomega.Expect(len(pods.Items)).To(gomega.Equal(1))

ginkgo.By("verifying pod creation was observed")

@@ -269,7 +269,7 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() {
ginkgo.By("ensuring pod is modified")
// save the running pod
pod, err := podClient.Get(originalPod.Name, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to GET scheduled pod")
framework.ExpectNoError(err, "failed to GET scheduled pod")

// check the annotation is not there
if _, ok := pod.Annotations["podpreset.admission.kubernetes.io/podpreset-hello"]; ok {
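
The PodPreset hunks pair each List with a Watch started at the list's ResourceVersion, so no pod events are missed between the two calls; only the error assertions change. A sketch of that list-then-watch step with the new helper, assuming the pre-context client-go typed client used above; the function and package names are hypothetical:

package settings // placeholder package name for the sketch

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/watch"
	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/kubernetes/test/e2e/framework"
)

// listThenWatch lists pods matching sel, then opens a watch starting at the
// list's ResourceVersion; both errors fail the spec via ExpectNoError.
func listThenWatch(podClient v1core.PodInterface, sel labels.Selector) watch.Interface {
	pods, err := podClient.List(metav1.ListOptions{LabelSelector: sel.String()})
	framework.ExpectNoError(err, "failed to query for pod")

	w, err := podClient.Watch(metav1.ListOptions{
		LabelSelector:   sel.String(),
		ResourceVersion: pods.ListMeta.ResourceVersion,
	})
	framework.ExpectNoError(err, "failed to set up watch")
	return w
}
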
@@ -125,7 +125,7 @@ func doReadWriteReadOnlyTest(f *framework.Framework, source v1.VolumeSource, vol

stdoutRW, stderrRW, errRW := f.ExecCommandInContainerWithFullOutput(podName, rwcontainerName, cmd...)
msg := fmt.Sprintf("cmd: %v, stdout: %q, stderr: %q", cmd, stdoutRW, stderrRW)
gomega.Expect(errRW).NotTo(gomega.HaveOccurred(), msg)
framework.ExpectNoError(errRW, msg)

_, stderr, _ := f.ExecCommandInContainerWithFullOutput(podName, containerName, cmd...)
gomega.Expect(stderr).To(gomega.Equal("Access is denied."))

@@ -134,7 +134,7 @@ func doReadWriteReadOnlyTest(f *framework.Framework, source v1.VolumeSource, vol
readout, readerr, err := f.ExecCommandInContainerWithFullOutput(podName, containerName, readcmd...)
readmsg := fmt.Sprintf("cmd: %v, stdout: %q, stderr: %q", readcmd, readout, readerr)
gomega.Expect(readout).To(gomega.Equal("windows-volume-test"))
gomega.Expect(err).NotTo(gomega.HaveOccurred(), readmsg)
framework.ExpectNoError(err, readmsg)
}

func testPodWithROVolume(podName string, source v1.VolumeSource, path string) *v1.Pod {
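
The Windows volume hunks assert on the error returned by the framework's exec helper while keeping the command, stdout, and stderr in the failure message. A sketch of that wrapper, assuming the f.ExecCommandInContainerWithFullOutput method used above; the function and package names are hypothetical:

package windows // placeholder package name for the sketch

import (
	"fmt"

	"k8s.io/kubernetes/test/e2e/framework"
)

// execExpectNoError runs cmd in the given container and fails the spec with a
// message capturing the command, stdout, and stderr if the exec errored.
func execExpectNoError(f *framework.Framework, podName, containerName string, cmd ...string) string {
	stdout, stderr, err := f.ExecCommandInContainerWithFullOutput(podName, containerName, cmd...)
	msg := fmt.Sprintf("cmd: %v, stdout: %q, stderr: %q", cmd, stdout, stderr)
	framework.ExpectNoError(err, msg)
	return stdout
}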