Use ExpectNoError() for scalability and scheduling

The e2e test framework provides ExpectNoError() for more readable test code.
This replaces Expect(err).NotTo(HaveOccurred()) with framework.ExpectNoError() throughout the scalability and scheduling e2e tests.
k3s-v1.15.3
Kenichi Omichi 2019-05-15 00:54:53 +00:00
parent 4d3d153210
commit 48f1f74a51
8 changed files with 58 additions and 55 deletions
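For context, a minimal sketch of the call-site change repeated in the diff below, together with an approximation of the helper it switches to. The real ExpectNoError lives in test/e2e/framework and also logs the failure with a caller offset; the package clause, import path, and function body shown here are illustrative assumptions, not the framework's exact code.

// Call-site change applied across these files:
//   before: gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to set up watch")
//   after:  framework.ExpectNoError(err, "failed to set up watch")

package framework // illustrative sketch only; see test/e2e/framework for the real definition

import "github.com/onsi/gomega"

// ExpectNoError fails the test if err is non-nil. The optional explain
// arguments are passed through to the underlying gomega assertion.
func ExpectNoError(err error, explain ...interface{}) {
	gomega.ExpectWithOffset(1, err).NotTo(gomega.HaveOccurred(), explain...)
}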


@@ -347,7 +347,7 @@ func createClients(numberOfClients int) ([]clientset.Interface, []scaleclient.Sc
for i := 0; i < numberOfClients; i++ {
config, err := framework.LoadConfig()
Expect(err).NotTo(HaveOccurred())
framework.ExpectNoError(err)
config.QPS = 100
config.Burst = 200
if framework.TestContext.KubeAPIContentType != "" {


@@ -62,7 +62,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
// cannot be run in parallel with any other test that touches Nodes or Pods.
// It is so because we need to have precise control on what's running in the cluster.
systemPods, err := framework.GetPodsInNamespace(cs, ns, map[string]string{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
systemPodsNo = 0
for _, pod := range systemPods {
if !masterNodes.Has(pod.Spec.NodeName) && pod.DeletionTimestamp == nil {
@@ -71,7 +71,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
}
err = framework.WaitForPodsRunningReady(cs, api.NamespaceSystem, int32(systemPodsNo), int32(systemPodsNo), framework.PodReadyBeforeTimeout, map[string]string{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
for _, node := range nodeList.Items {
e2elog.Logf("\nLogging pods the kubelet thinks is on node %v before test", node.Name)


@@ -58,18 +58,18 @@ var _ = SIGDescribe("LimitRange", func() {
selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": limitRange.Name}))
options := metav1.ListOptions{LabelSelector: selector.String()}
limitRanges, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).List(options)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for limitRanges")
framework.ExpectNoError(err, "failed to query for limitRanges")
gomega.Expect(len(limitRanges.Items)).To(gomega.Equal(0))
options = metav1.ListOptions{
LabelSelector: selector.String(),
ResourceVersion: limitRanges.ListMeta.ResourceVersion,
}
w, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Watch(metav1.ListOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to set up watch")
framework.ExpectNoError(err, "failed to set up watch")
ginkgo.By("Submitting a LimitRange")
limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Create(limitRange)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
ginkgo.By("Verifying LimitRange creation was observed")
select {
@@ -83,37 +83,37 @@ var _ = SIGDescribe("LimitRange", func() {
ginkgo.By("Fetching the LimitRange to ensure it has proper values")
limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Get(limitRange.Name, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
expected := v1.ResourceRequirements{Requests: defaultRequest, Limits: defaultLimit}
actual := v1.ResourceRequirements{Requests: limitRange.Spec.Limits[0].DefaultRequest, Limits: limitRange.Spec.Limits[0].Default}
err = equalResourceRequirement(expected, actual)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
ginkgo.By("Creating a Pod with no resource requirements")
pod := f.NewTestPod("pod-no-resources", v1.ResourceList{}, v1.ResourceList{})
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
ginkgo.By("Ensuring Pod has resource requirements applied from LimitRange")
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
for i := range pod.Spec.Containers {
err = equalResourceRequirement(expected, pod.Spec.Containers[i].Resources)
if err != nil {
// Print the pod to help in debugging.
e2elog.Logf("Pod %+v does not have the expected requirements", pod)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
}
}
ginkgo.By("Creating a Pod with partial resource requirements")
pod = f.NewTestPod("pod-partial-resources", getResourceList("", "150Mi", "150Gi"), getResourceList("300m", "", ""))
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
ginkgo.By("Ensuring Pod has merged resource requirements applied from LimitRange")
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
// This is an interesting case, so it's worth a comment
// If you specify a Limit, and no Request, the Limit will default to the Request
// This means that the LimitRange.DefaultRequest will ONLY take affect if a container.resources.limit is not supplied
@@ -123,7 +123,7 @@ var _ = SIGDescribe("LimitRange", func() {
if err != nil {
// Print the pod to help in debugging.
e2elog.Logf("Pod %+v does not have the expected requirements", pod)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
}
}
@@ -141,19 +141,20 @@ var _ = SIGDescribe("LimitRange", func() {
newMin := getResourceList("9m", "49Mi", "49Gi")
limitRange.Spec.Limits[0].Min = newMin
limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Update(limitRange)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
ginkgo.By("Verifying LimitRange updating is effective")
gomega.Expect(wait.Poll(time.Second*2, time.Second*20, func() (bool, error) {
err = wait.Poll(time.Second*2, time.Second*20, func() (bool, error) {
limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Get(limitRange.Name, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
return reflect.DeepEqual(limitRange.Spec.Limits[0].Min, newMin), nil
})).NotTo(gomega.HaveOccurred())
})
framework.ExpectNoError(err)
ginkgo.By("Creating a Pod with less than former min resources")
pod = f.NewTestPod(podName, getResourceList("10m", "50Mi", "50Gi"), v1.ResourceList{})
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
ginkgo.By("Failing to create a Pod with more than max resources")
pod = f.NewTestPod(podName, getResourceList("600m", "600Mi", "600Gi"), v1.ResourceList{})
@@ -162,10 +163,10 @@ var _ = SIGDescribe("LimitRange", func() {
ginkgo.By("Deleting a LimitRange")
err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Delete(limitRange.Name, metav1.NewDeleteOptions(30))
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
ginkgo.By("Verifying the LimitRange was deleted")
gomega.Expect(wait.Poll(time.Second*5, time.Second*30, func() (bool, error) {
err = wait.Poll(time.Second*5, time.Second*30, func() (bool, error) {
selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": limitRange.Name}))
options := metav1.ListOptions{LabelSelector: selector.String()}
limitRanges, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).List(options)
@@ -190,12 +191,13 @@ var _ = SIGDescribe("LimitRange", func() {
return false, nil
})).NotTo(gomega.HaveOccurred(), "kubelet never observed the termination notice")
})
framework.ExpectNoError(err, "kubelet never observed the termination notice")
ginkgo.By("Creating a Pod with more than former max resources")
pod = f.NewTestPod(podName+"2", getResourceList("600m", "600Mi", "600Gi"), v1.ResourceList{})
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
})
})
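Note that the LimitRange hunks above also restructure the polling checks: instead of wrapping the entire wait.Poll call in a gomega matcher, the returned error is captured and handed to the framework helper. A condensed view of the new shape, assembled from the added lines above (an excerpt, not a complete test):

err = wait.Poll(time.Second*2, time.Second*20, func() (bool, error) {
	limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Get(limitRange.Name, metav1.GetOptions{})
	gomega.Expect(err).NotTo(gomega.HaveOccurred())
	return reflect.DeepEqual(limitRange.Spec.Limits[0].Min, newMin), nil
})
framework.ExpectNoError(err)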


@@ -131,7 +131,7 @@ func SetupNVIDIAGPUNode(f *framework.Framework, setupResourceGatherer bool) *fra
e2elog.Logf("Using %v", dsYamlURL)
// Creates the DaemonSet that installs Nvidia Drivers.
ds, err := framework.DsFromManifest(dsYamlURL)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
ds.Namespace = f.Namespace.Name
_, err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(ds)
framework.ExpectNoError(err, "failed to create nvidia-driver-installer daemonset")


@@ -713,7 +713,7 @@ func WaitForSchedulerAfterAction(f *framework.Framework, action common.Action, n
predicate = scheduleSuccessEvent(ns, podName, "" /* any node */)
}
success, err := common.ObserveEventAfterAction(f, predicate, action)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
gomega.Expect(success).To(gomega.Equal(true))
}


@@ -82,7 +82,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
err := framework.CheckTestingNSDeletedExcept(cs, ns)
framework.ExpectNoError(err)
err = framework.WaitForPodsRunningReady(cs, metav1.NamespaceSystem, int32(systemPodsNo), 0, framework.PodReadyBeforeTimeout, map[string]string{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
})
ginkgo.It("Pod should be scheduled to node that don't match the PodAntiAffinity terms", func() {
@@ -191,7 +191,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
return node.Annotations[v1.PreferAvoidPodsAnnotationKey] == string(val)
}
success, err := common.ObserveNodeUpdateAfterAction(f, nodeName, predicate, action)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
gomega.Expect(success).To(gomega.Equal(true))
defer framework.RemoveAvoidPodsOffNode(cs, nodeName)
@@ -202,7 +202,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
testPods, err := cs.CoreV1().Pods(ns).List(metav1.ListOptions{
LabelSelector: "name=scheduler-priority-avoid-pod",
})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
ginkgo.By(fmt.Sprintf("Verify the pods should not scheduled to the node: %s", nodeName))
for _, pod := range testPods.Items {
gomega.Expect(pod.Spec.NodeName).NotTo(gomega.Equal(nodeName))
@@ -235,7 +235,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
ginkgo.By("Pod should prefer scheduled to the node don't have the taint.")
tolePod, err := cs.CoreV1().Pods(ns).Get(tolerationPodName, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
gomega.Expect(tolePod.Spec.NodeName).To(gomega.Equal(nodeName))
ginkgo.By("Trying to apply 10 taint on the first node.")
@@ -255,7 +255,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
ginkgo.By("Pod should prefer scheduled to the node that pod can tolerate.")
tolePod, err = cs.CoreV1().Pods(ns).Get(tolerationPodName, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
gomega.Expect(tolePod.Spec.NodeName).To(gomega.Equal(nodeName))
})
})
@@ -400,7 +400,7 @@ func createRC(ns, rsName string, replicas int32, rcPodLabels map[string]string,
},
}
rc, err := f.ClientSet.CoreV1().ReplicationControllers(ns).Create(rc)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
return rc
}


@@ -43,7 +43,7 @@ var _ = SIGDescribe("Multi-AZ Clusters", func() {
framework.SkipUnlessProviderIs("gce", "gke", "aws")
if zoneCount <= 0 {
zoneCount, err = getZoneCount(f.ClientSet)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
}
ginkgo.By(fmt.Sprintf("Checking for multi-zone cluster. Zone count = %d", zoneCount))
msg := fmt.Sprintf("Zone count is %d, only run for multi-zone clusters, skipping test", zoneCount)
@@ -80,7 +80,7 @@ func SpreadServiceOrFail(f *framework.Framework, replicaCount int, image string)
},
}
_, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(serviceSpec)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
// Now create some pods behind the service
podSpec := &v1.Pod{
@@ -107,11 +107,11 @@ func SpreadServiceOrFail(f *framework.Framework, replicaCount int, image string)
// Wait for all of them to be scheduled
selector := labels.SelectorFromSet(labels.Set(map[string]string{"service": serviceName}))
pods, err := framework.WaitForPodsWithLabelScheduled(f.ClientSet, f.Namespace.Name, selector)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
// Now make sure they're spread across zones
zoneNames, err := framework.GetClusterZones(f.ClientSet)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
gomega.Expect(checkZoneSpreading(f.ClientSet, pods, zoneNames.List())).To(gomega.Equal(true))
}
@@ -139,7 +139,7 @@ func getZoneCount(c clientset.Interface) (int, error) {
func getZoneNameForPod(c clientset.Interface, pod v1.Pod) (string, error) {
ginkgo.By(fmt.Sprintf("Getting zone name for pod %s, on node %s", pod.Name, pod.Spec.NodeName))
node, err := c.CoreV1().Nodes().Get(pod.Spec.NodeName, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
return getZoneNameForNode(*node)
}
@@ -155,7 +155,7 @@ func checkZoneSpreading(c clientset.Interface, pods *v1.PodList, zoneNames []str
continue
}
zoneName, err := getZoneNameForPod(c, pod)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
podsPerZone[zoneName] = podsPerZone[zoneName] + 1
}
minPodsPerZone := math.MaxInt32
@@ -205,7 +205,7 @@ func SpreadRCOrFail(f *framework.Framework, replicaCount int32, image string) {
},
},
})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
// Cleanup the replication controller when we are done.
defer func() {
// Resize the replication controller to zero to get rid of pods.
@@ -216,15 +216,15 @@ func SpreadRCOrFail(f *framework.Framework, replicaCount int32, image string) {
// List the pods, making sure we observe all the replicas.
selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, name, replicaCount)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
// Wait for all of them to be scheduled
ginkgo.By(fmt.Sprintf("Waiting for %d replicas of %s to be scheduled. Selector: %v", replicaCount, name, selector))
pods, err = framework.WaitForPodsWithLabelScheduled(f.ClientSet, f.Namespace.Name, selector)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
// Now make sure they're spread across zones
zoneNames, err := framework.GetClusterZones(f.ClientSet)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
gomega.Expect(checkZoneSpreading(f.ClientSet, pods, zoneNames.List())).To(gomega.Equal(true))
}


@@ -42,7 +42,7 @@ var _ = SIGDescribe("Multi-AZ Cluster Volumes [sig-storage]", func() {
framework.SkipUnlessProviderIs("gce", "gke")
if zoneCount <= 0 {
zoneCount, err = getZoneCount(f.ClientSet)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
}
ginkgo.By(fmt.Sprintf("Checking for multi-zone cluster. Zone count = %d", zoneCount))
msg := fmt.Sprintf("Zone count is %d, only run for multi-zone clusters, skipping test", zoneCount)
@@ -61,17 +61,17 @@ var _ = SIGDescribe("Multi-AZ Cluster Volumes [sig-storage]", func() {
// OnlyAllowNodeZones tests that GetAllCurrentZones returns only zones with Nodes
func OnlyAllowNodeZones(f *framework.Framework, zoneCount int, image string) {
gceCloud, err := gce.GetGCECloud()
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
// Get all the zones that the nodes are in
expectedZones, err := gceCloud.GetAllZonesFromCloudProvider()
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
e2elog.Logf("Expected zones: %v", expectedZones)
// Get all the zones in this current region
region := gceCloud.Region()
allZonesInRegion, err := gceCloud.ListZonesInRegion(region)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
var extraZone string
for _, zone := range allZonesInRegion {
@@ -117,13 +117,13 @@ func OnlyAllowNodeZones(f *framework.Framework, zoneCount int, image string) {
}
err = gceCloud.InsertInstance(project, zone, rb)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
defer func() {
// Teardown of the compute instance
e2elog.Logf("Deleting compute resource: %v", name)
err := gceCloud.DeleteInstance(project, zone, name)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
}()
ginkgo.By("Creating zoneCount+1 PVCs and making sure PDs are only provisioned in zones with nodes")
@@ -136,7 +136,7 @@ func OnlyAllowNodeZones(f *framework.Framework, zoneCount int, image string) {
for index := 1; index <= zoneCount+1; index++ {
pvc := newNamedDefaultClaim(ns, index)
pvc, err = framework.CreatePVC(c, ns, pvc)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
pvcList = append(pvcList, pvc)
// Defer the cleanup
@@ -152,7 +152,7 @@ func OnlyAllowNodeZones(f *framework.Framework, zoneCount int, image string) {
// Wait for all claims bound
for _, claim := range pvcList {
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
}
pvZones := sets.NewString()
@@ -160,11 +160,12 @@ func OnlyAllowNodeZones(f *framework.Framework, zoneCount int, image string) {
for _, claim := range pvcList {
// Get a new copy of the claim to have all fields populated
claim, err = c.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
// Get the related PV
pv, err := c.CoreV1().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
pvZone, ok := pv.ObjectMeta.Labels[v1.LabelZoneFailureDomain]
gomega.Expect(ok).To(gomega.BeTrue(), "PV has no LabelZone to be found")
@@ -188,7 +189,7 @@ func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string)
ns := f.Namespace.Name
zones, err := framework.GetClusterZones(c)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
zonelist := zones.List()
ginkgo.By("Creating static PVs across zones")
configs := make([]*staticPVTestConfig, podCount)
@@ -205,14 +206,14 @@ func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string)
framework.WaitForPodNoLongerRunningInNamespace(c, config.pod.Name, ns)
framework.PVPVCCleanup(c, ns, config.pv, config.pvc)
err = framework.DeletePVSource(config.pvSource)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
}
}()
for i, config := range configs {
zone := zonelist[i%len(zones)]
config.pvSource, err = framework.CreatePVSource(zone)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
pvConfig := framework.PersistentVolumeConfig{
NamePrefix: "multizone-pv",
@@ -223,7 +224,7 @@ func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string)
pvcConfig := framework.PersistentVolumeClaimConfig{StorageClassName: &className}
config.pv, config.pvc, err = framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, true)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
}
ginkgo.By("Waiting for all PVCs to be bound")
@@ -235,13 +236,13 @@ func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string)
for _, config := range configs {
podConfig := framework.MakePod(ns, nil, []*v1.PersistentVolumeClaim{config.pvc}, false, "")
config.pod, err = c.CoreV1().Pods(ns).Create(podConfig)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
}
ginkgo.By("Waiting for all pods to be running")
for _, config := range configs {
err = framework.WaitForPodRunningInNamespace(c, config.pod)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
}
}