remove dot imports in e2e/scheduling

k3s-v1.15.3
danielqsj 2019-05-10 11:43:18 +08:00
parent 44301940ea
commit 15a4342fe8
10 changed files with 303 additions and 303 deletions
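The change applied across these files is mechanical: the Ginkgo and Gomega dot imports are replaced with ordinary qualified imports, so every bare It, By, BeforeEach, AfterEach, Expect, Eventually, etc. becomes ginkgo.X or gomega.X. As a minimal, self-contained sketch of the resulting style (a hypothetical example suite, not one of the files changed in this commit):

// Illustrative only: a tiny Ginkgo v1 / Gomega spec written with qualified
// imports, mirroring the style this commit converts e2e/scheduling to.
// Package and spec names here are hypothetical.
package example_test

import (
	"testing"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

// TestExample wires Gomega's fail handler into Ginkgo and runs the suite.
func TestExample(t *testing.T) {
	gomega.RegisterFailHandler(ginkgo.Fail)
	ginkgo.RunSpecs(t, "Example Suite")
}

var _ = ginkgo.Describe("qualified Ginkgo/Gomega imports", func() {
	ginkgo.BeforeEach(func() {
		// per-spec setup would go here
	})

	ginkgo.It("calls ginkgo.X and gomega.X instead of relying on dot imports", func() {
		ginkgo.By("making a trivial assertion")
		gomega.Expect(1 + 1).To(gomega.Equal(2))
	})
})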

View File

@ -31,8 +31,8 @@ import (
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
_ "github.com/stretchr/testify/assert"
)
@ -48,7 +48,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
var ns string
f := framework.NewDefaultFramework("equivalence-cache")
BeforeEach(func() {
ginkgo.BeforeEach(func() {
cs = f.ClientSet
ns = f.Namespace.Name
@ -61,7 +61,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
// cannot be run in parallel with any other test that touches Nodes or Pods.
// It is so because we need to have precise control on what's running in the cluster.
systemPods, err := framework.GetPodsInNamespace(cs, ns, map[string]string{})
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
systemPodsNo = 0
for _, pod := range systemPods {
if !masterNodes.Has(pod.Spec.NodeName) && pod.DeletionTimestamp == nil {
@ -70,7 +70,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
}
err = framework.WaitForPodsRunningReady(cs, api.NamespaceSystem, int32(systemPodsNo), int32(systemPodsNo), framework.PodReadyBeforeTimeout, map[string]string{})
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
for _, node := range nodeList.Items {
e2elog.Logf("\nLogging pods the kubelet thinks is on node %v before test", node.Name)
@ -83,15 +83,15 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
// When a replica pod (with HostPorts) is scheduled to a node, it will invalidate GeneralPredicates cache on this node,
// so that subsequent replica pods with same host port claim will be rejected.
// We enforce all replica pods bind to the same node so there will always be conflicts.
It("validates GeneralPredicates is properly invalidated when a pod is scheduled [Slow]", func() {
By("Launching a RC with two replica pods with HostPorts")
ginkgo.It("validates GeneralPredicates is properly invalidated when a pod is scheduled [Slow]", func() {
ginkgo.By("Launching a RC with two replica pods with HostPorts")
nodeName := getNodeThatCanRunPodWithoutToleration(f)
rcName := "host-port"
// bind all replicas to same node
nodeSelector := map[string]string{"kubernetes.io/hostname": nodeName}
By("One pod should be scheduled, the other should be rejected")
ginkgo.By("One pod should be scheduled, the other should be rejected")
// CreateNodeSelectorPods creates RC with host port 4312
WaitForSchedulerAfterAction(f, func() error {
err := CreateNodeSelectorPods(f, rcName, 2, nodeSelector, false)
@ -105,11 +105,11 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
// This test verifies that MatchInterPodAffinity works as expected.
// In equivalence cache, it does not handle inter pod affinity (anti-affinity) specially (unless node label changed),
// because current predicates algorithm will ensure newly scheduled pod does not break existing affinity in cluster.
It("validates pod affinity works properly when new replica pod is scheduled", func() {
ginkgo.It("validates pod affinity works properly when new replica pod is scheduled", func() {
// create a pod running with label {security: S1}, and choose this node
nodeName, _ := runAndKeepPodWithLabelAndGetNodeName(f)
By("Trying to apply a random label on the found node.")
ginkgo.By("Trying to apply a random label on the found node.")
// we need to use real failure domains, since the scheduler only knows them
k := "failure-domain.beta.kubernetes.io/zone"
v := "equivalence-e2e-test"
@ -118,7 +118,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
// restore the node label
defer framework.AddOrUpdateLabelOnNode(cs, nodeName, k, oldValue)
By("Trying to schedule RC with Pod Affinity should success.")
ginkgo.By("Trying to schedule RC with Pod Affinity should success.")
framework.WaitForStableCluster(cs, masterNodes)
affinityRCName := "with-pod-affinity-" + string(uuid.NewUUID())
replica := 2
@ -154,10 +154,10 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
framework.ExpectNoError(err)
framework.ExpectNoError(framework.WaitForControlledPodsRunning(cs, ns, affinityRCName, api.Kind("ReplicationController")))
By("Remove node failure domain label")
ginkgo.By("Remove node failure domain label")
framework.RemoveLabelOffNode(cs, nodeName, k)
By("Trying to schedule another equivalent Pod should fail due to node label has been removed.")
ginkgo.By("Trying to schedule another equivalent Pod should fail due to node label has been removed.")
// use scale to create another equivalent pod and wait for failure event
WaitForSchedulerAfterAction(f, func() error {
err := framework.ScaleRC(f.ClientSet, f.ScalesGetter, ns, affinityRCName, uint(replica+1), false)
@ -168,17 +168,17 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
})
// This test verifies that MatchInterPodAffinity (anti-affinity) is respected as expected.
It("validates pod anti-affinity works properly when new replica pod is scheduled", func() {
By("Launching two pods on two distinct nodes to get two node names")
ginkgo.It("validates pod anti-affinity works properly when new replica pod is scheduled", func() {
ginkgo.By("Launching two pods on two distinct nodes to get two node names")
CreateHostPortPods(f, "host-port", 2, true)
defer framework.DeleteRCAndWaitForGC(f.ClientSet, ns, "host-port")
podList, err := cs.CoreV1().Pods(ns).List(metav1.ListOptions{})
framework.ExpectNoError(err)
Expect(len(podList.Items)).To(Equal(2))
gomega.Expect(len(podList.Items)).To(gomega.Equal(2))
nodeNames := []string{podList.Items[0].Spec.NodeName, podList.Items[1].Spec.NodeName}
Expect(nodeNames[0]).ToNot(Equal(nodeNames[1]))
gomega.Expect(nodeNames[0]).ToNot(gomega.Equal(nodeNames[1]))
By("Applying a random label to both nodes.")
ginkgo.By("Applying a random label to both nodes.")
k := "e2e.inter-pod-affinity.kubernetes.io/zone"
v := "equivalence-e2etest"
for _, nodeName := range nodeNames {
@ -187,7 +187,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
defer framework.RemoveLabelOffNode(cs, nodeName, k)
}
By("Trying to launch a pod with the service label on the selected nodes.")
ginkgo.By("Trying to launch a pod with the service label on the selected nodes.")
// run a pod with label {"service": "S1"} and expect it to be running
runPausePod(f, pausePodConfig{
Name: "with-label-" + string(uuid.NewUUID()),
@ -195,7 +195,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
NodeSelector: map[string]string{k: v}, // only launch on our two nodes
})
By("Trying to launch RC with podAntiAffinity on these two nodes should be rejected.")
ginkgo.By("Trying to launch RC with podAntiAffinity on these two nodes should be rejected.")
labelRCName := "with-podantiaffinity-" + string(uuid.NewUUID())
replica := 2
labelsMap := map[string]string{
@ -270,7 +270,7 @@ func getRCWithInterPodAffinityNodeSelector(name string, labelsMap map[string]str
}
func CreateNodeSelectorPods(f *framework.Framework, id string, replicas int, nodeSelector map[string]string, expectRunning bool) error {
By(fmt.Sprintf("Running RC which reserves host port and defines node selector"))
ginkgo.By(fmt.Sprintf("Running RC which reserves host port and defines node selector"))
config := &testutils.RCConfig{
Client: f.ClientSet,

View File

@ -30,8 +30,8 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
const (
@ -41,8 +41,8 @@ const (
var _ = SIGDescribe("LimitRange", func() {
f := framework.NewDefaultFramework("limitrange")
It("should create a LimitRange with defaults and ensure pod has those defaults applied.", func() {
By("Creating a LimitRange")
ginkgo.It("should create a LimitRange with defaults and ensure pod has those defaults applied.", func() {
ginkgo.By("Creating a LimitRange")
min := getResourceList("50m", "100Mi", "100Gi")
max := getResourceList("500m", "500Mi", "500Gi")
@ -54,24 +54,24 @@ var _ = SIGDescribe("LimitRange", func() {
defaultLimit, defaultRequest,
maxLimitRequestRatio)
By("Setting up watch")
ginkgo.By("Setting up watch")
selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": limitRange.Name}))
options := metav1.ListOptions{LabelSelector: selector.String()}
limitRanges, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).List(options)
Expect(err).NotTo(HaveOccurred(), "failed to query for limitRanges")
Expect(len(limitRanges.Items)).To(Equal(0))
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for limitRanges")
gomega.Expect(len(limitRanges.Items)).To(gomega.Equal(0))
options = metav1.ListOptions{
LabelSelector: selector.String(),
ResourceVersion: limitRanges.ListMeta.ResourceVersion,
}
w, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Watch(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred(), "failed to set up watch")
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to set up watch")
By("Submitting a LimitRange")
ginkgo.By("Submitting a LimitRange")
limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Create(limitRange)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
By("Verifying LimitRange creation was observed")
ginkgo.By("Verifying LimitRange creation was observed")
select {
case event, _ := <-w.ResultChan():
if event.Type != watch.Added {
@ -81,39 +81,39 @@ var _ = SIGDescribe("LimitRange", func() {
framework.Failf("Timeout while waiting for LimitRange creation")
}
By("Fetching the LimitRange to ensure it has proper values")
ginkgo.By("Fetching the LimitRange to ensure it has proper values")
limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Get(limitRange.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
expected := v1.ResourceRequirements{Requests: defaultRequest, Limits: defaultLimit}
actual := v1.ResourceRequirements{Requests: limitRange.Spec.Limits[0].DefaultRequest, Limits: limitRange.Spec.Limits[0].Default}
err = equalResourceRequirement(expected, actual)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
By("Creating a Pod with no resource requirements")
ginkgo.By("Creating a Pod with no resource requirements")
pod := f.NewTestPod("pod-no-resources", v1.ResourceList{}, v1.ResourceList{})
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
By("Ensuring Pod has resource requirements applied from LimitRange")
ginkgo.By("Ensuring Pod has resource requirements applied from LimitRange")
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
for i := range pod.Spec.Containers {
err = equalResourceRequirement(expected, pod.Spec.Containers[i].Resources)
if err != nil {
// Print the pod to help in debugging.
e2elog.Logf("Pod %+v does not have the expected requirements", pod)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
}
By("Creating a Pod with partial resource requirements")
ginkgo.By("Creating a Pod with partial resource requirements")
pod = f.NewTestPod("pod-partial-resources", getResourceList("", "150Mi", "150Gi"), getResourceList("300m", "", ""))
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
By("Ensuring Pod has merged resource requirements applied from LimitRange")
ginkgo.By("Ensuring Pod has merged resource requirements applied from LimitRange")
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// This is an interesting case, so it's worth a comment
// If you specify a Limit, and no Request, the Limit will default to the Request
// This means that the LimitRange.DefaultRequest will ONLY take effect if a container.resources.limit is not supplied
@ -123,49 +123,49 @@ var _ = SIGDescribe("LimitRange", func() {
if err != nil {
// Print the pod to help in debugging.
e2elog.Logf("Pod %+v does not have the expected requirements", pod)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
}
By("Failing to create a Pod with less than min resources")
ginkgo.By("Failing to create a Pod with less than min resources")
pod = f.NewTestPod(podName, getResourceList("10m", "50Mi", "50Gi"), v1.ResourceList{})
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).To(HaveOccurred())
gomega.Expect(err).To(gomega.HaveOccurred())
By("Failing to create a Pod with more than max resources")
ginkgo.By("Failing to create a Pod with more than max resources")
pod = f.NewTestPod(podName, getResourceList("600m", "600Mi", "600Gi"), v1.ResourceList{})
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).To(HaveOccurred())
gomega.Expect(err).To(gomega.HaveOccurred())
By("Updating a LimitRange")
ginkgo.By("Updating a LimitRange")
newMin := getResourceList("9m", "49Mi", "49Gi")
limitRange.Spec.Limits[0].Min = newMin
limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Update(limitRange)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
By("Verifying LimitRange updating is effective")
Expect(wait.Poll(time.Second*2, time.Second*20, func() (bool, error) {
ginkgo.By("Verifying LimitRange updating is effective")
gomega.Expect(wait.Poll(time.Second*2, time.Second*20, func() (bool, error) {
limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Get(limitRange.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
return reflect.DeepEqual(limitRange.Spec.Limits[0].Min, newMin), nil
})).NotTo(HaveOccurred())
})).NotTo(gomega.HaveOccurred())
By("Creating a Pod with less than former min resources")
ginkgo.By("Creating a Pod with less than former min resources")
pod = f.NewTestPod(podName, getResourceList("10m", "50Mi", "50Gi"), v1.ResourceList{})
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
By("Failing to create a Pod with more than max resources")
ginkgo.By("Failing to create a Pod with more than max resources")
pod = f.NewTestPod(podName, getResourceList("600m", "600Mi", "600Gi"), v1.ResourceList{})
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).To(HaveOccurred())
gomega.Expect(err).To(gomega.HaveOccurred())
By("Deleting a LimitRange")
ginkgo.By("Deleting a LimitRange")
err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Delete(limitRange.Name, metav1.NewDeleteOptions(30))
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
By("Verifying the LimitRange was deleted")
Expect(wait.Poll(time.Second*5, time.Second*30, func() (bool, error) {
ginkgo.By("Verifying the LimitRange was deleted")
gomega.Expect(wait.Poll(time.Second*5, time.Second*30, func() (bool, error) {
selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": limitRange.Name}))
options := metav1.ListOptions{LabelSelector: selector.String()}
limitRanges, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).List(options)
@ -190,12 +190,12 @@ var _ = SIGDescribe("LimitRange", func() {
return false, nil
})).NotTo(HaveOccurred(), "kubelet never observed the termination notice")
})).NotTo(gomega.HaveOccurred(), "kubelet never observed the termination notice")
By("Creating a Pod with more than former max resources")
ginkgo.By("Creating a Pod with more than former max resources")
pod = f.NewTestPod(podName+"2", getResourceList("600m", "600Mi", "600Gi"), v1.ResourceList{})
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
})
})

View File

@ -30,8 +30,8 @@ import (
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
const (
@ -130,7 +130,7 @@ func SetupNVIDIAGPUNode(f *framework.Framework, setupResourceGatherer bool) *fra
e2elog.Logf("Using %v", dsYamlUrl)
// Creates the DaemonSet that installs Nvidia Drivers.
ds, err := framework.DsFromManifest(dsYamlUrl)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ds.Namespace = f.Namespace.Name
_, err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(ds)
framework.ExpectNoError(err, "failed to create nvidia-driver-installer daemonset")
@ -155,9 +155,9 @@ func SetupNVIDIAGPUNode(f *framework.Framework, setupResourceGatherer bool) *fra
// Wait for Nvidia GPUs to be available on nodes
e2elog.Logf("Waiting for drivers to be installed and GPUs to be available in Node Capacity...")
Eventually(func() bool {
gomega.Eventually(func() bool {
return areGPUsAvailableOnAllSchedulableNodes(f)
}, driverInstallTimeout, time.Second).Should(BeTrue())
}, driverInstallTimeout, time.Second).Should(gomega.BeTrue())
return rsgather
}
@ -185,7 +185,7 @@ func testNvidiaGPUs(f *framework.Framework) {
var _ = SIGDescribe("[Feature:GPUDevicePlugin]", func() {
f := framework.NewDefaultFramework("device-plugin-gpus")
It("run Nvidia GPU Device Plugin tests", func() {
ginkgo.It("run Nvidia GPU Device Plugin tests", func() {
testNvidiaGPUs(f)
})
})

View File

@ -34,8 +34,8 @@ import (
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
_ "github.com/stretchr/testify/assert"
)
@ -68,16 +68,16 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
var ns string
f := framework.NewDefaultFramework("sched-pred")
AfterEach(func() {
ginkgo.AfterEach(func() {
rc, err := cs.CoreV1().ReplicationControllers(ns).Get(RCName, metav1.GetOptions{})
if err == nil && *(rc.Spec.Replicas) != 0 {
By("Cleaning up the replication controller")
ginkgo.By("Cleaning up the replication controller")
err := framework.DeleteRCAndWaitForGC(f.ClientSet, ns, RCName)
framework.ExpectNoError(err)
}
})
BeforeEach(func() {
ginkgo.BeforeEach(func() {
cs = f.ClientSet
ns = f.Namespace.Name
nodeList = &v1.NodeList{}
@ -100,20 +100,20 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
// if max-pods is working we need to fully saturate the cluster and keep it in this state for a few seconds.
//
// Slow PR #13315 (8 min)
It("validates MaxPods limit number of pods that are allowed to run [Slow]", func() {
ginkgo.It("validates MaxPods limit number of pods that are allowed to run [Slow]", func() {
totalPodCapacity = 0
for _, node := range nodeList.Items {
e2elog.Logf("Node: %v", node)
podCapacity, found := node.Status.Capacity[v1.ResourcePods]
Expect(found).To(Equal(true))
gomega.Expect(found).To(gomega.Equal(true))
totalPodCapacity += podCapacity.Value()
}
currentlyScheduledPods := framework.WaitForStableCluster(cs, masterNodes)
podsNeededForSaturation := int(totalPodCapacity) - currentlyScheduledPods
By(fmt.Sprintf("Starting additional %v Pods to fully saturate the cluster max pods and trying to start another one", podsNeededForSaturation))
ginkgo.By(fmt.Sprintf("Starting additional %v Pods to fully saturate the cluster max pods and trying to start another one", podsNeededForSaturation))
// As the pods are distributed randomly among nodes,
// it can easily happen that all nodes are saturated
@ -137,7 +137,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
// This test verifies we don't allow scheduling of pods in a way that sum of local ephemeral storage limits of pods is greater than machines capacity.
// It assumes that cluster add-on pods stay stable and cannot be run in parallel with any other test that touches Nodes or Pods.
// It is so because we need to have precise control on what's running in the cluster.
It("validates local ephemeral storage resource limits of pods that are allowed to run [Feature:LocalStorageCapacityIsolation]", func() {
ginkgo.It("validates local ephemeral storage resource limits of pods that are allowed to run [Feature:LocalStorageCapacityIsolation]", func() {
framework.SkipUnlessServerVersionGTE(localStorageVersion, f.ClientSet.Discovery())
@ -146,7 +146,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
nodeToAllocatableMap := make(map[string]int64)
for _, node := range nodeList.Items {
allocatable, found := node.Status.Allocatable[v1.ResourceEphemeralStorage]
Expect(found).To(Equal(true))
gomega.Expect(found).To(gomega.Equal(true))
nodeToAllocatableMap[node.Name] = allocatable.MilliValue()
if nodeMaxAllocatable < allocatable.MilliValue() {
nodeMaxAllocatable = allocatable.MilliValue()
@ -174,7 +174,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
podsNeededForSaturation += (int)(leftAllocatable / milliEphemeralStoragePerPod)
}
By(fmt.Sprintf("Starting additional %v Pods to fully saturate the cluster local ephemeral resource and trying to start another one", podsNeededForSaturation))
ginkgo.By(fmt.Sprintf("Starting additional %v Pods to fully saturate the cluster local ephemeral resource and trying to start another one", podsNeededForSaturation))
// As the pods are distributed randomly among nodes,
// it can easily happen that all nodes are saturated
@ -245,7 +245,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
framework.ExpectNodeHasLabel(cs, node.Name, "node", node.Name)
// Find allocatable amount of CPU.
allocatable, found := node.Status.Allocatable[v1.ResourceCPU]
Expect(found).To(Equal(true))
gomega.Expect(found).To(gomega.Equal(true))
nodeToAllocatableMap[node.Name] = allocatable.MilliValue()
if nodeMaxAllocatable < allocatable.MilliValue() {
nodeMaxAllocatable = allocatable.MilliValue()
@ -268,7 +268,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
}
}
By("Starting Pods to consume most of the cluster CPU.")
ginkgo.By("Starting Pods to consume most of the cluster CPU.")
// Create one pod per node that requires 70% of the node remaining CPU.
fillerPods := []*v1.Pod{}
for nodeName, cpu := range nodeToAllocatableMap {
@ -306,7 +306,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
for _, pod := range fillerPods {
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(cs, pod))
}
By("Creating another pod that requires unavailable amount of CPU.")
ginkgo.By("Creating another pod that requires unavailable amount of CPU.")
// Create another pod that requires 50% of the largest node CPU resources.
// This pod should remain pending as at least 70% of CPU of other nodes in
// the cluster are already consumed.
@ -332,7 +332,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
Description: Create a Pod with a NodeSelector set to a value that does not match a node in the cluster. Since there are no nodes matching the criteria the Pod MUST not be scheduled.
*/
framework.ConformanceIt("validates that NodeSelector is respected if not matching ", func() {
By("Trying to schedule Pod with nonempty NodeSelector.")
ginkgo.By("Trying to schedule Pod with nonempty NodeSelector.")
podName := "restricted-pod"
framework.WaitForStableCluster(cs, masterNodes)
@ -357,14 +357,14 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
framework.ConformanceIt("validates that NodeSelector is respected if matching ", func() {
nodeName := GetNodeThatCanRunPod(f)
By("Trying to apply a random label on the found node.")
ginkgo.By("Trying to apply a random label on the found node.")
k := fmt.Sprintf("kubernetes.io/e2e-%s", string(uuid.NewUUID()))
v := "42"
framework.AddOrUpdateLabelOnNode(cs, nodeName, k, v)
framework.ExpectNodeHasLabel(cs, nodeName, k, v)
defer framework.RemoveLabelOffNode(cs, nodeName, k)
By("Trying to relaunch the pod, now with labels.")
ginkgo.By("Trying to relaunch the pod, now with labels.")
labelPodName := "with-labels"
createPausePod(f, pausePodConfig{
Name: labelPodName,
@ -381,13 +381,13 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
framework.ExpectNoError(framework.WaitForPodNotPending(cs, ns, labelPodName))
labelPod, err := cs.CoreV1().Pods(ns).Get(labelPodName, metav1.GetOptions{})
framework.ExpectNoError(err)
Expect(labelPod.Spec.NodeName).To(Equal(nodeName))
gomega.Expect(labelPod.Spec.NodeName).To(gomega.Equal(nodeName))
})
// Test Nodes does not have any label, hence it should be impossible to schedule Pod with
// non-nil NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.
It("validates that NodeAffinity is respected if not matching", func() {
By("Trying to schedule Pod with nonempty NodeSelector.")
ginkgo.It("validates that NodeAffinity is respected if not matching", func() {
ginkgo.By("Trying to schedule Pod with nonempty NodeSelector.")
podName := "restricted-pod"
framework.WaitForStableCluster(cs, masterNodes)
@ -427,17 +427,17 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
// Keep the same steps with the test on NodeSelector,
// but specify Affinity in Pod.Spec.Affinity, instead of NodeSelector.
It("validates that required NodeAffinity setting is respected if matching", func() {
ginkgo.It("validates that required NodeAffinity setting is respected if matching", func() {
nodeName := GetNodeThatCanRunPod(f)
By("Trying to apply a random label on the found node.")
ginkgo.By("Trying to apply a random label on the found node.")
k := fmt.Sprintf("kubernetes.io/e2e-%s", string(uuid.NewUUID()))
v := "42"
framework.AddOrUpdateLabelOnNode(cs, nodeName, k, v)
framework.ExpectNodeHasLabel(cs, nodeName, k, v)
defer framework.RemoveLabelOffNode(cs, nodeName, k)
By("Trying to relaunch the pod, now with labels.")
ginkgo.By("Trying to relaunch the pod, now with labels.")
labelPodName := "with-labels"
createPausePod(f, pausePodConfig{
Name: labelPodName,
@ -468,17 +468,17 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
framework.ExpectNoError(framework.WaitForPodNotPending(cs, ns, labelPodName))
labelPod, err := cs.CoreV1().Pods(ns).Get(labelPodName, metav1.GetOptions{})
framework.ExpectNoError(err)
Expect(labelPod.Spec.NodeName).To(Equal(nodeName))
gomega.Expect(labelPod.Spec.NodeName).To(gomega.Equal(nodeName))
})
// 1. Run a pod to get an available node, then delete the pod
// 2. Taint the node with a random taint
// 3. Try to relaunch the pod with tolerations tolerate the taints on node,
// and the pod's nodeName specified to the name of node found in step 1
It("validates that taints-tolerations is respected if matching", func() {
ginkgo.It("validates that taints-tolerations is respected if matching", func() {
nodeName := getNodeThatCanRunPodWithoutToleration(f)
By("Trying to apply a random taint on the found node.")
ginkgo.By("Trying to apply a random taint on the found node.")
testTaint := v1.Taint{
Key: fmt.Sprintf("kubernetes.io/e2e-taint-key-%s", string(uuid.NewUUID())),
Value: "testing-taint-value",
@ -488,14 +488,14 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
framework.ExpectNodeHasTaint(cs, nodeName, &testTaint)
defer framework.RemoveTaintOffNode(cs, nodeName, testTaint)
By("Trying to apply a random label on the found node.")
ginkgo.By("Trying to apply a random label on the found node.")
labelKey := fmt.Sprintf("kubernetes.io/e2e-label-key-%s", string(uuid.NewUUID()))
labelValue := "testing-label-value"
framework.AddOrUpdateLabelOnNode(cs, nodeName, labelKey, labelValue)
framework.ExpectNodeHasLabel(cs, nodeName, labelKey, labelValue)
defer framework.RemoveLabelOffNode(cs, nodeName, labelKey)
By("Trying to relaunch the pod, now with tolerations.")
ginkgo.By("Trying to relaunch the pod, now with tolerations.")
tolerationPodName := "with-tolerations"
createPausePod(f, pausePodConfig{
Name: tolerationPodName,
@ -511,17 +511,17 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
framework.ExpectNoError(framework.WaitForPodNotPending(cs, ns, tolerationPodName))
deployedPod, err := cs.CoreV1().Pods(ns).Get(tolerationPodName, metav1.GetOptions{})
framework.ExpectNoError(err)
Expect(deployedPod.Spec.NodeName).To(Equal(nodeName))
gomega.Expect(deployedPod.Spec.NodeName).To(gomega.Equal(nodeName))
})
// 1. Run a pod to get an available node, then delete the pod
// 2. Taint the node with a random taint
// 3. Try to relaunch the pod still no tolerations,
// and the pod's nodeName specified to the name of node found in step 1
It("validates that taints-tolerations is respected if not matching", func() {
ginkgo.It("validates that taints-tolerations is respected if not matching", func() {
nodeName := getNodeThatCanRunPodWithoutToleration(f)
By("Trying to apply a random taint on the found node.")
ginkgo.By("Trying to apply a random taint on the found node.")
testTaint := v1.Taint{
Key: fmt.Sprintf("kubernetes.io/e2e-taint-key-%s", string(uuid.NewUUID())),
Value: "testing-taint-value",
@ -531,14 +531,14 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
framework.ExpectNodeHasTaint(cs, nodeName, &testTaint)
defer framework.RemoveTaintOffNode(cs, nodeName, testTaint)
By("Trying to apply a random label on the found node.")
ginkgo.By("Trying to apply a random label on the found node.")
labelKey := fmt.Sprintf("kubernetes.io/e2e-label-key-%s", string(uuid.NewUUID()))
labelValue := "testing-label-value"
framework.AddOrUpdateLabelOnNode(cs, nodeName, labelKey, labelValue)
framework.ExpectNodeHasLabel(cs, nodeName, labelKey, labelValue)
defer framework.RemoveLabelOffNode(cs, nodeName, labelKey)
By("Trying to relaunch the pod, still no tolerations.")
ginkgo.By("Trying to relaunch the pod, still no tolerations.")
podNameNoTolerations := "still-no-tolerations"
conf := pausePodConfig{
Name: podNameNoTolerations,
@ -548,17 +548,17 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
WaitForSchedulerAfterAction(f, createPausePodAction(f, conf), ns, podNameNoTolerations, false)
verifyResult(cs, 0, 1, ns)
By("Removing taint off the node")
ginkgo.By("Removing taint off the node")
WaitForSchedulerAfterAction(f, removeTaintFromNodeAction(cs, nodeName, testTaint), ns, podNameNoTolerations, true)
verifyResult(cs, 1, 0, ns)
})
It("validates that there is no conflict between pods with same hostPort but different hostIP and protocol", func() {
ginkgo.It("validates that there is no conflict between pods with same hostPort but different hostIP and protocol", func() {
nodeName := GetNodeThatCanRunPod(f)
// use nodeSelector to make sure the testing pods get assigned on the same node to explicitly verify there exists conflict or not
By("Trying to apply a random label on the found node.")
ginkgo.By("Trying to apply a random label on the found node.")
k := fmt.Sprintf("kubernetes.io/e2e-%s", string(uuid.NewUUID()))
v := "90"
@ -570,21 +570,21 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
defer framework.RemoveLabelOffNode(cs, nodeName, k)
port := int32(54321)
By(fmt.Sprintf("Trying to create a pod(pod1) with hostport %v and hostIP 127.0.0.1 and expect scheduled", port))
ginkgo.By(fmt.Sprintf("Trying to create a pod(pod1) with hostport %v and hostIP 127.0.0.1 and expect scheduled", port))
createHostPortPodOnNode(f, "pod1", ns, "127.0.0.1", port, v1.ProtocolTCP, nodeSelector, true)
By(fmt.Sprintf("Trying to create another pod(pod2) with hostport %v but hostIP 127.0.0.2 on the node which pod1 resides and expect scheduled", port))
ginkgo.By(fmt.Sprintf("Trying to create another pod(pod2) with hostport %v but hostIP 127.0.0.2 on the node which pod1 resides and expect scheduled", port))
createHostPortPodOnNode(f, "pod2", ns, "127.0.0.2", port, v1.ProtocolTCP, nodeSelector, true)
By(fmt.Sprintf("Trying to create a third pod(pod3) with hostport %v, hostIP 127.0.0.2 but use UDP protocol on the node which pod2 resides", port))
ginkgo.By(fmt.Sprintf("Trying to create a third pod(pod3) with hostport %v, hostIP 127.0.0.2 but use UDP protocol on the node which pod2 resides", port))
createHostPortPodOnNode(f, "pod3", ns, "127.0.0.2", port, v1.ProtocolUDP, nodeSelector, true)
})
It("validates that there exists conflict between pods with same hostPort and protocol but one using 0.0.0.0 hostIP", func() {
ginkgo.It("validates that there exists conflict between pods with same hostPort and protocol but one using 0.0.0.0 hostIP", func() {
nodeName := GetNodeThatCanRunPod(f)
// use nodeSelector to make sure the testing pods get assigned on the same node to explicitly verify there exists conflict or not
By("Trying to apply a random label on the found node.")
ginkgo.By("Trying to apply a random label on the found node.")
k := fmt.Sprintf("kubernetes.io/e2e-%s", string(uuid.NewUUID()))
v := "95"
@ -596,10 +596,10 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
defer framework.RemoveLabelOffNode(cs, nodeName, k)
port := int32(54322)
By(fmt.Sprintf("Trying to create a pod(pod4) with hostport %v and hostIP 0.0.0.0(empty string here) and expect scheduled", port))
ginkgo.By(fmt.Sprintf("Trying to create a pod(pod4) with hostport %v and hostIP 0.0.0.0(empty string here) and expect scheduled", port))
createHostPortPodOnNode(f, "pod4", ns, "", port, v1.ProtocolTCP, nodeSelector, true)
By(fmt.Sprintf("Trying to create another pod(pod5) with hostport %v but hostIP 127.0.0.1 on the node which pod4 resides and expect not scheduled", port))
ginkgo.By(fmt.Sprintf("Trying to create another pod(pod5) with hostport %v but hostIP 127.0.0.1 on the node which pod4 resides and expect not scheduled", port))
createHostPortPodOnNode(f, "pod5", ns, "127.0.0.1", port, v1.ProtocolTCP, nodeSelector, false)
})
})
@ -664,7 +664,7 @@ func runPodAndGetNodeName(f *framework.Framework, conf pausePodConfig) string {
// scheduled onto it.
pod := runPausePod(f, conf)
By("Explicitly delete pod here to free the resource it takes.")
ginkgo.By("Explicitly delete pod here to free the resource it takes.")
err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
@ -712,8 +712,8 @@ func WaitForSchedulerAfterAction(f *framework.Framework, action common.Action, n
predicate = scheduleSuccessEvent(ns, podName, "" /* any node */)
}
success, err := common.ObserveEventAfterAction(f, predicate, action)
Expect(err).NotTo(HaveOccurred())
Expect(success).To(Equal(true))
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Expect(success).To(gomega.Equal(true))
}
// TODO: upgrade calls in PodAffinity tests when we're able to run them
@ -732,8 +732,8 @@ func verifyResult(c clientset.Interface, expectedScheduled int, expectedNotSched
}
}
Expect(len(notScheduledPods)).To(Equal(expectedNotScheduled), printOnce(fmt.Sprintf("Not scheduled Pods: %#v", notScheduledPods)))
Expect(len(scheduledPods)).To(Equal(expectedScheduled), printOnce(fmt.Sprintf("Scheduled Pods: %#v", scheduledPods)))
gomega.Expect(len(notScheduledPods)).To(gomega.Equal(expectedNotScheduled), printOnce(fmt.Sprintf("Not scheduled Pods: %#v", notScheduledPods)))
gomega.Expect(len(scheduledPods)).To(gomega.Equal(expectedScheduled), printOnce(fmt.Sprintf("Scheduled Pods: %#v", scheduledPods)))
}
// verifyReplicasResult is wrapper of verifyResult for a group pods with same "name: labelName" label, which means they belong to same RC
@ -751,8 +751,8 @@ func verifyReplicasResult(c clientset.Interface, expectedScheduled int, expected
}
}
Expect(len(notScheduledPods)).To(Equal(expectedNotScheduled), printOnce(fmt.Sprintf("Not scheduled Pods: %#v", notScheduledPods)))
Expect(len(scheduledPods)).To(Equal(expectedScheduled), printOnce(fmt.Sprintf("Scheduled Pods: %#v", scheduledPods)))
gomega.Expect(len(notScheduledPods)).To(gomega.Equal(expectedNotScheduled), printOnce(fmt.Sprintf("Not scheduled Pods: %#v", notScheduledPods)))
gomega.Expect(len(scheduledPods)).To(gomega.Equal(expectedScheduled), printOnce(fmt.Sprintf("Scheduled Pods: %#v", scheduledPods)))
}
func getPodsByLabels(c clientset.Interface, ns string, labelsMap map[string]string) *v1.PodList {
@ -767,7 +767,7 @@ func runAndKeepPodWithLabelAndGetNodeName(f *framework.Framework) (string, strin
// not just take the node list and choose the first of them. Depending on the
// cluster and the scheduler it might be that a "normal" pod cannot be
// scheduled onto it.
By("Trying to launch a pod with a label to get a node which can launch it.")
ginkgo.By("Trying to launch a pod with a label to get a node which can launch it.")
pod := runPausePod(f, pausePodConfig{
Name: "with-label-" + string(uuid.NewUUID()),
Labels: map[string]string{"security": "S1"},
@ -776,17 +776,17 @@ func runAndKeepPodWithLabelAndGetNodeName(f *framework.Framework) (string, strin
}
func GetNodeThatCanRunPod(f *framework.Framework) string {
By("Trying to launch a pod without a label to get a node which can launch it.")
ginkgo.By("Trying to launch a pod without a label to get a node which can launch it.")
return runPodAndGetNodeName(f, pausePodConfig{Name: "without-label"})
}
func getNodeThatCanRunPodWithoutToleration(f *framework.Framework) string {
By("Trying to launch a pod without a toleration to get a node which can launch it.")
ginkgo.By("Trying to launch a pod without a toleration to get a node which can launch it.")
return runPodAndGetNodeName(f, pausePodConfig{Name: "without-toleration"})
}
func CreateHostPortPods(f *framework.Framework, id string, replicas int, expectRunning bool) {
By(fmt.Sprintf("Running RC which reserves host port"))
ginkgo.By(fmt.Sprintf("Running RC which reserves host port"))
config := &testutils.RCConfig{
Client: f.ClientSet,
Name: id,

View File

@ -37,8 +37,8 @@ import (
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/framework/replicaset"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
_ "github.com/stretchr/testify/assert"
)
@ -63,19 +63,19 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
{name: highPriorityClassName, value: highPriority},
}
AfterEach(func() {
ginkgo.AfterEach(func() {
for _, pair := range priorityPairs {
cs.SchedulingV1().PriorityClasses().Delete(pair.name, metav1.NewDeleteOptions(0))
}
})
BeforeEach(func() {
ginkgo.BeforeEach(func() {
cs = f.ClientSet
ns = f.Namespace.Name
nodeList = &corev1.NodeList{}
for _, pair := range priorityPairs {
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulerapi.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: pair.name}, Value: pair.value})
Expect(err == nil || errors.IsAlreadyExists(err)).To(Equal(true))
gomega.Expect(err == nil || errors.IsAlreadyExists(err)).To(gomega.Equal(true))
}
framework.WaitForAllNodesHealthy(cs, time.Minute)
@ -88,17 +88,17 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
// This test verifies that when a higher priority pod is created and no node with
// enough resources is found, scheduler preempts a lower priority pod to schedule
// the high priority pod.
It("validates basic preemption works", func() {
ginkgo.It("validates basic preemption works", func() {
var podRes corev1.ResourceList
// Create one pod per node that uses a lot of the node's resources.
By("Create pods that use 60% of node resources.")
ginkgo.By("Create pods that use 60% of node resources.")
pods := make([]*corev1.Pod, len(nodeList.Items))
for i, node := range nodeList.Items {
cpuAllocatable, found := node.Status.Allocatable["cpu"]
Expect(found).To(Equal(true))
gomega.Expect(found).To(gomega.Equal(true))
milliCPU := cpuAllocatable.MilliValue() * 40 / 100
memAllocatable, found := node.Status.Allocatable["memory"]
Expect(found).To(Equal(true))
gomega.Expect(found).To(gomega.Equal(true))
memory := memAllocatable.Value() * 60 / 100
podRes = corev1.ResourceList{}
podRes[corev1.ResourceCPU] = *resource.NewMilliQuantity(int64(milliCPU), resource.DecimalSI)
@ -118,12 +118,12 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
})
e2elog.Logf("Created pod: %v", pods[i].Name)
}
By("Wait for pods to be scheduled.")
ginkgo.By("Wait for pods to be scheduled.")
for _, pod := range pods {
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(cs, pod))
}
By("Run a high priority pod that use 60% of a node resources.")
ginkgo.By("Run a high priority pod that use 60% of a node resources.")
// Create a high priority pod and make sure it is scheduled.
runPausePod(f, pausePodConfig{
Name: "preemptor-pod",
@ -136,29 +136,29 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
preemptedPod, err := cs.CoreV1().Pods(pods[0].Namespace).Get(pods[0].Name, metav1.GetOptions{})
podDeleted := (err != nil && errors.IsNotFound(err)) ||
(err == nil && preemptedPod.DeletionTimestamp != nil)
Expect(podDeleted).To(BeTrue())
gomega.Expect(podDeleted).To(gomega.BeTrue())
// Other pods (mid priority ones) should be present.
for i := 1; i < len(pods); i++ {
livePod, err := cs.CoreV1().Pods(pods[i].Namespace).Get(pods[i].Name, metav1.GetOptions{})
framework.ExpectNoError(err)
Expect(livePod.DeletionTimestamp).To(BeNil())
gomega.Expect(livePod.DeletionTimestamp).To(gomega.BeNil())
}
})
// This test verifies that when a critical pod is created and no node with
// enough resources is found, scheduler preempts a lower priority pod to schedule
// this critical pod.
It("validates lower priority pod preemption by critical pod", func() {
ginkgo.It("validates lower priority pod preemption by critical pod", func() {
var podRes corev1.ResourceList
// Create one pod per node that uses a lot of the node's resources.
By("Create pods that use 60% of node resources.")
ginkgo.By("Create pods that use 60% of node resources.")
pods := make([]*corev1.Pod, len(nodeList.Items))
for i, node := range nodeList.Items {
cpuAllocatable, found := node.Status.Allocatable["cpu"]
Expect(found).To(Equal(true))
gomega.Expect(found).To(gomega.Equal(true))
milliCPU := cpuAllocatable.MilliValue() * 40 / 100
memAllocatable, found := node.Status.Allocatable["memory"]
Expect(found).To(Equal(true))
gomega.Expect(found).To(gomega.Equal(true))
memory := memAllocatable.Value() * 60 / 100
podRes = corev1.ResourceList{}
podRes[corev1.ResourceCPU] = *resource.NewMilliQuantity(int64(milliCPU), resource.DecimalSI)
@ -178,12 +178,12 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
})
e2elog.Logf("Created pod: %v", pods[i].Name)
}
By("Wait for pods to be scheduled.")
ginkgo.By("Wait for pods to be scheduled.")
for _, pod := range pods {
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(cs, pod))
}
By("Run a critical pod that use 60% of a node resources.")
ginkgo.By("Run a critical pod that use 60% of a node resources.")
// Create a critical pod and make sure it is scheduled.
runPausePod(f, pausePodConfig{
Name: "critical-pod",
@ -202,12 +202,12 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
}()
podDeleted := (err != nil && errors.IsNotFound(err)) ||
(err == nil && preemptedPod.DeletionTimestamp != nil)
Expect(podDeleted).To(BeTrue())
gomega.Expect(podDeleted).To(gomega.BeTrue())
// Other pods (mid priority ones) should be present.
for i := 1; i < len(pods); i++ {
livePod, err := cs.CoreV1().Pods(pods[i].Namespace).Get(pods[i].Name, metav1.GetOptions{})
framework.ExpectNoError(err)
Expect(livePod.DeletionTimestamp).To(BeNil())
gomega.Expect(livePod.DeletionTimestamp).To(gomega.BeNil())
}
})
@ -216,10 +216,10 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
// pod is preempted to allow the higher priority pod schedule.
// It also verifies that existing low priority pods are not preempted as their
// preemption wouldn't help.
It("validates pod anti-affinity works in preemption", func() {
ginkgo.It("validates pod anti-affinity works in preemption", func() {
var podRes corev1.ResourceList
// Create a few pods that uses a small amount of resources.
By("Create pods that use 10% of node resources.")
ginkgo.By("Create pods that use 10% of node resources.")
numPods := 4
if len(nodeList.Items) < numPods {
numPods = len(nodeList.Items)
@ -228,10 +228,10 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
for i := 0; i < numPods; i++ {
node := nodeList.Items[i]
cpuAllocatable, found := node.Status.Allocatable["cpu"]
Expect(found).To(BeTrue())
gomega.Expect(found).To(gomega.BeTrue())
milliCPU := cpuAllocatable.MilliValue() * 10 / 100
memAllocatable, found := node.Status.Allocatable["memory"]
Expect(found).To(BeTrue())
gomega.Expect(found).To(gomega.BeTrue())
memory := memAllocatable.Value() * 10 / 100
podRes = corev1.ResourceList{}
podRes[corev1.ResourceCPU] = *resource.NewMilliQuantity(int64(milliCPU), resource.DecimalSI)
@ -294,12 +294,12 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
}
}()
By("Wait for pods to be scheduled.")
ginkgo.By("Wait for pods to be scheduled.")
for _, pod := range pods {
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(cs, pod))
}
By("Run a high priority pod with node affinity to the first node.")
ginkgo.By("Run a high priority pod with node affinity to the first node.")
// Create a high priority pod and make sure it is scheduled.
runPausePod(f, pausePodConfig{
Name: "preemptor-pod",
@ -327,12 +327,12 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
preemptedPod, err := cs.CoreV1().Pods(pods[0].Namespace).Get(pods[0].Name, metav1.GetOptions{})
podDeleted := (err != nil && errors.IsNotFound(err)) ||
(err == nil && preemptedPod.DeletionTimestamp != nil)
Expect(podDeleted).To(BeTrue())
gomega.Expect(podDeleted).To(gomega.BeTrue())
// Other pods (low priority ones) should be present.
for i := 1; i < len(pods); i++ {
livePod, err := cs.CoreV1().Pods(pods[i].Namespace).Get(pods[i].Name, metav1.GetOptions{})
framework.ExpectNoError(err)
Expect(livePod.DeletionTimestamp).To(BeNil())
gomega.Expect(livePod.DeletionTimestamp).To(gomega.BeNil())
}
})
})
@ -342,7 +342,7 @@ var _ = SIGDescribe("PodPriorityResolution [Serial]", func() {
var ns string
f := framework.NewDefaultFramework("sched-pod-priority")
BeforeEach(func() {
ginkgo.BeforeEach(func() {
cs = f.ClientSet
ns = f.Namespace.Name
@ -351,9 +351,9 @@ var _ = SIGDescribe("PodPriorityResolution [Serial]", func() {
})
// This test verifies that system critical priorities are created automatically and resolved properly.
It("validates critical system priorities are created and resolved", func() {
ginkgo.It("validates critical system priorities are created and resolved", func() {
// Create pods that use system critical priorities and
By("Create pods that use critical system priorities.")
ginkgo.By("Create pods that use critical system priorities.")
systemPriorityClasses := []string{
scheduling.SystemNodeCritical, scheduling.SystemClusterCritical,
}
@ -368,7 +368,7 @@ var _ = SIGDescribe("PodPriorityResolution [Serial]", func() {
err := f.ClientSet.CoreV1().Pods(pod.Namespace).Delete(pod.Name, metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
}()
Expect(pod.Spec.Priority).NotTo(BeNil())
gomega.Expect(pod.Spec.Priority).NotTo(gomega.BeNil())
e2elog.Logf("Created pod: %v", pod.Name)
}
})
@ -386,9 +386,9 @@ var _ = SIGDescribe("PreemptionExecutionPath", func() {
priorityPairs := make([]priorityPair, 0)
AfterEach(func() {
ginkgo.AfterEach(func() {
// print out additional info if tests failed
if CurrentGinkgoTestDescription().Failed {
if ginkgo.CurrentGinkgoTestDescription().Failed {
// list existing priorities
priorityList, err := cs.SchedulingV1().PriorityClasses().List(metav1.ListOptions{})
if err != nil {
@ -414,12 +414,12 @@ var _ = SIGDescribe("PreemptionExecutionPath", func() {
}
})
BeforeEach(func() {
ginkgo.BeforeEach(func() {
cs = f.ClientSet
ns = f.Namespace.Name
// find an available node
By("Finding an available node")
ginkgo.By("Finding an available node")
nodeName := GetNodeThatCanRunPod(f)
e2elog.Logf("found a healthy node: %s", nodeName)
@ -453,11 +453,11 @@ var _ = SIGDescribe("PreemptionExecutionPath", func() {
e2elog.Logf("Failed to create priority '%v/%v': %v", priorityName, priorityVal, err)
e2elog.Logf("Reason: %v. Msg: %v", errors.ReasonForError(err), err)
}
Expect(err == nil || errors.IsAlreadyExists(err)).To(Equal(true))
gomega.Expect(err == nil || errors.IsAlreadyExists(err)).To(gomega.Equal(true))
}
})
It("runs ReplicaSets to verify preemption running path", func() {
ginkgo.It("runs ReplicaSets to verify preemption running path", func() {
podNamesSeen := make(map[string]struct{})
stopCh := make(chan struct{})

View File

@ -22,8 +22,8 @@ import (
"math"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
_ "github.com/stretchr/testify/assert"
"k8s.io/api/core/v1"
@ -66,10 +66,10 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
var ns string
f := framework.NewDefaultFramework("sched-priority")
AfterEach(func() {
ginkgo.AfterEach(func() {
})
BeforeEach(func() {
ginkgo.BeforeEach(func() {
cs = f.ClientSet
ns = f.Namespace.Name
nodeList = &v1.NodeList{}
@ -80,18 +80,18 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
err := framework.CheckTestingNSDeletedExcept(cs, ns)
framework.ExpectNoError(err)
err = framework.WaitForPodsRunningReady(cs, metav1.NamespaceSystem, int32(systemPodsNo), 0, framework.PodReadyBeforeTimeout, map[string]string{})
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
})
It("Pod should be scheduled to node that don't match the PodAntiAffinity terms", func() {
By("Trying to launch a pod with a label to get a node which can launch it.")
ginkgo.It("Pod should be scheduled to node that don't match the PodAntiAffinity terms", func() {
ginkgo.By("Trying to launch a pod with a label to get a node which can launch it.")
pod := runPausePod(f, pausePodConfig{
Name: "pod-with-label-security-s1",
Labels: map[string]string{"security": "S1"},
})
nodeName := pod.Spec.NodeName
By("Trying to apply a label on the found node.")
ginkgo.By("Trying to apply a label on the found node.")
k := fmt.Sprintf("kubernetes.io/e2e-%s", "node-topologyKey")
v := "topologyvalue"
framework.AddOrUpdateLabelOnNode(cs, nodeName, k, v)
@ -100,7 +100,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
// make the nodes have balanced cpu,mem usage
err := createBalancedPodForNodes(f, cs, ns, nodeList.Items, podRequestedResource, 0.6)
framework.ExpectNoError(err)
By("Trying to launch the pod with podAntiAffinity.")
ginkgo.By("Trying to launch the pod with podAntiAffinity.")
labelPodName := "pod-with-pod-antiaffinity"
pod = createPausePod(f, pausePodConfig{
Resources: podRequestedResource,
@ -136,20 +136,20 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
},
},
})
By("Wait the pod becomes running")
ginkgo.By("Wait the pod becomes running")
framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
labelPod, err := cs.CoreV1().Pods(ns).Get(labelPodName, metav1.GetOptions{})
framework.ExpectNoError(err)
By("Verify the pod was scheduled to the expected node.")
Expect(labelPod.Spec.NodeName).NotTo(Equal(nodeName))
ginkgo.By("Verify the pod was scheduled to the expected node.")
gomega.Expect(labelPod.Spec.NodeName).NotTo(gomega.Equal(nodeName))
})
It("Pod should avoid nodes that have avoidPod annotation", func() {
ginkgo.It("Pod should avoid nodes that have avoidPod annotation", func() {
nodeName := nodeList.Items[0].Name
// make the nodes have balanced cpu,mem usage
err := createBalancedPodForNodes(f, cs, ns, nodeList.Items, podRequestedResource, 0.5)
framework.ExpectNoError(err)
By("Create a RC, with 0 replicas")
ginkgo.By("Create a RC, with 0 replicas")
rc := createRC(ns, "scheduler-priority-avoid-pod", int32(0), map[string]string{"name": "scheduler-priority-avoid-pod"}, f, podRequestedResource)
// Cleanup the replication controller when we are done.
defer func() {
@ -159,7 +159,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
}
}()
By("Trying to apply avoidPod annotations on the first node.")
ginkgo.By("Trying to apply avoidPod annotations on the first node.")
avoidPod := v1.AvoidPods{
PreferAvoidPods: []v1.PreferAvoidPodsEntry{
{
@ -189,30 +189,30 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
return node.Annotations[v1.PreferAvoidPodsAnnotationKey] == string(val)
}
success, err := common.ObserveNodeUpdateAfterAction(f, nodeName, predicate, action)
Expect(err).NotTo(HaveOccurred())
Expect(success).To(Equal(true))
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Expect(success).To(gomega.Equal(true))
defer framework.RemoveAvoidPodsOffNode(cs, nodeName)
By(fmt.Sprintf("Scale the RC: %s to len(nodeList.Item)-1 : %v.", rc.Name, len(nodeList.Items)-1))
ginkgo.By(fmt.Sprintf("Scale the RC: %s to len(nodeList.Item)-1 : %v.", rc.Name, len(nodeList.Items)-1))
framework.ScaleRC(f.ClientSet, f.ScalesGetter, ns, rc.Name, uint(len(nodeList.Items)-1), true)
testPods, err := cs.CoreV1().Pods(ns).List(metav1.ListOptions{
LabelSelector: "name=scheduler-priority-avoid-pod",
})
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Verify the pods should not scheduled to the node: %s", nodeName))
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By(fmt.Sprintf("Verify the pods should not scheduled to the node: %s", nodeName))
for _, pod := range testPods.Items {
Expect(pod.Spec.NodeName).NotTo(Equal(nodeName))
gomega.Expect(pod.Spec.NodeName).NotTo(gomega.Equal(nodeName))
}
})
It("Pod should be preferably scheduled to nodes pod can tolerate", func() {
ginkgo.It("Pod should be preferably scheduled to nodes pod can tolerate", func() {
// make the nodes have balanced cpu,mem usage ratio
err := createBalancedPodForNodes(f, cs, ns, nodeList.Items, podRequestedResource, 0.5)
framework.ExpectNoError(err)
//we need to apply more taints on a node, because one matching toleration only counts as 1
By("Trying to apply 10 taint on the nodes except first one.")
ginkgo.By("Trying to apply 10 taint on the nodes except first one.")
nodeName := nodeList.Items[0].Name
for index, node := range nodeList.Items {
@ -224,19 +224,19 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
defer framework.RemoveTaintOffNode(cs, node.Name, *testTaint)
}
}
By("Create a pod without any tolerations")
ginkgo.By("Create a pod without any tolerations")
tolerationPodName := "without-tolerations"
pod := createPausePod(f, pausePodConfig{
Name: tolerationPodName,
})
framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
By("Pod should prefer scheduled to the node don't have the taint.")
ginkgo.By("Pod should prefer scheduled to the node don't have the taint.")
tolePod, err := cs.CoreV1().Pods(ns).Get(tolerationPodName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
Expect(tolePod.Spec.NodeName).To(Equal(nodeName))
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Expect(tolePod.Spec.NodeName).To(gomega.Equal(nodeName))
By("Trying to apply 10 taint on the first node.")
ginkgo.By("Trying to apply 10 taint on the first node.")
var tolerations []v1.Toleration
for i := 0; i < 10; i++ {
testTaint := addRandomTaitToNode(cs, nodeName)
@ -244,17 +244,17 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
defer framework.RemoveTaintOffNode(cs, nodeName, *testTaint)
}
tolerationPodName = "with-tolerations"
By("Create a pod that tolerates all the taints of the first node.")
ginkgo.By("Create a pod that tolerates all the taints of the first node.")
pod = createPausePod(f, pausePodConfig{
Name: tolerationPodName,
Tolerations: tolerations,
})
framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
By("Pod should prefer scheduled to the node that pod can tolerate.")
ginkgo.By("Pod should prefer scheduled to the node that pod can tolerate.")
tolePod, err = cs.CoreV1().Pods(ns).Get(tolerationPodName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
Expect(tolePod.Spec.NodeName).To(Equal(nodeName))
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Expect(tolePod.Spec.NodeName).To(gomega.Equal(nodeName))
})
})
@ -279,11 +279,11 @@ func createBalancedPodForNodes(f *framework.Framework, cs clientset.Interface, n
ratio = math.Max(maxCPUFraction, maxMemFraction)
for _, node := range nodes {
memAllocatable, found := node.Status.Allocatable[v1.ResourceMemory]
Expect(found).To(Equal(true))
gomega.Expect(found).To(gomega.Equal(true))
memAllocatableVal := memAllocatable.Value()
cpuAllocatable, found := node.Status.Allocatable[v1.ResourceCPU]
Expect(found).To(Equal(true))
gomega.Expect(found).To(gomega.Equal(true))
cpuAllocatableMil := cpuAllocatable.MilliValue()
needCreateResource := v1.ResourceList{}
@@ -310,7 +310,7 @@ func createBalancedPodForNodes(f *framework.Framework, cs clientset.Interface, n
}
for _, node := range nodes {
By("Compute Cpu, Mem Fraction after create balanced pods.")
ginkgo.By("Compute Cpu, Mem Fraction after create balanced pods.")
computeCpuMemFraction(cs, node, requestedResource)
}
@@ -337,7 +337,7 @@ func computeCpuMemFraction(cs clientset.Interface, node v1.Node, resource *v1.Re
}
}
cpuAllocatable, found := node.Status.Allocatable[v1.ResourceCPU]
Expect(found).To(Equal(true))
gomega.Expect(found).To(gomega.Equal(true))
cpuAllocatableMil := cpuAllocatable.MilliValue()
floatOne := float64(1)
@@ -346,7 +346,7 @@ func computeCpuMemFraction(cs clientset.Interface, node v1.Node, resource *v1.Re
cpuFraction = floatOne
}
memAllocatable, found := node.Status.Allocatable[v1.ResourceMemory]
Expect(found).To(Equal(true))
gomega.Expect(found).To(gomega.Equal(true))
memAllocatableVal := memAllocatable.Value()
memFraction := float64(totalRequestedMemResource) / float64(memAllocatableVal)
if memFraction > floatOne {
@@ -398,7 +398,7 @@ func createRC(ns, rsName string, replicas int32, rcPodLabels map[string]string,
},
}
rc, err := f.ClientSet.CoreV1().ReplicationControllers(ns).Create(rc)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
return rc
}
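
The balancing logic above boils down to one ratio: the total requested CPU (or memory) on a node divided by the node's allocatable amount, clamped to 1 when requests exceed capacity; createBalancedPodForNodes uses it to decide how much each node still needs before every node sits at a common ratio. A minimal, framework-free sketch of that arithmetic follows (the zero-allocatable branch is my assumption, not taken from the diff):

package scheduling

// fractionOfAllocatable mirrors the ratio computed by computeCpuMemFraction above:
// total requested resource divided by the node's allocatable amount, clamped to 1.0.
func fractionOfAllocatable(totalRequested, allocatable int64) float64 {
	if allocatable <= 0 {
		return 1.0 // assumed: treat a node reporting no allocatable resource as fully used
	}
	fraction := float64(totalRequested) / float64(allocatable)
	if fraction > 1.0 {
		fraction = 1.0
	}
	return fraction
}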

View File

@@ -28,7 +28,7 @@ import (
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
)
func newUnreachableNoExecuteTaint() *v1.Taint {
@@ -52,7 +52,7 @@ var _ = SIGDescribe("TaintBasedEvictions [Serial]", func() {
var cs clientset.Interface
var ns string
BeforeEach(func() {
ginkgo.BeforeEach(func() {
cs = f.ClientSet
ns = f.Namespace.Name
// skip if TaintBasedEvictions is not enabled
@@ -72,10 +72,10 @@ var _ = SIGDescribe("TaintBasedEvictions [Serial]", func() {
// When network issue recovers, it's expected to see:
// 5. node lifecycle manager generate a status change: [NodeReady=true, status=ConditionTrue]
// 6. node.kubernetes.io/unreachable=:NoExecute taint is taken off the node
It("Checks that the node becomes unreachable", func() {
ginkgo.It("Checks that the node becomes unreachable", func() {
// find an available node
nodeName := GetNodeThatCanRunPod(f)
By("Finding an available node " + nodeName)
ginkgo.By("Finding an available node " + nodeName)
// pod0 is a pod with unschedulable=:NoExecute toleration, and tolerationSeconds=0s
// pod1 is a pod with unschedulable=:NoExecute toleration, and tolerationSeconds=200s
@@ -83,7 +83,7 @@ var _ = SIGDescribe("TaintBasedEvictions [Serial]", func() {
base := "taint-based-eviction"
tolerationSeconds := []int64{0, 200}
numPods := len(tolerationSeconds) + 1
By(fmt.Sprintf("Preparing %v pods", numPods))
ginkgo.By(fmt.Sprintf("Preparing %v pods", numPods))
pods := make([]*v1.Pod, numPods)
zero := int64(0)
// build pod0, pod1
@@ -108,7 +108,7 @@ var _ = SIGDescribe("TaintBasedEvictions [Serial]", func() {
NodeName: nodeName,
})
By("Verifying all pods are running properly")
ginkgo.By("Verifying all pods are running properly")
for _, pod := range pods {
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(cs, pod))
}
@@ -121,7 +121,7 @@ var _ = SIGDescribe("TaintBasedEvictions [Serial]", func() {
}
node := nodeList.Items[0]
By(fmt.Sprintf("Blocking traffic from node %s to the master", nodeName))
ginkgo.By(fmt.Sprintf("Blocking traffic from node %s to the master", nodeName))
host, err := framework.GetNodeExternalIP(&node)
// TODO(Huang-Wei): make this case work for local provider
// if err != nil {
@@ -132,19 +132,19 @@ var _ = SIGDescribe("TaintBasedEvictions [Serial]", func() {
taint := newUnreachableNoExecuteTaint()
defer func() {
By(fmt.Sprintf("Unblocking traffic from node %s to the master", node.Name))
ginkgo.By(fmt.Sprintf("Unblocking traffic from node %s to the master", node.Name))
for _, masterAddress := range masterAddresses {
framework.UnblockNetwork(host, masterAddress)
}
if CurrentGinkgoTestDescription().Failed {
if ginkgo.CurrentGinkgoTestDescription().Failed {
framework.Failf("Current e2e test has failed, so return from here.")
return
}
By(fmt.Sprintf("Expecting to see node %q becomes Ready", nodeName))
ginkgo.By(fmt.Sprintf("Expecting to see node %q becomes Ready", nodeName))
framework.WaitForNodeToBeReady(cs, nodeName, time.Minute*1)
By("Expecting to see unreachable=:NoExecute taint is taken off")
ginkgo.By("Expecting to see unreachable=:NoExecute taint is taken off")
err := framework.WaitForNodeHasTaintOrNot(cs, nodeName, taint, false, time.Second*30)
framework.ExpectNoError(err)
}()
@@ -153,15 +153,15 @@ var _ = SIGDescribe("TaintBasedEvictions [Serial]", func() {
framework.BlockNetwork(host, masterAddress)
}
By(fmt.Sprintf("Expecting to see node %q becomes NotReady", nodeName))
ginkgo.By(fmt.Sprintf("Expecting to see node %q becomes NotReady", nodeName))
if !framework.WaitForNodeToBeNotReady(cs, nodeName, time.Minute*3) {
framework.Failf("node %q doesn't turn to NotReady after 3 minutes", nodeName)
}
By("Expecting to see unreachable=:NoExecute taint is applied")
ginkgo.By("Expecting to see unreachable=:NoExecute taint is applied")
err = framework.WaitForNodeHasTaintOrNot(cs, nodeName, taint, true, time.Second*30)
framework.ExpectNoError(err)
By("Expecting pod0 to be evicted immediately")
ginkgo.By("Expecting pod0 to be evicted immediately")
err = framework.WaitForPodCondition(cs, ns, pods[0].Name, "pod0 terminating", time.Second*15, func(pod *v1.Pod) (bool, error) {
// as node is unreachable, pod0 is expected to be in Terminating status
// rather than getting deleted
@@ -172,7 +172,7 @@ var _ = SIGDescribe("TaintBasedEvictions [Serial]", func() {
})
framework.ExpectNoError(err)
By("Expecting pod2 to be updated with a toleration with tolerationSeconds=300")
ginkgo.By("Expecting pod2 to be updated with a toleration with tolerationSeconds=300")
err = framework.WaitForPodCondition(cs, ns, pods[2].Name, "pod2 updated with tolerationSeconds=300", time.Second*15, func(pod *v1.Pod) (bool, error) {
if seconds, err := getTolerationSeconds(pod.Spec.Tolerations); err == nil {
return seconds == 300, nil
@@ -181,7 +181,7 @@ var _ = SIGDescribe("TaintBasedEvictions [Serial]", func() {
})
framework.ExpectNoError(err)
By("Expecting pod1 to be unchanged")
ginkgo.By("Expecting pod1 to be unchanged")
livePod1, err := cs.CoreV1().Pods(pods[1].Namespace).Get(pods[1].Name, metav1.GetOptions{})
framework.ExpectNoError(err)
seconds, err := getTolerationSeconds(livePod1.Spec.Tolerations)
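
For context, every toleration this test manipulates (tolerationSeconds 0 and 200 on the prepared pods, plus the defaulted 300 that pod2 is expected to pick up) targets the node.kubernetes.io/unreachable:NoExecute taint described above. A hedged sketch of such a toleration, assuming the standard key spelled out in the comments; this helper is not part of the diff:

package scheduling

import v1 "k8s.io/api/core/v1"

// unreachableTolerationFor builds the kind of toleration pod0/pod1 above carry:
// tolerate the node.kubernetes.io/unreachable:NoExecute taint for a bounded number
// of seconds, after which the NoExecute taint manager may evict the pod.
func unreachableTolerationFor(seconds int64) v1.Toleration {
	return v1.Toleration{
		Key:               "node.kubernetes.io/unreachable",
		Operator:          v1.TolerationOpExists,
		Effect:            v1.TaintEffectNoExecute,
		TolerationSeconds: &seconds,
	}
}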

View File

@@ -19,7 +19,7 @@ package scheduling
import (
"time"
. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
_ "github.com/stretchr/testify/assert"
"k8s.io/api/core/v1"
@@ -155,7 +155,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
var ns string
f := framework.NewDefaultFramework("taint-single-pod")
BeforeEach(func() {
ginkgo.BeforeEach(func() {
cs = f.ClientSet
ns = f.Namespace.Name
@@ -168,26 +168,26 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
// 1. Run a pod
// 2. Taint the node running this pod with a no-execute taint
// 3. See if pod will get evicted
It("evicts pods from tainted nodes", func() {
ginkgo.It("evicts pods from tainted nodes", func() {
podName := "taint-eviction-1"
pod := createPodForTaintsTest(false, 0, podName, podName, ns)
observedDeletions := make(chan string, 100)
stopCh := make(chan struct{})
createTestController(cs, observedDeletions, stopCh, podName, ns)
By("Starting pod...")
ginkgo.By("Starting pod...")
nodeName, err := testutils.RunPodAndGetNodeName(cs, pod, 2*time.Minute)
framework.ExpectNoError(err)
e2elog.Logf("Pod is running on %v. Tainting Node", nodeName)
By("Trying to apply a taint on the Node")
ginkgo.By("Trying to apply a taint on the Node")
testTaint := getTestTaint()
framework.AddOrUpdateTaintOnNode(cs, nodeName, testTaint)
framework.ExpectNodeHasTaint(cs, nodeName, &testTaint)
defer framework.RemoveTaintOffNode(cs, nodeName, testTaint)
// Wait a bit
By("Waiting for Pod to be deleted")
ginkgo.By("Waiting for Pod to be deleted")
timeoutChannel := time.NewTimer(time.Duration(KubeletPodDeletionDelaySeconds+AdditionalWaitPerDeleteSeconds) * time.Second).C
select {
case <-timeoutChannel:
@@ -200,26 +200,26 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
// 1. Run a pod with toleration
// 2. Taint the node running this pod with a no-execute taint
// 3. See if pod won't get evicted
It("doesn't evict pod with tolerations from tainted nodes", func() {
ginkgo.It("doesn't evict pod with tolerations from tainted nodes", func() {
podName := "taint-eviction-2"
pod := createPodForTaintsTest(true, 0, podName, podName, ns)
observedDeletions := make(chan string, 100)
stopCh := make(chan struct{})
createTestController(cs, observedDeletions, stopCh, podName, ns)
By("Starting pod...")
ginkgo.By("Starting pod...")
nodeName, err := testutils.RunPodAndGetNodeName(cs, pod, 2*time.Minute)
framework.ExpectNoError(err)
e2elog.Logf("Pod is running on %v. Tainting Node", nodeName)
By("Trying to apply a taint on the Node")
ginkgo.By("Trying to apply a taint on the Node")
testTaint := getTestTaint()
framework.AddOrUpdateTaintOnNode(cs, nodeName, testTaint)
framework.ExpectNodeHasTaint(cs, nodeName, &testTaint)
defer framework.RemoveTaintOffNode(cs, nodeName, testTaint)
// Wait a bit
By("Waiting for Pod to be deleted")
ginkgo.By("Waiting for Pod to be deleted")
timeoutChannel := time.NewTimer(time.Duration(KubeletPodDeletionDelaySeconds+AdditionalWaitPerDeleteSeconds) * time.Second).C
select {
case <-timeoutChannel:
@@ -233,26 +233,26 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
// 2. Taint the node running this pod with a no-execute taint
// 3. See if pod won't get evicted before toleration time runs out
// 4. See if pod will get evicted after toleration time runs out
It("eventually evict pod with finite tolerations from tainted nodes", func() {
ginkgo.It("eventually evict pod with finite tolerations from tainted nodes", func() {
podName := "taint-eviction-3"
pod := createPodForTaintsTest(true, KubeletPodDeletionDelaySeconds+2*AdditionalWaitPerDeleteSeconds, podName, podName, ns)
observedDeletions := make(chan string, 100)
stopCh := make(chan struct{})
createTestController(cs, observedDeletions, stopCh, podName, ns)
By("Starting pod...")
ginkgo.By("Starting pod...")
nodeName, err := testutils.RunPodAndGetNodeName(cs, pod, 2*time.Minute)
framework.ExpectNoError(err)
e2elog.Logf("Pod is running on %v. Tainting Node", nodeName)
By("Trying to apply a taint on the Node")
ginkgo.By("Trying to apply a taint on the Node")
testTaint := getTestTaint()
framework.AddOrUpdateTaintOnNode(cs, nodeName, testTaint)
framework.ExpectNodeHasTaint(cs, nodeName, &testTaint)
defer framework.RemoveTaintOffNode(cs, nodeName, testTaint)
// Wait a bit
By("Waiting to see if a Pod won't be deleted")
ginkgo.By("Waiting to see if a Pod won't be deleted")
timeoutChannel := time.NewTimer(time.Duration(KubeletPodDeletionDelaySeconds+AdditionalWaitPerDeleteSeconds) * time.Second).C
select {
case <-timeoutChannel:
@@ -261,7 +261,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
framework.Failf("Pod was evicted despite toleration")
return
}
By("Waiting for Pod to be deleted")
ginkgo.By("Waiting for Pod to be deleted")
timeoutChannel = time.NewTimer(time.Duration(KubeletPodDeletionDelaySeconds+AdditionalWaitPerDeleteSeconds) * time.Second).C
select {
case <-timeoutChannel:
@@ -277,19 +277,19 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
// 3. Wait some time
// 4. Remove the taint
// 5. See if Pod won't be evicted.
It("removing taint cancels eviction", func() {
ginkgo.It("removing taint cancels eviction", func() {
podName := "taint-eviction-4"
pod := createPodForTaintsTest(true, 2*AdditionalWaitPerDeleteSeconds, podName, podName, ns)
observedDeletions := make(chan string, 100)
stopCh := make(chan struct{})
createTestController(cs, observedDeletions, stopCh, podName, ns)
By("Starting pod...")
ginkgo.By("Starting pod...")
nodeName, err := testutils.RunPodAndGetNodeName(cs, pod, 2*time.Minute)
framework.ExpectNoError(err)
e2elog.Logf("Pod is running on %v. Tainting Node", nodeName)
By("Trying to apply a taint on the Node")
ginkgo.By("Trying to apply a taint on the Node")
testTaint := getTestTaint()
framework.AddOrUpdateTaintOnNode(cs, nodeName, testTaint)
framework.ExpectNodeHasTaint(cs, nodeName, &testTaint)
@@ -301,7 +301,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
}()
// Wait a bit
By("Waiting short time to make sure Pod is queued for deletion")
ginkgo.By("Waiting short time to make sure Pod is queued for deletion")
timeoutChannel := time.NewTimer(AdditionalWaitPerDeleteSeconds).C
select {
case <-timeoutChannel:
@@ -313,7 +313,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
e2elog.Logf("Removing taint from Node")
framework.RemoveTaintOffNode(cs, nodeName, testTaint)
taintRemoved = true
By("Waiting some time to make sure that toleration time passed.")
ginkgo.By("Waiting some time to make sure that toleration time passed.")
timeoutChannel = time.NewTimer(time.Duration(KubeletPodDeletionDelaySeconds+3*AdditionalWaitPerDeleteSeconds) * time.Second).C
select {
case <-timeoutChannel:
@@ -329,7 +329,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() {
var ns string
f := framework.NewDefaultFramework("taint-multiple-pods")
BeforeEach(func() {
ginkgo.BeforeEach(func() {
cs = f.ClientSet
ns = f.Namespace.Name
@@ -342,7 +342,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() {
// 1. Run two pods; one with toleration, one without toleration
// 2. Taint the nodes running those pods with a no-execute taint
// 3. See if pod-without-toleration get evicted, and pod-with-toleration is kept
It("only evicts pods without tolerations from tainted nodes", func() {
ginkgo.It("only evicts pods without tolerations from tainted nodes", func() {
podGroup := "taint-eviction-a"
observedDeletions := make(chan string, 100)
stopCh := make(chan struct{})
@@ -351,7 +351,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() {
pod1 := createPodForTaintsTest(false, 0, podGroup+"1", podGroup, ns)
pod2 := createPodForTaintsTest(true, 0, podGroup+"2", podGroup, ns)
By("Starting pods...")
ginkgo.By("Starting pods...")
nodeName1, err := testutils.RunPodAndGetNodeName(cs, pod1, 2*time.Minute)
framework.ExpectNoError(err)
e2elog.Logf("Pod1 is running on %v. Tainting Node", nodeName1)
@@ -359,7 +359,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() {
framework.ExpectNoError(err)
e2elog.Logf("Pod2 is running on %v. Tainting Node", nodeName2)
By("Trying to apply a taint on the Nodes")
ginkgo.By("Trying to apply a taint on the Nodes")
testTaint := getTestTaint()
framework.AddOrUpdateTaintOnNode(cs, nodeName1, testTaint)
framework.ExpectNodeHasTaint(cs, nodeName1, &testTaint)
@@ -371,7 +371,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() {
}
// Wait a bit
By("Waiting for Pod1 to be deleted")
ginkgo.By("Waiting for Pod1 to be deleted")
timeoutChannel := time.NewTimer(time.Duration(KubeletPodDeletionDelaySeconds+AdditionalWaitPerDeleteSeconds) * time.Second).C
var evicted int
for {
@@ -398,7 +398,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() {
// 1. Run two pods both with toleration; one with tolerationSeconds=5, the other with 25
// 2. Taint the nodes running those pods with a no-execute taint
// 3. See if both pods get evicted in between [5, 25] seconds
It("evicts pods with minTolerationSeconds", func() {
ginkgo.It("evicts pods with minTolerationSeconds", func() {
podGroup := "taint-eviction-b"
observedDeletions := make(chan string, 100)
stopCh := make(chan struct{})
@@ -407,7 +407,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() {
pod1 := createPodForTaintsTest(true, AdditionalWaitPerDeleteSeconds, podGroup+"1", podGroup, ns)
pod2 := createPodForTaintsTest(true, 5*AdditionalWaitPerDeleteSeconds, podGroup+"2", podGroup, ns)
By("Starting pods...")
ginkgo.By("Starting pods...")
nodeName, err := testutils.RunPodAndGetNodeName(cs, pod1, 2*time.Minute)
node, err := cs.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
framework.ExpectNoError(err)
@@ -423,14 +423,14 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() {
framework.ExpectNoError(err)
e2elog.Logf("Pod2 is running on %v. Tainting Node", nodeName)
By("Trying to apply a taint on the Node")
ginkgo.By("Trying to apply a taint on the Node")
testTaint := getTestTaint()
framework.AddOrUpdateTaintOnNode(cs, nodeName, testTaint)
framework.ExpectNodeHasTaint(cs, nodeName, &testTaint)
defer framework.RemoveTaintOffNode(cs, nodeName, testTaint)
// Wait a bit
By("Waiting for Pod1 and Pod2 to be deleted")
ginkgo.By("Waiting for Pod1 and Pod2 to be deleted")
timeoutChannel := time.NewTimer(time.Duration(KubeletPodDeletionDelaySeconds+3*AdditionalWaitPerDeleteSeconds) * time.Second).C
var evicted int
for evicted != 2 {
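
The observedDeletions channel used throughout these tests is fed by createTestController, whose implementation is outside this diff. The sketch below shows one plausible shape for it, assuming a plain client-go informer; the real helper presumably also narrows the watch to the pods under test, which is omitted here.

package scheduling

import (
	"time"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/fields"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// watchPodDeletions is an illustrative stand-in for createTestController: it watches
// pods in ns and pushes the name of every deleted pod onto observedDeletions until
// stopCh is closed; the select/timeout loops above then count the deletions that
// arrive within the allowed window.
func watchPodDeletions(cs clientset.Interface, observedDeletions chan string, stopCh chan struct{}, ns string) {
	_, controller := cache.NewInformer(
		cache.NewListWatchFromClient(cs.CoreV1().RESTClient(), "pods", ns, fields.Everything()),
		&v1.Pod{},
		30*time.Second,
		cache.ResourceEventHandlerFuncs{
			DeleteFunc: func(obj interface{}) {
				// obj can also be a cache.DeletedFinalStateUnknown tombstone; skip those here.
				if pod, ok := obj.(*v1.Pod); ok {
					observedDeletions <- pod.Name
				}
			},
		},
	)
	go controller.Run(stopCh)
}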

View File

@@ -20,8 +20,8 @@ import (
"fmt"
"math"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
@@ -39,22 +39,22 @@ var _ = SIGDescribe("Multi-AZ Clusters", func() {
var zoneCount int
var err error
image := framework.ServeHostnameImage
BeforeEach(func() {
ginkgo.BeforeEach(func() {
framework.SkipUnlessProviderIs("gce", "gke", "aws")
if zoneCount <= 0 {
zoneCount, err = getZoneCount(f.ClientSet)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
By(fmt.Sprintf("Checking for multi-zone cluster. Zone count = %d", zoneCount))
ginkgo.By(fmt.Sprintf("Checking for multi-zone cluster. Zone count = %d", zoneCount))
msg := fmt.Sprintf("Zone count is %d, only run for multi-zone clusters, skipping test", zoneCount)
framework.SkipUnlessAtLeast(zoneCount, 2, msg)
// TODO: SkipUnlessDefaultScheduler() // Non-default schedulers might not spread
})
It("should spread the pods of a service across zones", func() {
ginkgo.It("should spread the pods of a service across zones", func() {
SpreadServiceOrFail(f, (2*zoneCount)+1, image)
})
It("should spread the pods of a replication controller across zones", func() {
ginkgo.It("should spread the pods of a replication controller across zones", func() {
SpreadRCOrFail(f, int32((2*zoneCount)+1), image)
})
})
@@ -79,7 +79,7 @@ func SpreadServiceOrFail(f *framework.Framework, replicaCount int, image string)
},
}
_, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(serviceSpec)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// Now create some pods behind the service
podSpec := &v1.Pod{
@@ -106,12 +106,12 @@ func SpreadServiceOrFail(f *framework.Framework, replicaCount int, image string)
// Wait for all of them to be scheduled
selector := labels.SelectorFromSet(labels.Set(map[string]string{"service": serviceName}))
pods, err := framework.WaitForPodsWithLabelScheduled(f.ClientSet, f.Namespace.Name, selector)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// Now make sure they're spread across zones
zoneNames, err := framework.GetClusterZones(f.ClientSet)
Expect(err).NotTo(HaveOccurred())
Expect(checkZoneSpreading(f.ClientSet, pods, zoneNames.List())).To(Equal(true))
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Expect(checkZoneSpreading(f.ClientSet, pods, zoneNames.List())).To(gomega.Equal(true))
}
// Find the name of the zone in which a Node is running
@@ -136,9 +136,9 @@ func getZoneCount(c clientset.Interface) (int, error) {
// Find the name of the zone in which the pod is scheduled
func getZoneNameForPod(c clientset.Interface, pod v1.Pod) (string, error) {
By(fmt.Sprintf("Getting zone name for pod %s, on node %s", pod.Name, pod.Spec.NodeName))
ginkgo.By(fmt.Sprintf("Getting zone name for pod %s, on node %s", pod.Name, pod.Spec.NodeName))
node, err := c.CoreV1().Nodes().Get(pod.Spec.NodeName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
return getZoneNameForNode(*node)
}
@@ -154,7 +154,7 @@ func checkZoneSpreading(c clientset.Interface, pods *v1.PodList, zoneNames []str
continue
}
zoneName, err := getZoneNameForPod(c, pod)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
podsPerZone[zoneName] = podsPerZone[zoneName] + 1
}
minPodsPerZone := math.MaxInt32
@@ -167,7 +167,7 @@ func checkZoneSpreading(c clientset.Interface, pods *v1.PodList, zoneNames []str
maxPodsPerZone = podCount
}
}
Expect(minPodsPerZone).To(BeNumerically("~", maxPodsPerZone, 1),
gomega.Expect(minPodsPerZone).To(gomega.BeNumerically("~", maxPodsPerZone, 1),
"Pods were not evenly spread across zones. %d in one zone and %d in another zone",
minPodsPerZone, maxPodsPerZone)
return true, nil
@@ -176,7 +176,7 @@ func checkZoneSpreading(c clientset.Interface, pods *v1.PodList, zoneNames []str
// Check that the pods comprising a replication controller get spread evenly across available zones
func SpreadRCOrFail(f *framework.Framework, replicaCount int32, image string) {
name := "ubelite-spread-rc-" + string(uuid.NewUUID())
By(fmt.Sprintf("Creating replication controller %s", name))
ginkgo.By(fmt.Sprintf("Creating replication controller %s", name))
controller, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(&v1.ReplicationController{
ObjectMeta: metav1.ObjectMeta{
Namespace: f.Namespace.Name,
@@ -203,7 +203,7 @@ func SpreadRCOrFail(f *framework.Framework, replicaCount int32, image string) {
},
},
})
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// Cleanup the replication controller when we are done.
defer func() {
// Resize the replication controller to zero to get rid of pods.
@@ -214,15 +214,15 @@ func SpreadRCOrFail(f *framework.Framework, replicaCount int32, image string) {
// List the pods, making sure we observe all the replicas.
selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, name, replicaCount)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// Wait for all of them to be scheduled
By(fmt.Sprintf("Waiting for %d replicas of %s to be scheduled. Selector: %v", replicaCount, name, selector))
ginkgo.By(fmt.Sprintf("Waiting for %d replicas of %s to be scheduled. Selector: %v", replicaCount, name, selector))
pods, err = framework.WaitForPodsWithLabelScheduled(f.ClientSet, f.Namespace.Name, selector)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// Now make sure they're spread across zones
zoneNames, err := framework.GetClusterZones(f.ClientSet)
Expect(err).NotTo(HaveOccurred())
Expect(checkZoneSpreading(f.ClientSet, pods, zoneNames.List())).To(Equal(true))
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Expect(checkZoneSpreading(f.ClientSet, pods, zoneNames.List())).To(gomega.Equal(true))
}
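
To make the evenness assertion in checkZoneSpreading concrete: with a three-zone cluster the tests above create 2*3+1 = 7 replicas, so a perfect spread is 3/2/2 and max-min = 1, which BeNumerically("~", maxPodsPerZone, 1) accepts, while a 4/3/0 split would fail. Below is a small, self-contained restatement of that check; it mirrors, not replaces, the function above, and assumes zones with no pods appear in the map with a count of 0.

package scheduling

// spreadWithinOne restates the evenness check used by checkZoneSpreading above:
// the busiest and the emptiest zone may differ by at most one pod.
func spreadWithinOne(podsPerZone map[string]int) bool {
	first := true
	minPods, maxPods := 0, 0
	for _, n := range podsPerZone {
		if first {
			minPods, maxPods = n, n
			first = false
			continue
		}
		if n < minPods {
			minPods = n
		}
		if n > maxPods {
			maxPods = n
		}
	}
	return maxPods-minPods <= 1
}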

View File

@@ -20,8 +20,8 @@ import (
"fmt"
"strconv"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
compute "google.golang.org/api/compute/v1"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
@@ -38,22 +38,22 @@ var _ = SIGDescribe("Multi-AZ Cluster Volumes [sig-storage]", func() {
var zoneCount int
var err error
image := framework.ServeHostnameImage
BeforeEach(func() {
ginkgo.BeforeEach(func() {
framework.SkipUnlessProviderIs("gce", "gke")
if zoneCount <= 0 {
zoneCount, err = getZoneCount(f.ClientSet)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
By(fmt.Sprintf("Checking for multi-zone cluster. Zone count = %d", zoneCount))
ginkgo.By(fmt.Sprintf("Checking for multi-zone cluster. Zone count = %d", zoneCount))
msg := fmt.Sprintf("Zone count is %d, only run for multi-zone clusters, skipping test", zoneCount)
framework.SkipUnlessAtLeast(zoneCount, 2, msg)
// TODO: SkipUnlessDefaultScheduler() // Non-default schedulers might not spread
})
It("should schedule pods in the same zones as statically provisioned PVs", func() {
ginkgo.It("should schedule pods in the same zones as statically provisioned PVs", func() {
PodsUseStaticPVsOrFail(f, (2*zoneCount)+1, image)
})
It("should only be allowed to provision PDs in zones where nodes exist", func() {
ginkgo.It("should only be allowed to provision PDs in zones where nodes exist", func() {
OnlyAllowNodeZones(f, zoneCount, image)
})
})
@@ -61,17 +61,17 @@ var _ = SIGDescribe("Multi-AZ Cluster Volumes [sig-storage]", func() {
// OnlyAllowNodeZones tests that GetAllCurrentZones returns only zones with Nodes
func OnlyAllowNodeZones(f *framework.Framework, zoneCount int, image string) {
gceCloud, err := gce.GetGCECloud()
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// Get all the zones that the nodes are in
expectedZones, err := gceCloud.GetAllZonesFromCloudProvider()
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
e2elog.Logf("Expected zones: %v", expectedZones)
// Get all the zones in this current region
region := gceCloud.Region()
allZonesInRegion, err := gceCloud.ListZonesInRegion(region)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
var extraZone string
for _, zone := range allZonesInRegion {
@@ -80,9 +80,9 @@ func OnlyAllowNodeZones(f *framework.Framework, zoneCount int, image string) {
break
}
}
Expect(extraZone).NotTo(Equal(""), fmt.Sprintf("No extra zones available in region %s", region))
gomega.Expect(extraZone).NotTo(gomega.Equal(""), fmt.Sprintf("No extra zones available in region %s", region))
By(fmt.Sprintf("starting a compute instance in unused zone: %v\n", extraZone))
ginkgo.By(fmt.Sprintf("starting a compute instance in unused zone: %v\n", extraZone))
project := framework.TestContext.CloudConfig.ProjectID
zone := extraZone
myuuid := string(uuid.NewUUID())
@@ -117,16 +117,16 @@ func OnlyAllowNodeZones(f *framework.Framework, zoneCount int, image string) {
}
err = gceCloud.InsertInstance(project, zone, rb)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
defer func() {
// Teardown of the compute instance
e2elog.Logf("Deleting compute resource: %v", name)
err := gceCloud.DeleteInstance(project, zone, name)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}()
By("Creating zoneCount+1 PVCs and making sure PDs are only provisioned in zones with nodes")
ginkgo.By("Creating zoneCount+1 PVCs and making sure PDs are only provisioned in zones with nodes")
// Create some (zoneCount+1) PVCs with names of form "pvc-x" where x is 1...zoneCount+1
// This will exploit ChooseZoneForVolume in pkg/volume/util.go to provision them in all the zones it "sees"
var pvcList []*v1.PersistentVolumeClaim
@@ -136,7 +136,7 @@ func OnlyAllowNodeZones(f *framework.Framework, zoneCount int, image string) {
for index := 1; index <= zoneCount+1; index++ {
pvc := newNamedDefaultClaim(ns, index)
pvc, err = framework.CreatePVC(c, ns, pvc)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
pvcList = append(pvcList, pvc)
// Defer the cleanup
@@ -152,25 +152,25 @@ func OnlyAllowNodeZones(f *framework.Framework, zoneCount int, image string) {
// Wait for all claims bound
for _, claim := range pvcList {
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
pvZones := sets.NewString()
By("Checking that PDs have been provisioned in only the expected zones")
ginkgo.By("Checking that PDs have been provisioned in only the expected zones")
for _, claim := range pvcList {
// Get a new copy of the claim to have all fields populated
claim, err = c.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// Get the related PV
pv, err := c.CoreV1().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
pvZone, ok := pv.ObjectMeta.Labels[v1.LabelZoneFailureDomain]
Expect(ok).To(BeTrue(), "PV has no LabelZone to be found")
gomega.Expect(ok).To(gomega.BeTrue(), "PV has no LabelZone to be found")
pvZones.Insert(pvZone)
}
Expect(pvZones.Equal(expectedZones)).To(BeTrue(), fmt.Sprintf("PDs provisioned in unwanted zones. We want zones: %v, got: %v", expectedZones, pvZones))
gomega.Expect(pvZones.Equal(expectedZones)).To(gomega.BeTrue(), fmt.Sprintf("PDs provisioned in unwanted zones. We want zones: %v, got: %v", expectedZones, pvZones))
}
type staticPVTestConfig struct {
@@ -187,16 +187,16 @@ func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string)
ns := f.Namespace.Name
zones, err := framework.GetClusterZones(c)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
zonelist := zones.List()
By("Creating static PVs across zones")
ginkgo.By("Creating static PVs across zones")
configs := make([]*staticPVTestConfig, podCount)
for i := range configs {
configs[i] = &staticPVTestConfig{}
}
defer func() {
By("Cleaning up pods and PVs")
ginkgo.By("Cleaning up pods and PVs")
for _, config := range configs {
framework.DeletePodOrFail(c, ns, config.pod.Name)
}
@@ -204,14 +204,14 @@ func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string)
framework.WaitForPodNoLongerRunningInNamespace(c, config.pod.Name, ns)
framework.PVPVCCleanup(c, ns, config.pv, config.pvc)
err = framework.DeletePVSource(config.pvSource)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
}()
for i, config := range configs {
zone := zonelist[i%len(zones)]
config.pvSource, err = framework.CreatePVSource(zone)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
pvConfig := framework.PersistentVolumeConfig{
NamePrefix: "multizone-pv",
@@ -222,25 +222,25 @@ func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string)
pvcConfig := framework.PersistentVolumeClaimConfig{StorageClassName: &className}
config.pv, config.pvc, err = framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, true)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
By("Waiting for all PVCs to be bound")
ginkgo.By("Waiting for all PVCs to be bound")
for _, config := range configs {
framework.WaitOnPVandPVC(c, ns, config.pv, config.pvc)
}
By("Creating pods for each static PV")
ginkgo.By("Creating pods for each static PV")
for _, config := range configs {
podConfig := framework.MakePod(ns, nil, []*v1.PersistentVolumeClaim{config.pvc}, false, "")
config.pod, err = c.CoreV1().Pods(ns).Create(podConfig)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
By("Waiting for all pods to be running")
ginkgo.By("Waiting for all pods to be running")
for _, config := range configs {
err = framework.WaitForPodRunningInNamespace(c, config.pod)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
}
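
Finally, the closing assertion of OnlyAllowNodeZones above reads as a simple set comparison: every provisioned PV must carry a failure-domain zone label, and the set of those zones must equal the set of zones that actually contain nodes. A compact, illustrative restatement of that check, not part of the diff:

package scheduling

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/sets"
)

// pdsOnlyInNodeZones distills the final check in OnlyAllowNodeZones: collect the
// zone label of every PV and compare that set against the zones that have nodes.
func pdsOnlyInNodeZones(pvs []*v1.PersistentVolume, nodeZones sets.String) bool {
	pvZones := sets.NewString()
	for _, pv := range pvs {
		zone, ok := pv.ObjectMeta.Labels[v1.LabelZoneFailureDomain]
		if !ok {
			return false // a PV without a zone label would fail the test above
		}
		pvZones.Insert(zone)
	}
	return pvZones.Equal(nodeZones)
}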