mirror of https://github.com/k3s-io/k3s
fix golint error in test/e2e/scheduling
parent 4cb4864487
commit ccecc67a5b
@@ -605,7 +605,6 @@ test/e2e/common
 test/e2e/framework
 test/e2e/lifecycle/bootstrap
 test/e2e/scalability
-test/e2e/scheduling
 test/e2e/storage/drivers
 test/e2e/storage/testsuites
 test/e2e/storage/utils
@@ -33,6 +33,7 @@ import (
 
 	"github.com/onsi/ginkgo"
 	"github.com/onsi/gomega"
+	// ensure libs have a chance to initialize
 	_ "github.com/stretchr/testify/assert"
 )
 
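The one-line addition in this hunk satisfies golint's blank-import check: a blank "_" import outside a main or test package should carry a comment explaining why it is kept. A minimal illustrative sketch of the rule, with a package name and import path chosen only for the example (not taken from the commit):

package example

import (
	"fmt"

	// ensure libs have a chance to initialize (the comment justifies the blank import for golint)
	_ "net/http/pprof"
)

// usePkg exists only so the file has ordinary code next to the blank import.
func usePkg() {
	fmt.Println("pprof handlers registered via the blank import")
}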
@@ -92,7 +93,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
 		nodeSelector := map[string]string{"kubernetes.io/hostname": nodeName}
 
 		ginkgo.By("One pod should be scheduled, the other should be rejected")
-		// CreateNodeSelectorPods creates RC with host port 4312
+		// CreateNodeSelectorPods creates RC with host port 4321
 		WaitForSchedulerAfterAction(f, func() error {
 			err := CreateNodeSelectorPods(f, rcName, 2, nodeSelector, false)
 			return err
@@ -269,6 +270,7 @@ func getRCWithInterPodAffinityNodeSelector(name string, labelsMap map[string]str
 	}
 }
 
+// CreateNodeSelectorPods creates RC with host port 4321 and defines node selector
 func CreateNodeSelectorPods(f *framework.Framework, id string, replicas int, nodeSelector map[string]string, expectRunning bool) error {
 	ginkgo.By(fmt.Sprintf("Running RC which reserves host port and defines node selector"))
 
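Several hunks in this commit, including the one above, add doc comments because golint reports "exported function X should have comment or be unexported", and it expects the comment to begin with the identifier's name. A hypothetical sketch of that rule, with names invented for the illustration:

package example

import "fmt"

// CreatePods creates the requested number of pod names; the comment starts
// with the function name, which is the form golint expects for exported identifiers.
func CreatePods(count int) []string {
	names := make([]string, 0, count)
	for i := 0; i < count; i++ {
		names = append(names, fmt.Sprintf("pod-%d", i))
	}
	return names
}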
@@ -18,6 +18,7 @@ package scheduling
 
 import "github.com/onsi/ginkgo"
 
+// SIGDescribe annotates the test with the SIG label.
 func SIGDescribe(text string, body func()) bool {
 	return ginkgo.Describe("[sig-scheduling] "+text, body)
 }
@@ -42,7 +42,7 @@ const (
 
 var (
 	gpuResourceName v1.ResourceName
-	dsYamlUrl       string
+	dsYamlURL       string
 )
 
 func makeCudaAdditionDevicePluginTestPod() *v1.Pod {
@@ -116,20 +116,21 @@ func getGPUsAvailable(f *framework.Framework) int64 {
 	return gpusAvailable
 }
 
+// SetupNVIDIAGPUNode install Nvidia Drivers and wait for Nvidia GPUs to be available on nodes
 func SetupNVIDIAGPUNode(f *framework.Framework, setupResourceGatherer bool) *framework.ContainerResourceGatherer {
 	logOSImages(f)
 
-	dsYamlUrlFromEnv := os.Getenv("NVIDIA_DRIVER_INSTALLER_DAEMONSET")
-	if dsYamlUrlFromEnv != "" {
-		dsYamlUrl = dsYamlUrlFromEnv
+	dsYamlURLFromEnv := os.Getenv("NVIDIA_DRIVER_INSTALLER_DAEMONSET")
+	if dsYamlURLFromEnv != "" {
+		dsYamlURL = dsYamlURLFromEnv
 	} else {
-		dsYamlUrl = "https://raw.githubusercontent.com/GoogleCloudPlatform/container-engine-accelerators/master/daemonset.yaml"
+		dsYamlURL = "https://raw.githubusercontent.com/GoogleCloudPlatform/container-engine-accelerators/master/daemonset.yaml"
 	}
 	gpuResourceName = gpu.NVIDIAGPUResourceName
 
-	e2elog.Logf("Using %v", dsYamlUrl)
+	e2elog.Logf("Using %v", dsYamlURL)
 	// Creates the DaemonSet that installs Nvidia Drivers.
-	ds, err := framework.DsFromManifest(dsYamlUrl)
+	ds, err := framework.DsFromManifest(dsYamlURL)
 	gomega.Expect(err).NotTo(gomega.HaveOccurred())
 	ds.Namespace = f.Namespace.Name
 	_, err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(ds)
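The dsYamlUrl renames above (and the later Cpu-to-CPU renames) follow golint's initialism rule: abbreviations such as URL, ID, HTTP and CPU keep a single consistent case inside identifiers. A small sketch of the rule; only dsYamlURL comes from the diff, the other names are invented for illustration:

package example

// golint flags names like dsYamlUrl, userId, or serverHttp; the accepted
// spellings keep the whole initialism in one case.
var (
	dsYamlURL  string // not dsYamlUrl
	userID     int64  // not userId
	serverHTTP string // not serverHttp
)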
@@ -36,6 +36,7 @@ import (
 
 	"github.com/onsi/ginkgo"
 	"github.com/onsi/gomega"
+	// ensure libs have a chance to initialize
 	_ "github.com/stretchr/testify/assert"
 )
 
@@ -727,9 +728,8 @@ func verifyResult(c clientset.Interface, expectedScheduled int, expectedNotSched
 		if !printed {
 			printed = true
 			return msg
-		} else {
-			return ""
 		}
+		return ""
 	}
 
 	gomega.Expect(len(notScheduledPods)).To(gomega.Equal(expectedNotScheduled), printOnce(fmt.Sprintf("Not scheduled Pods: %#v", notScheduledPods)))
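This hunk (and the matching one in verifyReplicasResult below) addresses golint's indent-error-flow warning: when an if block ends in a return, the else is redundant and its body should be outdented. A minimal before/after sketch of the same shape; the helper name and signature are illustrative, not the commit's exact code:

package example

// printOnceOld shows the shape golint complains about: the if branch returns,
// so the else only adds indentation.
func printOnceOld(printed *bool, msg string) string {
	if !*printed {
		*printed = true
		return msg
	} else {
		return ""
	}
}

// printOnceNew is the outdented form golint prefers.
func printOnceNew(printed *bool, msg string) string {
	if !*printed {
		*printed = true
		return msg
	}
	return ""
}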
@@ -746,9 +746,8 @@ func verifyReplicasResult(c clientset.Interface, expectedScheduled int, expected
 		if !printed {
 			printed = true
 			return msg
-		} else {
-			return ""
 		}
+		return ""
 	}
 
 	gomega.Expect(len(notScheduledPods)).To(gomega.Equal(expectedNotScheduled), printOnce(fmt.Sprintf("Not scheduled Pods: %#v", notScheduledPods)))
@@ -775,6 +774,7 @@ func runAndKeepPodWithLabelAndGetNodeName(f *framework.Framework) (string, strin
 	return pod.Spec.NodeName, pod.Name
 }
 
+// GetNodeThatCanRunPod trying to launch a pod without a label to get a node which can launch it
 func GetNodeThatCanRunPod(f *framework.Framework) string {
 	ginkgo.By("Trying to launch a pod without a label to get a node which can launch it.")
 	return runPodAndGetNodeName(f, pausePodConfig{Name: "without-label"})
@@ -785,6 +785,7 @@ func getNodeThatCanRunPodWithoutToleration(f *framework.Framework) string {
 	return runPodAndGetNodeName(f, pausePodConfig{Name: "without-toleration"})
 }
 
+// CreateHostPortPods creates RC with host port 4321
 func CreateHostPortPods(f *framework.Framework, id string, replicas int, expectRunning bool) {
 	ginkgo.By(fmt.Sprintf("Running RC which reserves host port"))
 	config := &testutils.RCConfig{
@@ -39,6 +39,7 @@ import (
 
 	"github.com/onsi/ginkgo"
 	"github.com/onsi/gomega"
+	// ensure libs have a chance to initialize
 	_ "github.com/stretchr/testify/assert"
 )
 
@@ -24,6 +24,7 @@ import (
 
 	"github.com/onsi/ginkgo"
 	"github.com/onsi/gomega"
+	// ensure libs have a chance to initialize
 	_ "github.com/stretchr/testify/assert"
 
 	"k8s.io/api/core/v1"
@@ -40,14 +41,15 @@ import (
 	imageutils "k8s.io/kubernetes/test/utils/image"
 )
 
+// Resource is a collection of compute resource.
 type Resource struct {
 	MilliCPU int64
 	Memory   int64
 }
 
-var balancePodLabel map[string]string = map[string]string{"name": "priority-balanced-memory"}
+var balancePodLabel = map[string]string{"name": "priority-balanced-memory"}
 
-var podRequestedResource *v1.ResourceRequirements = &v1.ResourceRequirements{
+var podRequestedResource = &v1.ResourceRequirements{
 	Limits: v1.ResourceList{
 		v1.ResourceMemory: resource.MustParse("100Mi"),
 		v1.ResourceCPU:    resource.MustParse("100m"),
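The two var changes above silence golint's "should omit type ... from declaration; it will be inferred from the right-hand side" warning. A sketch of the same pattern with a plain map; the label value is copied from the hunk, everything else is illustrative:

package example

// Flagged by golint: the declared type repeats what the literal already says.
var balanceLabelOld map[string]string = map[string]string{"name": "priority-balanced-memory"}

// Preferred: let the compiler infer the type from the right-hand side.
var balanceLabelNew = map[string]string{"name": "priority-balanced-memory"}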
@@ -265,7 +267,7 @@ func createBalancedPodForNodes(f *framework.Framework, cs clientset.Interface, n
 	var cpuFractionMap = make(map[string]float64)
 	var memFractionMap = make(map[string]float64)
 	for _, node := range nodes {
-		cpuFraction, memFraction := computeCpuMemFraction(cs, node, requestedResource)
+		cpuFraction, memFraction := computeCPUMemFraction(cs, node, requestedResource)
 		cpuFractionMap[node.Name] = cpuFraction
 		memFractionMap[node.Name] = memFraction
 		if cpuFraction > maxCPUFraction {
@@ -311,15 +313,15 @@ func createBalancedPodForNodes(f *framework.Framework, cs clientset.Interface, n
 
 	for _, node := range nodes {
 		ginkgo.By("Compute Cpu, Mem Fraction after create balanced pods.")
-		computeCpuMemFraction(cs, node, requestedResource)
+		computeCPUMemFraction(cs, node, requestedResource)
 	}
 
 	return nil
 }
 
-func computeCpuMemFraction(cs clientset.Interface, node v1.Node, resource *v1.ResourceRequirements) (float64, float64) {
-	e2elog.Logf("ComputeCpuMemFraction for node: %v", node.Name)
-	totalRequestedCpuResource := resource.Requests.Cpu().MilliValue()
+func computeCPUMemFraction(cs clientset.Interface, node v1.Node, resource *v1.ResourceRequirements) (float64, float64) {
+	e2elog.Logf("ComputeCPUMemFraction for node: %v", node.Name)
+	totalRequestedCPUResource := resource.Requests.Cpu().MilliValue()
 	totalRequestedMemResource := resource.Requests.Memory().Value()
 	allpods, err := cs.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{})
 	if err != nil {
@@ -332,7 +334,7 @@ func computeCpuMemFraction(cs clientset.Interface, node v1.Node, resource *v1.Re
 			if v1qos.GetPodQOS(&pod) == v1.PodQOSBestEffort {
 				continue
 			}
-			totalRequestedCpuResource += getNonZeroRequests(&pod).MilliCPU
+			totalRequestedCPUResource += getNonZeroRequests(&pod).MilliCPU
 			totalRequestedMemResource += getNonZeroRequests(&pod).Memory
 		}
 	}
@@ -341,7 +343,7 @@ func computeCpuMemFraction(cs clientset.Interface, node v1.Node, resource *v1.Re
 	cpuAllocatableMil := cpuAllocatable.MilliValue()
 
 	floatOne := float64(1)
-	cpuFraction := float64(totalRequestedCpuResource) / float64(cpuAllocatableMil)
+	cpuFraction := float64(totalRequestedCPUResource) / float64(cpuAllocatableMil)
 	if cpuFraction > floatOne {
 		cpuFraction = floatOne
 	}
@@ -353,7 +355,7 @@ func computeCpuMemFraction(cs clientset.Interface, node v1.Node, resource *v1.Re
 		memFraction = floatOne
 	}
 
-	e2elog.Logf("Node: %v, totalRequestedCpuResource: %v, cpuAllocatableMil: %v, cpuFraction: %v", node.Name, totalRequestedCpuResource, cpuAllocatableMil, cpuFraction)
+	e2elog.Logf("Node: %v, totalRequestedCPUResource: %v, cpuAllocatableMil: %v, cpuFraction: %v", node.Name, totalRequestedCPUResource, cpuAllocatableMil, cpuFraction)
 	e2elog.Logf("Node: %v, totalRequestedMemResource: %v, memAllocatableVal: %v, memFraction: %v", node.Name, totalRequestedMemResource, memAllocatableVal, memFraction)
 
 	return cpuFraction, memFraction
@@ -20,6 +20,7 @@ import (
 	"time"
 
 	"github.com/onsi/ginkgo"
+	// ensure libs have a chance to initialize
 	_ "github.com/stretchr/testify/assert"
 
 	"k8s.io/api/core/v1"
@@ -65,48 +66,46 @@ func createPodForTaintsTest(hasToleration bool, tolerationSeconds int, podName,
 				},
 			},
 		}
-	} else {
-		if tolerationSeconds <= 0 {
-			return &v1.Pod{
-				ObjectMeta: metav1.ObjectMeta{
-					Name:                       podName,
-					Namespace:                  ns,
-					Labels:                     map[string]string{"group": podLabel},
-					DeletionGracePeriodSeconds: &grace,
-					// default - tolerate forever
-				},
-				Spec: v1.PodSpec{
-					Containers: []v1.Container{
-						{
-							Name:  "pause",
-							Image: "k8s.gcr.io/pause:3.1",
-						},
+	}
+	if tolerationSeconds <= 0 {
+		return &v1.Pod{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:                       podName,
+				Namespace:                  ns,
+				Labels:                     map[string]string{"group": podLabel},
+				DeletionGracePeriodSeconds: &grace,
+				// default - tolerate forever
+			},
+			Spec: v1.PodSpec{
+				Containers: []v1.Container{
+					{
+						Name:  "pause",
+						Image: "k8s.gcr.io/pause:3.1",
 					},
-					Tolerations: []v1.Toleration{{Key: "kubernetes.io/e2e-evict-taint-key", Value: "evictTaintVal", Effect: v1.TaintEffectNoExecute}},
 				},
-			}
-		} else {
-			ts := int64(tolerationSeconds)
-			return &v1.Pod{
-				ObjectMeta: metav1.ObjectMeta{
-					Name:                       podName,
-					Namespace:                  ns,
-					Labels:                     map[string]string{"group": podLabel},
-					DeletionGracePeriodSeconds: &grace,
-				},
-				Spec: v1.PodSpec{
-					Containers: []v1.Container{
-						{
-							Name:  "pause",
-							Image: "k8s.gcr.io/pause:3.1",
-						},
-					},
-					// default - tolerate forever
-					Tolerations: []v1.Toleration{{Key: "kubernetes.io/e2e-evict-taint-key", Value: "evictTaintVal", Effect: v1.TaintEffectNoExecute, TolerationSeconds: &ts}},
-				},
-			}
+				Tolerations: []v1.Toleration{{Key: "kubernetes.io/e2e-evict-taint-key", Value: "evictTaintVal", Effect: v1.TaintEffectNoExecute}},
+			},
 		}
 	}
+	ts := int64(tolerationSeconds)
+	return &v1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:                       podName,
+			Namespace:                  ns,
+			Labels:                     map[string]string{"group": podLabel},
+			DeletionGracePeriodSeconds: &grace,
+		},
+		Spec: v1.PodSpec{
+			Containers: []v1.Container{
+				{
+					Name:  "pause",
+					Image: "k8s.gcr.io/pause:3.1",
+				},
+			},
+			// default - tolerate forever
+			Tolerations: []v1.Toleration{{Key: "kubernetes.io/e2e-evict-taint-key", Value: "evictTaintVal", Effect: v1.TaintEffectNoExecute, TolerationSeconds: &ts}},
+		},
+	}
 }
 
 // Creates and starts a controller (informer) that watches updates on a pod in given namespace with given name. It puts a new
@@ -141,8 +140,8 @@ func createTestController(cs clientset.Interface, observedDeletions chan string,
 }
 
 const (
-	KubeletPodDeletionDelaySeconds = 60
-	AdditionalWaitPerDeleteSeconds = 5
+	kubeletPodDeletionDelaySeconds = 60
+	additionalWaitPerDeleteSeconds = 5
 )
 
 // Tests the behavior of NoExecuteTaintManager. Following scenarios are included:
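Renaming KubeletPodDeletionDelaySeconds and AdditionalWaitPerDeleteSeconds to lower-case identifiers keeps them unexported, so golint no longer demands a doc comment for each of them. A short sketch of the trade-off, assuming the constants are only used inside the package:

package example

// Exported constants would each need a doc comment to satisfy golint, e.g.:
//   // KubeletPodDeletionDelaySeconds is ...
// Unexported names sidestep that requirement for package-internal values.
const (
	kubeletPodDeletionDelaySeconds = 60
	additionalWaitPerDeleteSeconds = 5
)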
@@ -188,7 +187,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
 
 		// Wait a bit
 		ginkgo.By("Waiting for Pod to be deleted")
-		timeoutChannel := time.NewTimer(time.Duration(KubeletPodDeletionDelaySeconds+AdditionalWaitPerDeleteSeconds) * time.Second).C
+		timeoutChannel := time.NewTimer(time.Duration(kubeletPodDeletionDelaySeconds+additionalWaitPerDeleteSeconds) * time.Second).C
 		select {
 		case <-timeoutChannel:
 			framework.Failf("Failed to evict Pod")
@@ -220,7 +219,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
 
 		// Wait a bit
 		ginkgo.By("Waiting for Pod to be deleted")
-		timeoutChannel := time.NewTimer(time.Duration(KubeletPodDeletionDelaySeconds+AdditionalWaitPerDeleteSeconds) * time.Second).C
+		timeoutChannel := time.NewTimer(time.Duration(kubeletPodDeletionDelaySeconds+additionalWaitPerDeleteSeconds) * time.Second).C
 		select {
 		case <-timeoutChannel:
 			e2elog.Logf("Pod wasn't evicted. Test successful")
@@ -235,7 +234,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
 	// 4. See if pod will get evicted after toleration time runs out
 	ginkgo.It("eventually evict pod with finite tolerations from tainted nodes", func() {
 		podName := "taint-eviction-3"
-		pod := createPodForTaintsTest(true, KubeletPodDeletionDelaySeconds+2*AdditionalWaitPerDeleteSeconds, podName, podName, ns)
+		pod := createPodForTaintsTest(true, kubeletPodDeletionDelaySeconds+2*additionalWaitPerDeleteSeconds, podName, podName, ns)
 		observedDeletions := make(chan string, 100)
 		stopCh := make(chan struct{})
 		createTestController(cs, observedDeletions, stopCh, podName, ns)
@@ -253,7 +252,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
 
 		// Wait a bit
 		ginkgo.By("Waiting to see if a Pod won't be deleted")
-		timeoutChannel := time.NewTimer(time.Duration(KubeletPodDeletionDelaySeconds+AdditionalWaitPerDeleteSeconds) * time.Second).C
+		timeoutChannel := time.NewTimer(time.Duration(kubeletPodDeletionDelaySeconds+additionalWaitPerDeleteSeconds) * time.Second).C
 		select {
 		case <-timeoutChannel:
 			e2elog.Logf("Pod wasn't evicted")
@@ -262,7 +261,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
 			return
 		}
 		ginkgo.By("Waiting for Pod to be deleted")
-		timeoutChannel = time.NewTimer(time.Duration(KubeletPodDeletionDelaySeconds+AdditionalWaitPerDeleteSeconds) * time.Second).C
+		timeoutChannel = time.NewTimer(time.Duration(kubeletPodDeletionDelaySeconds+additionalWaitPerDeleteSeconds) * time.Second).C
 		select {
 		case <-timeoutChannel:
 			framework.Failf("Pod wasn't evicted")
@@ -279,7 +278,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
 	// 5. See if Pod won't be evicted.
 	ginkgo.It("removing taint cancels eviction", func() {
 		podName := "taint-eviction-4"
-		pod := createPodForTaintsTest(true, 2*AdditionalWaitPerDeleteSeconds, podName, podName, ns)
+		pod := createPodForTaintsTest(true, 2*additionalWaitPerDeleteSeconds, podName, podName, ns)
 		observedDeletions := make(chan string, 100)
 		stopCh := make(chan struct{})
 		createTestController(cs, observedDeletions, stopCh, podName, ns)
@@ -302,7 +301,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
 
 		// Wait a bit
 		ginkgo.By("Waiting short time to make sure Pod is queued for deletion")
-		timeoutChannel := time.NewTimer(AdditionalWaitPerDeleteSeconds).C
+		timeoutChannel := time.NewTimer(additionalWaitPerDeleteSeconds).C
 		select {
 		case <-timeoutChannel:
 			e2elog.Logf("Pod wasn't evicted. Proceeding")
@@ -314,7 +313,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
 		framework.RemoveTaintOffNode(cs, nodeName, testTaint)
 		taintRemoved = true
 		ginkgo.By("Waiting some time to make sure that toleration time passed.")
-		timeoutChannel = time.NewTimer(time.Duration(KubeletPodDeletionDelaySeconds+3*AdditionalWaitPerDeleteSeconds) * time.Second).C
+		timeoutChannel = time.NewTimer(time.Duration(kubeletPodDeletionDelaySeconds+3*additionalWaitPerDeleteSeconds) * time.Second).C
 		select {
 		case <-timeoutChannel:
 			e2elog.Logf("Pod wasn't evicted. Test successful")
@@ -372,7 +371,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() {
 
 		// Wait a bit
 		ginkgo.By("Waiting for Pod1 to be deleted")
-		timeoutChannel := time.NewTimer(time.Duration(KubeletPodDeletionDelaySeconds+AdditionalWaitPerDeleteSeconds) * time.Second).C
+		timeoutChannel := time.NewTimer(time.Duration(kubeletPodDeletionDelaySeconds+additionalWaitPerDeleteSeconds) * time.Second).C
 		var evicted int
 		for {
 			select {
@@ -404,8 +403,8 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() {
 		stopCh := make(chan struct{})
 		createTestController(cs, observedDeletions, stopCh, podGroup, ns)
 
-		pod1 := createPodForTaintsTest(true, AdditionalWaitPerDeleteSeconds, podGroup+"1", podGroup, ns)
-		pod2 := createPodForTaintsTest(true, 5*AdditionalWaitPerDeleteSeconds, podGroup+"2", podGroup, ns)
+		pod1 := createPodForTaintsTest(true, additionalWaitPerDeleteSeconds, podGroup+"1", podGroup, ns)
+		pod2 := createPodForTaintsTest(true, 5*additionalWaitPerDeleteSeconds, podGroup+"2", podGroup, ns)
 
 		ginkgo.By("Starting pods...")
 		nodeName, err := testutils.RunPodAndGetNodeName(cs, pod1, 2*time.Minute)
@@ -431,7 +430,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() {
 
 		// Wait a bit
 		ginkgo.By("Waiting for Pod1 and Pod2 to be deleted")
-		timeoutChannel := time.NewTimer(time.Duration(KubeletPodDeletionDelaySeconds+3*AdditionalWaitPerDeleteSeconds) * time.Second).C
+		timeoutChannel := time.NewTimer(time.Duration(kubeletPodDeletionDelaySeconds+3*additionalWaitPerDeleteSeconds) * time.Second).C
 		var evicted int
 		for evicted != 2 {
 			select {
@@ -59,7 +59,8 @@ var _ = SIGDescribe("Multi-AZ Clusters", func() {
 	})
 })
 
-// Check that the pods comprising a service get spread evenly across available zones
+// SpreadServiceOrFail check that the pods comprising a service
+// get spread evenly across available zones
 func SpreadServiceOrFail(f *framework.Framework, replicaCount int, image string) {
 	// First create the service
 	serviceName := "test-service"
@@ -173,7 +174,8 @@ func checkZoneSpreading(c clientset.Interface, pods *v1.PodList, zoneNames []str
 	return true, nil
 }
 
-// Check that the pods comprising a replication controller get spread evenly across available zones
+// SpreadRCOrFail Check that the pods comprising a replication
+// controller get spread evenly across available zones
 func SpreadRCOrFail(f *framework.Framework, replicaCount int32, image string) {
 	name := "ubelite-spread-rc-" + string(uuid.NewUUID())
 	ginkgo.By(fmt.Sprintf("Creating replication controller %s", name))
@@ -180,7 +180,8 @@ type staticPVTestConfig struct {
 	pod *v1.Pod
 }
 
-// Check that the pods using statically created PVs get scheduled to the same zone that the PV is in.
+// PodsUseStaticPVsOrFail Check that the pods using statically
+// created PVs get scheduled to the same zone that the PV is in.
 func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string) {
 	var err error
 	c := f.ClientSet