Increase certain waiting time windows in the gpu_device_plugin e2e_node test.
The kubelet restart process seems to have gotten a bit slower recently. Running the gpu_device_plugin e2e_node test on GCE, I saw it take ~37 seconds for the kubelet to start the CM DeviceManager after a restart, and another ~12 seconds for the GPU device plugin to re-register. As a result, this e2e_node test fails because the current 10-second waiting time is too small. Restarting a container also seems to have become slower, sometimes exceeding the current 2-minute waiting time in ensurePodContainerRestart(). This change increases both waiting times to 5 minutes to leave enough headroom on slower machines.
parent 0d9c432542
commit 265f3a48d3
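The whole change amounts to tuning the timeout argument of Gomega's Eventually assertion in the hunks below. As a minimal, self-contained sketch of that pattern (the package name, TestWaitForGPUs, the gpusAvailable stub, and the 2-second poll interval are illustrative assumptions; the real test polls framework.NumberOfNVIDIAGPUs(getLocalNode(f)) at framework.Poll):

package sketch

import (
	"testing"
	"time"

	"github.com/onsi/gomega"
)

// gpusAvailable is a hypothetical stand-in for the real node query,
// framework.NumberOfNVIDIAGPUs(getLocalNode(f)) > 0.
func gpusAvailable() bool { return true }

func TestWaitForGPUs(t *testing.T) {
	g := gomega.NewWithT(t)

	// The condition is re-evaluated every poll interval (2s here, standing in
	// for framework.Poll) until it returns true or the timeout expires. The
	// timeout is what this commit raises from 10*time.Second (and from
	// 2*time.Minute in ensurePodContainerRestart) to 5*time.Minute.
	g.Eventually(func() bool {
		return gpusAvailable()
	}, 5*time.Minute, 2*time.Second).Should(gomega.BeTrue())
}

Because Eventually returns as soon as the condition holds, the more generous 5-minute ceiling only affects failing or slow runs; passing runs finish as quickly as before.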
@@ -235,7 +235,7 @@ func ensurePodContainerRestart(f *framework.Framework, podName string, contName
 		currentCount = p.Status.ContainerStatuses[0].RestartCount
 		framework.Logf("initial %v, current %v", initialCount, currentCount)
 		return currentCount > initialCount
-	}, 2*time.Minute, framework.Poll).Should(BeTrue())
+	}, 5*time.Minute, framework.Poll).Should(BeTrue())
 }
 
 // parseLog returns the matching string for the specified regular expression parsed from the container logs.
@@ -54,7 +54,7 @@ var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugi
 	By("Waiting for GPUs to become available on the local node")
 	Eventually(func() bool {
 		return framework.NumberOfNVIDIAGPUs(getLocalNode(f)) > 0
-	}, 10*time.Second, framework.Poll).Should(BeTrue())
+	}, 5*time.Minute, framework.Poll).Should(BeTrue())
 
 	if framework.NumberOfNVIDIAGPUs(getLocalNode(f)) < 2 {
 		Skip("Not enough GPUs to execute this test (at least two needed)")
@@ -97,7 +97,7 @@ var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugi
 	framework.WaitForAllNodesSchedulable(f.ClientSet, framework.TestContext.NodeSchedulableTimeout)
 	Eventually(func() bool {
 		return framework.NumberOfNVIDIAGPUs(getLocalNode(f)) > 0
-	}, 10*time.Second, framework.Poll).Should(BeTrue())
+	}, 5*time.Minute, framework.Poll).Should(BeTrue())
 	p2 := f.PodClient().CreateSync(makeBusyboxPod(framework.NVIDIAGPUResourceName, podRECMD))
 
 	By("Checking that pods got a different GPU")