Merge pull request #18220 from yujuhong/increase_timeout

e2e: increase the container probing test timeout
Fabio Yeon 2015-12-07 10:44:31 -08:00
commit b7dc1175ba
1 changed file with 45 additions and 13 deletions


@@ -17,6 +17,7 @@ limitations under the License.
package e2e
import (
"fmt"
"time"
"k8s.io/kubernetes/pkg/api"
@@ -29,6 +30,11 @@ import (
. "github.com/onsi/gomega"
)
+const (
+probTestContainerName = "test-webserver"
+probTestInitialDelaySeconds = 30
+)
var _ = Describe("Probing container", func() {
framework := NewFramework("container-probe")
var podClient client.PodInterface
@@ -41,9 +47,8 @@ var _ = Describe("Probing container", func() {
It("with readiness probe should not be ready before initial delay and never restart [Conformance]", func() {
p, err := podClient.Create(makePodSpec(probe.withInitialDelay().build(), nil))
expectNoError(err)
-startTime := time.Now()
-Expect(wait.Poll(poll, 90*time.Second, func() (bool, error) {
+Expect(wait.Poll(poll, 120*time.Second, func() (bool, error) {
p, err := podClient.Get(p.Name)
if err != nil {
return false, err
@@ -56,17 +61,25 @@ var _ = Describe("Probing container", func() {
return true, nil
})).NotTo(HaveOccurred(), "pod never became ready")
-if time.Since(startTime) < 30*time.Second {
-Failf("Pod became ready before it's initial delay")
-}
p, err = podClient.Get(p.Name)
expectNoError(err)
isReady, err := podRunningReady(p)
expectNoError(err)
Expect(isReady).To(BeTrue(), "pod should be ready")
+// We assume the pod became ready when the container became ready. This
+// is true for a single container pod.
+readyTime, err := getTransitionTimeForReadyCondition(p)
+expectNoError(err)
+startedTime, err := getContainerStartedTime(p, probTestContainerName)
+expectNoError(err)
+Logf("Container started at %v, pod became ready at %v", startedTime, readyTime)
+initialDelay := probTestInitialDelaySeconds * time.Second
+if readyTime.Sub(startedTime) < initialDelay {
+Failf("Pod became ready before its %v initial delay", initialDelay)
+}
restartCount := getRestartCount(p)
Expect(restartCount == 0).To(BeTrue(), "pod should have a restart count of 0 but got %v", restartCount)
})
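What the rewritten assertion above actually does: it measures the readiness delay from the container's own start time (via the pod's Ready condition transition time) rather than from the moment the test created the pod, so slow scheduling or image pulls no longer eat into the 30-second window. A minimal stand-alone sketch of that comparison, using only the standard library and illustrative names that are not taken from the PR:

package main

import (
	"fmt"
	"time"
)

// initialDelayRespected reports whether readiness arrived no sooner than
// initialDelay after the (single) container actually started.
func initialDelayRespected(startedAt, readyAt time.Time, initialDelay time.Duration) bool {
	return readyAt.Sub(startedAt) >= initialDelay
}

func main() {
	started := time.Now()
	ready := started.Add(35 * time.Second) // pretend the kubelet marked the pod ready 35s after the container started
	if initialDelayRespected(started, ready, 30*time.Second) {
		fmt.Println("ok: readiness came after the 30s initial delay")
	} else {
		fmt.Println("fail: pod became ready before its initial delay")
	}
}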
@@ -75,7 +88,7 @@ var _ = Describe("Probing container", func() {
p, err := podClient.Create(makePodSpec(probe.withFailing().build(), nil))
expectNoError(err)
-err = wait.Poll(poll, 90*time.Second, func() (bool, error) {
+err = wait.Poll(poll, 120*time.Second, func() (bool, error) {
p, err := podClient.Get(p.Name)
if err != nil {
return false, err
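Both tests get the same timeout bump from 90 to 120 seconds in their wait.Poll calls. wait.Poll keeps invoking the condition function at the poll interval until it reports done, returns an error, or the timeout elapses, so a larger timeout only gives a slow cluster more headroom; it does not slow down the passing case. A rough stdlib-only sketch of those semantics (an approximation, not the actual wait.Poll implementation):

package main

import (
	"errors"
	"fmt"
	"time"
)

// pollUntil approximates wait.Poll: run cond every interval until it reports
// done, returns an error, or the timeout elapses.
func pollUntil(interval, timeout time.Duration, cond func() (bool, error)) error {
	deadline := time.Now().Add(timeout)
	for {
		done, err := cond()
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		if time.Now().After(deadline) {
			return errors.New("timed out waiting for the condition")
		}
		time.Sleep(interval)
	}
}

func main() {
	start := time.Now()
	err := pollUntil(time.Second, 120*time.Second, func() (bool, error) {
		return time.Since(start) > 3*time.Second, nil // stand-in for "the pod reports ready"
	})
	fmt.Println("result:", err) // result: <nil>
}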
@@ -98,6 +111,28 @@ var _ = Describe("Probing container", func() {
})
+func getContainerStartedTime(p *api.Pod, containerName string) (time.Time, error) {
+for _, status := range p.Status.ContainerStatuses {
+if status.Name != containerName {
+continue
+}
+if status.State.Running == nil {
+return time.Time{}, fmt.Errorf("Container is not running")
+}
+return status.State.Running.StartedAt.Time, nil
+}
+return time.Time{}, fmt.Errorf("cannot find container named %q", containerName)
+}
+func getTransitionTimeForReadyCondition(p *api.Pod) (time.Time, error) {
+for _, cond := range p.Status.Conditions {
+if cond.Type == api.PodReady {
+return cond.LastTransitionTime.Time, nil
+}
+}
+return time.Time{}, fmt.Errorf("No ready condition can be found for pod")
+}
func getRestartCount(p *api.Pod) int {
count := 0
for _, containerStatus := range p.Status.ContainerStatuses {
@@ -112,13 +147,10 @@ func makePodSpec(readinessProbe, livenessProbe *api.Probe) *api.Pod {
Spec: api.PodSpec{
Containers: []api.Container{
{
Name: "test-webserver",
Name: probTestContainerName,
Image: "gcr.io/google_containers/test-webserver",
LivenessProbe: livenessProbe,
ReadinessProbe: readinessProbe,
-}, {
-Name: "test-noprobe",
-Image: "gcr.io/google_containers/pause:2.0",
},
},
},
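Dropping the probe-less pause container matters for the readiness check added earlier: a pod's Ready condition only flips once every container in it is ready, so with a single container the pod-level transition time can stand in for the container's. A toy illustration of that rule (simplified; the real condition is computed by the kubelet from the pod's container statuses):

package main

import "fmt"

// podReady mirrors the rule that a pod is Ready only when all of its
// containers are ready.
func podReady(containersReady []bool) bool {
	for _, ready := range containersReady {
		if !ready {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(podReady([]bool{true}))        // single probed container ready: pod ready
	fmt.Println(podReady([]bool{true, false})) // a second, unready container would delay pod readiness
}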
@@ -151,7 +183,7 @@ func (b webserverProbeBuilder) build() *api.Probe {
},
}
if b.initialDelay {
-probe.InitialDelaySeconds = 30
+probe.InitialDelaySeconds = probTestInitialDelaySeconds
}
if b.failing {
probe.HTTPGet.Port = intstr.FromInt(81)