remove repeat failure command option and increase timeouts in etcd failure test

pull/6/head
Mike Danese 2015-06-24 14:54:51 -07:00
parent 8b4170ed39
commit 5d8ffbaad9
1 changed file with 7 additions and 20 deletions
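Summarizing the diff below: the repeat bool parameter is removed from etcdFailTest and doEtcdFailure, so every failure mode now injects its fail command exactly once, and both recovery polls in checkExistingRCRecovers have their timeouts doubled from 30 to 60 seconds. A minimal sketch of the resulting failure sequence, with masterExec stubbed out here because the real SSH helper is defined outside this file:

package main

import (
	"fmt"
	"time"
)

// etcdFailureDuration mirrors the constant in the diff: how long the
// induced failure stays in place before the fix command runs.
const etcdFailureDuration = 20 * time.Second

// masterExec is a stand-in for the e2e helper that runs a command on the
// master node over SSH; its real implementation is not part of this diff.
func masterExec(cmd string) { fmt.Println("master$", cmd) }

// doEtcdFailure matches the simplified function: fail once, wait, fix.
func doEtcdFailure(failCommand, fixCommand string) {
	masterExec(failCommand)
	time.Sleep(etcdFailureDuration)
	masterExec(fixCommand)
}

func main() {
	doEtcdFailure(
		"pgrep etcd | xargs -I {} sudo kill -9 {}",
		"echo 'do nothing. monit should restart etcd.'",
	)
}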


@@ -23,7 +23,6 @@ import (
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/fields"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
-	"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/util/wait"
 
 	. "github.com/onsi/ginkgo"
@@ -53,7 +52,6 @@ var _ = Describe("Etcd failure", func() {
 			framework,
 			"sudo iptables -A INPUT -p tcp --destination-port 4001 -j DROP",
 			"sudo iptables -D INPUT -p tcp --destination-port 4001 -j DROP",
-			false,
 		)
 	})
@@ -62,12 +60,11 @@ var _ = Describe("Etcd failure", func() {
 			framework,
 			"pgrep etcd | xargs -I {} sudo kill -9 {}",
 			"echo 'do nothing. monit should restart etcd.'",
-			true,
 		)
 	})
 })
 
-func etcdFailTest(framework Framework, failCommand, fixCommand string, repeat bool) {
+func etcdFailTest(framework Framework, failCommand, fixCommand string) {
 	// This test requires SSH, so the provider check should be identical to
 	// those tests.
 	if !providerIs("gce") {
@@ -75,7 +72,7 @@ func etcdFailTest(framework Framework, failCommand, fixCommand string, repeat bool) {
 		return
 	}
 
-	doEtcdFailure(failCommand, fixCommand, repeat)
+	doEtcdFailure(failCommand, fixCommand)
 
 	checkExistingRCRecovers(framework)
@@ -89,21 +86,11 @@ func etcdFailTest(framework Framework, failCommand, fixCommand string, repeat bool) {
 // master and go on to assert that etcd and kubernetes components recover.
 const etcdFailureDuration = 20 * time.Second
 
-func doEtcdFailure(failCommand, fixCommand string, repeat bool) {
+func doEtcdFailure(failCommand, fixCommand string) {
 	By("failing etcd")
 
-	if repeat {
-		stop := make(chan struct{}, 1)
-		go util.Until(func() {
-			defer GinkgoRecover()
-			masterExec(failCommand)
-		}, 1*time.Second, stop)
-		time.Sleep(etcdFailureDuration)
-		stop <- struct{}{}
-	} else {
-		masterExec(failCommand)
-		time.Sleep(etcdFailureDuration)
-	}
+	masterExec(failCommand)
+	time.Sleep(etcdFailureDuration)
 
 	masterExec(fixCommand)
 }
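For context on the deleted branch: when repeat was true, the old code used util.Until to re-fire the fail command once per second until a stop channel was signaled at the end of the failure window, while GinkgoRecover kept panics inside the goroutine from escaping the test. A rough self-contained sketch of that loop, with a local until function standing in for the removed util.Until dependency (the real helper's semantics may differ in detail):

package main

import (
	"fmt"
	"time"
)

// until approximates the old util.Until contract assumed here: call f,
// then keep calling it every period, until stop receives a value.
func until(f func(), period time.Duration, stop chan struct{}) {
	for {
		select {
		case <-stop:
			return
		default:
			f()
			time.Sleep(period)
		}
	}
}

func main() {
	stop := make(chan struct{}, 1)
	// The removed branch re-injected the failure repeatedly instead of
	// firing it a single time as the simplified code now does.
	go until(func() {
		fmt.Println("master$ pgrep etcd | xargs -I {} sudo kill -9 {}")
	}, 1*time.Second, stop)
	time.Sleep(3 * time.Second) // stands in for etcdFailureDuration (20s)
	stop <- struct{}{}
}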
@@ -121,7 +108,7 @@ func checkExistingRCRecovers(f Framework) {
 	rcSelector := labels.Set{"name": "baz"}.AsSelector()
 
 	By("deleting pods from existing replication controller")
-	expectNoError(wait.Poll(time.Millisecond*500, time.Second*30, func() (bool, error) {
+	expectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) {
 		pods, err := podClient.List(rcSelector, fields.Everything())
 		if err != nil {
 			Logf("apiserver returned error, as expected before recovery: %v", err)
@@ -139,7 +126,7 @@ func checkExistingRCRecovers(f Framework) {
 	}))
 
 	By("waiting for replication controller to recover")
-	expectNoError(wait.Poll(time.Millisecond*500, time.Second*30, func() (bool, error) {
+	expectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) {
 		pods, err := podClient.List(rcSelector, fields.Everything())
 		Expect(err).NotTo(HaveOccurred())
 		for _, pod := range pods.Items {
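Both recovery checks are built on wait.Poll(interval, timeout, condition), which invokes condition every interval until it returns true, returns an error, or the timeout elapses; this commit only widens the timeout. A minimal standard-library sketch of that polling contract, using a local poll helper rather than the kubernetes wait package:

package main

import (
	"errors"
	"fmt"
	"time"
)

// poll approximates the wait.Poll contract used in the test: run condition
// every interval until it reports done, fails, or the deadline passes.
func poll(interval, timeout time.Duration, condition func() (bool, error)) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		done, err := condition()
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		time.Sleep(interval)
	}
	return errors.New("timed out waiting for the condition")
}

func main() {
	start := time.Now()
	// As in the diff: poll every 500ms with a 60s ceiling; this toy
	// condition simply reports done after two seconds.
	err := poll(500*time.Millisecond, 60*time.Second, func() (bool, error) {
		return time.Since(start) > 2*time.Second, nil
	})
	fmt.Println("poll returned:", err)
}

Note how the first poll in the diff returns false with a nil error when the apiserver call fails, merely logging the error: failures there are expected until etcd comes back, so they must not abort the poll early.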