Merge pull request #74508 from danielqsj/uapps

Fix golint failures for e2e/upgrades/...
Kubernetes Prow Robot 2019-02-25 21:48:42 -08:00 committed by GitHub
commit 33a0afafe7
21 changed files with 278 additions and 254 deletions
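
The diffs below repeat one pattern across the upgrade test files: dot imports of ginkgo and gomega are replaced with qualified imports, every exported method gains a doc comment that starts with its name, and Go initialisms keep a single case (MySQL, IP). As a minimal illustrative sketch only (ExampleUpgradeTest and its methods are invented for illustration and are not part of this commit), the golint-clean shape looks roughly like this:

package upgrades

import (
	// Qualified imports replace the old dot imports, so call sites read
	// ginkgo.By(...) and gomega.Expect(...) instead of bare By(...) / Expect(...).
	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

// ExampleUpgradeTest is a hypothetical harness used only to show the pattern.
type ExampleUpgradeTest struct{}

// Name returns the tracking name of the test.
func (ExampleUpgradeTest) Name() string { return "example-upgrade" }

// Setup illustrates the qualified call style used throughout this commit.
func (ExampleUpgradeTest) Setup() {
	ginkgo.By("Performing a setup step")
	gomega.Expect(1 + 1).To(gomega.Equal(2))
}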

View File

@ -673,9 +673,6 @@ test/e2e/storage/testsuites
test/e2e/storage/utils
test/e2e/storage/vsphere
test/e2e/ui
test/e2e/upgrades
test/e2e/upgrades/apps
test/e2e/upgrades/storage
test/e2e/windows
test/e2e_kubeadm
test/e2e_node

View File

@ -66,7 +66,7 @@ var gpuUpgradeTests = []upgrades.Test{
}
var statefulsetUpgradeTests = []upgrades.Test{
&upgrades.MySqlUpgradeTest{},
&upgrades.MySQLUpgradeTest{},
&upgrades.EtcdUpgradeTest{},
&upgrades.CassandraUpgradeTest{},
}
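
This registration slice changes because of a rename made further down in the commit: golint's initialism rule prefers MySQL over MySql in exported names, and renaming an exported type means updating every reference in the same change. A small self-contained sketch of the convention (Test here is a stub interface, not the real upgrades.Test):

package upgrades

// Test is a minimal stub of the upgrade-test interface so this sketch stands alone.
type Test interface {
	Name() string
}

// MySQLUpgradeTest stands in for the renamed type (MySqlUpgradeTest before this
// commit); golint expects initialisms such as MySQL to keep a single case.
type MySQLUpgradeTest struct{}

// Name returns the tracking name of the test.
func (MySQLUpgradeTest) Name() string { return "mysql-upgrade" }

// statefulsetUpgradeTests mirrors the slice above: the caller list is updated
// together with the type it references.
var statefulsetUpgradeTests = []Test{
	&MySQLUpgradeTest{},
}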

View File

@ -22,8 +22,8 @@ import (
"k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
"github.com/onsi/gomega/gstruct"
)
@ -32,8 +32,10 @@ type AppArmorUpgradeTest struct {
pod *api.Pod
}
// Name returns the tracking name of the test.
func (AppArmorUpgradeTest) Name() string { return "apparmor-upgrade" }
// Skip returns true when this test can be skipped.
func (AppArmorUpgradeTest) Skip(upgCtx UpgradeContext) bool {
supportedImages := make(map[string]bool)
for _, d := range common.AppArmorDistros {
@ -50,11 +52,11 @@ func (AppArmorUpgradeTest) Skip(upgCtx UpgradeContext) bool {
// Setup creates a secret and then verifies that a pod can consume it.
func (t *AppArmorUpgradeTest) Setup(f *framework.Framework) {
By("Loading AppArmor profiles to nodes")
ginkgo.By("Loading AppArmor profiles to nodes")
common.LoadAppArmorProfiles(f)
// Create the initial test pod.
By("Creating a long-running AppArmor enabled pod.")
ginkgo.By("Creating a long-running AppArmor enabled pod.")
t.pod = common.CreateAppArmorTestPod(f, false, false)
// Verify initial state.
@ -76,32 +78,32 @@ func (t *AppArmorUpgradeTest) Test(f *framework.Framework, done <-chan struct{},
// Teardown cleans up any remaining resources.
func (t *AppArmorUpgradeTest) Teardown(f *framework.Framework) {
// rely on the namespace deletion to clean up everything
By("Logging container failures")
ginkgo.By("Logging container failures")
framework.LogFailedContainers(f.ClientSet, f.Namespace.Name, framework.Logf)
}
func (t *AppArmorUpgradeTest) verifyPodStillUp(f *framework.Framework) {
By("Verifying an AppArmor profile is continuously enforced for a pod")
ginkgo.By("Verifying an AppArmor profile is continuously enforced for a pod")
pod, err := f.PodClient().Get(t.pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "Should be able to get pod")
Expect(pod.Status.Phase).To(Equal(api.PodRunning), "Pod should stay running")
Expect(pod.Status.ContainerStatuses[0].State.Running).NotTo(BeNil(), "Container should be running")
Expect(pod.Status.ContainerStatuses[0].RestartCount).To(BeZero(), "Container should not need to be restarted")
gomega.Expect(pod.Status.Phase).To(gomega.Equal(api.PodRunning), "Pod should stay running")
gomega.Expect(pod.Status.ContainerStatuses[0].State.Running).NotTo(gomega.BeNil(), "Container should be running")
gomega.Expect(pod.Status.ContainerStatuses[0].RestartCount).To(gomega.BeZero(), "Container should not need to be restarted")
}
func (t *AppArmorUpgradeTest) verifyNewPodSucceeds(f *framework.Framework) {
By("Verifying an AppArmor profile is enforced for a new pod")
ginkgo.By("Verifying an AppArmor profile is enforced for a new pod")
common.CreateAppArmorTestPod(f, false, true)
}
func (t *AppArmorUpgradeTest) verifyNodesAppArmorEnabled(f *framework.Framework) {
By("Verifying nodes are AppArmor enabled")
ginkgo.By("Verifying nodes are AppArmor enabled")
nodes, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
framework.ExpectNoError(err, "Failed to list nodes")
for _, node := range nodes.Items {
Expect(node.Status.Conditions).To(gstruct.MatchElements(conditionType, gstruct.IgnoreExtras, gstruct.Elements{
gomega.Expect(node.Status.Conditions).To(gstruct.MatchElements(conditionType, gstruct.IgnoreExtras, gstruct.Elements{
"Ready": gstruct.MatchFields(gstruct.IgnoreExtras, gstruct.Fields{
"Message": ContainSubstring("AppArmor enabled"),
"Message": gomega.ContainSubstring("AppArmor enabled"),
}),
}))
}

View File

@ -17,7 +17,7 @@ limitations under the License.
package upgrades
import (
. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
@ -35,6 +35,7 @@ type DaemonSetUpgradeTest struct {
daemonSet *apps.DaemonSet
}
// Name returns the tracking name of the test.
func (DaemonSetUpgradeTest) Name() string { return "[sig-apps] daemonset-upgrade" }
// Setup creates a DaemonSet and verifies that it's running
@ -74,29 +75,29 @@ func (t *DaemonSetUpgradeTest) Setup(f *framework.Framework) {
},
}
By("Creating a DaemonSet")
ginkgo.By("Creating a DaemonSet")
var err error
if t.daemonSet, err = f.ClientSet.AppsV1().DaemonSets(ns.Name).Create(t.daemonSet); err != nil {
framework.Failf("unable to create test DaemonSet %s: %v", t.daemonSet.Name, err)
}
By("Waiting for DaemonSet pods to become ready")
ginkgo.By("Waiting for DaemonSet pods to become ready")
err = wait.Poll(framework.Poll, framework.PodStartTimeout, func() (bool, error) {
return checkRunningOnAllNodes(f, t.daemonSet.Namespace, t.daemonSet.Labels)
})
framework.ExpectNoError(err)
By("Validating the DaemonSet after creation")
ginkgo.By("Validating the DaemonSet after creation")
t.validateRunningDaemonSet(f)
}
// Test waits until the upgrade has completed and then verifies that the DaemonSet
// is still running
func (t *DaemonSetUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) {
By("Waiting for upgradet to complete before re-validating DaemonSet")
ginkgo.By("Waiting for upgradet to complete before re-validating DaemonSet")
<-done
By("validating the DaemonSet is still running after upgrade")
ginkgo.By("validating the DaemonSet is still running after upgrade")
t.validateRunningDaemonSet(f)
}
@ -106,7 +107,7 @@ func (t *DaemonSetUpgradeTest) Teardown(f *framework.Framework) {
}
func (t *DaemonSetUpgradeTest) validateRunningDaemonSet(f *framework.Framework) {
By("confirming the DaemonSet pods are running on all expected nodes")
ginkgo.By("confirming the DaemonSet pods are running on all expected nodes")
res, err := checkRunningOnAllNodes(f, t.daemonSet.Namespace, t.daemonSet.Labels)
framework.ExpectNoError(err)
if !res {
@ -114,7 +115,7 @@ func (t *DaemonSetUpgradeTest) validateRunningDaemonSet(f *framework.Framework)
}
// DaemonSet resource itself should be good
By("confirming the DaemonSet resource is in a good state")
ginkgo.By("confirming the DaemonSet resource is in a good state")
res, err = checkDaemonStatus(f, t.daemonSet.Namespace, t.daemonSet.Name)
framework.ExpectNoError(err)
if !res {

View File

@ -26,8 +26,8 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/upgrades"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
imageutils "k8s.io/kubernetes/test/utils/image"
)
@ -46,6 +46,7 @@ type DeploymentUpgradeTest struct {
newRSUID types.UID
}
// Name returns the tracking name of the test.
func (DeploymentUpgradeTest) Name() string { return "[sig-apps] deployment-upgrade" }
// Setup creates a deployment and makes sure it has a new and an old replicaset running.
@ -57,15 +58,15 @@ func (t *DeploymentUpgradeTest) Setup(f *framework.Framework) {
deploymentClient := c.AppsV1().Deployments(ns)
rsClient := c.AppsV1().ReplicaSets(ns)
By(fmt.Sprintf("Creating a deployment %q with 1 replica in namespace %q", deploymentName, ns))
ginkgo.By(fmt.Sprintf("Creating a deployment %q with 1 replica in namespace %q", deploymentName, ns))
d := framework.NewDeployment(deploymentName, int32(1), map[string]string{"test": "upgrade"}, "nginx", nginxImage, apps.RollingUpdateDeploymentStrategyType)
deployment, err := deploymentClient.Create(d)
framework.ExpectNoError(err)
By(fmt.Sprintf("Waiting deployment %q to complete", deploymentName))
ginkgo.By(fmt.Sprintf("Waiting deployment %q to complete", deploymentName))
framework.ExpectNoError(framework.WaitForDeploymentComplete(c, deployment))
By(fmt.Sprintf("Getting replicaset revision 1 of deployment %q", deploymentName))
ginkgo.By(fmt.Sprintf("Getting replicaset revision 1 of deployment %q", deploymentName))
rsSelector, err := metav1.LabelSelectorAsSelector(d.Spec.Selector)
framework.ExpectNoError(err)
rsList, err := rsClient.List(metav1.ListOptions{LabelSelector: rsSelector.String()})
@ -76,20 +77,20 @@ func (t *DeploymentUpgradeTest) Setup(f *framework.Framework) {
}
t.oldRSUID = rss[0].UID
By(fmt.Sprintf("Waiting for revision of the deployment %q to become 1", deploymentName))
ginkgo.By(fmt.Sprintf("Waiting for revision of the deployment %q to become 1", deploymentName))
framework.ExpectNoError(framework.WaitForDeploymentRevision(c, deployment, "1"))
// Trigger a new rollout so that we have some history.
By(fmt.Sprintf("Triggering a new rollout for deployment %q", deploymentName))
ginkgo.By(fmt.Sprintf("Triggering a new rollout for deployment %q", deploymentName))
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deploymentName, func(update *apps.Deployment) {
update.Spec.Template.Spec.Containers[0].Name = "updated-name"
})
framework.ExpectNoError(err)
By(fmt.Sprintf("Waiting deployment %q to complete", deploymentName))
ginkgo.By(fmt.Sprintf("Waiting deployment %q to complete", deploymentName))
framework.ExpectNoError(framework.WaitForDeploymentComplete(c, deployment))
By(fmt.Sprintf("Getting replicasets revision 1 and 2 of deployment %q", deploymentName))
ginkgo.By(fmt.Sprintf("Getting replicasets revision 1 and 2 of deployment %q", deploymentName))
rsList, err = rsClient.List(metav1.ListOptions{LabelSelector: rsSelector.String()})
framework.ExpectNoError(err)
rss = rsList.Items
@ -97,7 +98,7 @@ func (t *DeploymentUpgradeTest) Setup(f *framework.Framework) {
framework.ExpectNoError(fmt.Errorf("expected 2 replicaset, got %d", len(rss)))
}
By(fmt.Sprintf("Checking replicaset of deployment %q that is created before rollout survives the rollout", deploymentName))
ginkgo.By(fmt.Sprintf("Checking replicaset of deployment %q that is created before rollout survives the rollout", deploymentName))
switch t.oldRSUID {
case rss[0].UID:
t.newRSUID = rss[1].UID
@ -107,7 +108,7 @@ func (t *DeploymentUpgradeTest) Setup(f *framework.Framework) {
framework.ExpectNoError(fmt.Errorf("old replicaset with UID %q does not survive rollout", t.oldRSUID))
}
By(fmt.Sprintf("Waiting for revision of the deployment %q to become 2", deploymentName))
ginkgo.By(fmt.Sprintf("Waiting for revision of the deployment %q to become 2", deploymentName))
framework.ExpectNoError(framework.WaitForDeploymentRevision(c, deployment, "2"))
t.oldDeploymentUID = deployment.UID
@ -116,7 +117,7 @@ func (t *DeploymentUpgradeTest) Setup(f *framework.Framework) {
// Test checks whether the replicasets for a deployment are the same after an upgrade.
func (t *DeploymentUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) {
// Block until upgrade is done
By(fmt.Sprintf("Waiting for upgrade to finish before checking replicasets for deployment %q", deploymentName))
ginkgo.By(fmt.Sprintf("Waiting for upgrade to finish before checking replicasets for deployment %q", deploymentName))
<-done
c := f.ClientSet
@ -127,10 +128,10 @@ func (t *DeploymentUpgradeTest) Test(f *framework.Framework, done <-chan struct{
deployment, err := deploymentClient.Get(deploymentName, metav1.GetOptions{})
framework.ExpectNoError(err)
By(fmt.Sprintf("Checking UID to verify deployment %q survives upgrade", deploymentName))
Expect(deployment.UID).To(Equal(t.oldDeploymentUID))
ginkgo.By(fmt.Sprintf("Checking UID to verify deployment %q survives upgrade", deploymentName))
gomega.Expect(deployment.UID).To(gomega.Equal(t.oldDeploymentUID))
By(fmt.Sprintf("Verifying deployment %q does not create new replicasets", deploymentName))
ginkgo.By(fmt.Sprintf("Verifying deployment %q does not create new replicasets", deploymentName))
rsSelector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
framework.ExpectNoError(err)
rsList, err := rsClient.List(metav1.ListOptions{LabelSelector: rsSelector.String()})
@ -142,27 +143,27 @@ func (t *DeploymentUpgradeTest) Test(f *framework.Framework, done <-chan struct{
switch t.oldRSUID {
case rss[0].UID:
Expect(rss[1].UID).To(Equal(t.newRSUID))
gomega.Expect(rss[1].UID).To(gomega.Equal(t.newRSUID))
case rss[1].UID:
Expect(rss[0].UID).To(Equal(t.newRSUID))
gomega.Expect(rss[0].UID).To(gomega.Equal(t.newRSUID))
default:
framework.ExpectNoError(fmt.Errorf("new replicasets are created during upgrade of deployment %q", deploymentName))
}
By(fmt.Sprintf("Verifying revision of the deployment %q is still 2", deploymentName))
Expect(deployment.Annotations[deploymentutil.RevisionAnnotation]).To(Equal("2"))
ginkgo.By(fmt.Sprintf("Verifying revision of the deployment %q is still 2", deploymentName))
gomega.Expect(deployment.Annotations[deploymentutil.RevisionAnnotation]).To(gomega.Equal("2"))
By(fmt.Sprintf("Waiting for deployment %q to complete adoption", deploymentName))
ginkgo.By(fmt.Sprintf("Waiting for deployment %q to complete adoption", deploymentName))
framework.ExpectNoError(framework.WaitForDeploymentComplete(c, deployment))
// Verify the upgraded deployment is active by scaling up the deployment by 1
By(fmt.Sprintf("Scaling up replicaset of deployment %q by 1", deploymentName))
ginkgo.By(fmt.Sprintf("Scaling up replicaset of deployment %q by 1", deploymentName))
_, err = framework.UpdateDeploymentWithRetries(c, ns, deploymentName, func(deployment *apps.Deployment) {
*deployment.Spec.Replicas = *deployment.Spec.Replicas + 1
})
framework.ExpectNoError(err)
By(fmt.Sprintf("Waiting for deployment %q to complete after scaling", deploymentName))
ginkgo.By(fmt.Sprintf("Waiting for deployment %q to complete after scaling", deploymentName))
framework.ExpectNoError(framework.WaitForDeploymentComplete(c, deployment))
}

View File

@ -22,8 +22,8 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/upgrades"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
// JobUpgradeTest is a test harness for batch Jobs.
@ -32,30 +32,31 @@ type JobUpgradeTest struct {
namespace string
}
// Name returns the tracking name of the test.
func (JobUpgradeTest) Name() string { return "[sig-apps] job-upgrade" }
// Setup starts a Job with a parallelism of 2 and 2 completions running.
func (t *JobUpgradeTest) Setup(f *framework.Framework) {
t.namespace = f.Namespace.Name
By("Creating a job")
ginkgo.By("Creating a job")
t.job = framework.NewTestJob("notTerminate", "foo", v1.RestartPolicyOnFailure, 2, 2, nil, 6)
job, err := framework.CreateJob(f.ClientSet, t.namespace, t.job)
t.job = job
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
By("Ensuring active pods == parallelism")
ginkgo.By("Ensuring active pods == parallelism")
err = framework.WaitForAllJobPodsRunning(f.ClientSet, t.namespace, job.Name, 2)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
// Test verifies that the Job's Pods are running after an upgrade
func (t *JobUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) {
<-done
By("Ensuring active pods == parallelism")
ginkgo.By("Ensuring active pods == parallelism")
running, err := framework.CheckForAllJobPodsRunning(f.ClientSet, t.namespace, t.job.Name, 2)
Expect(err).NotTo(HaveOccurred())
Expect(running).To(BeTrue())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Expect(running).To(gomega.BeTrue())
}
// Teardown cleans up any remaining resources.

View File

@ -26,7 +26,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/upgrades"
. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
imageutils "k8s.io/kubernetes/test/utils/image"
)
@ -45,19 +45,21 @@ type ReplicaSetUpgradeTest struct {
UID types.UID
}
// Name returns the tracking name of the test.
func (ReplicaSetUpgradeTest) Name() string { return "[sig-apps] replicaset-upgrade" }
// Setup creates a ReplicaSet and makes sure its replicas are ready.
func (r *ReplicaSetUpgradeTest) Setup(f *framework.Framework) {
c := f.ClientSet
ns := f.Namespace.Name
nginxImage := imageutils.GetE2EImage(imageutils.Nginx)
By(fmt.Sprintf("Creating replicaset %s in namespace %s", rsName, ns))
ginkgo.By(fmt.Sprintf("Creating replicaset %s in namespace %s", rsName, ns))
replicaSet := framework.NewReplicaSet(rsName, ns, 1, map[string]string{"test": "upgrade"}, "nginx", nginxImage)
rs, err := c.AppsV1().ReplicaSets(ns).Create(replicaSet)
framework.ExpectNoError(err)
By(fmt.Sprintf("Waiting for replicaset %s to have all of its replicas ready", rsName))
ginkgo.By(fmt.Sprintf("Waiting for replicaset %s to have all of its replicas ready", rsName))
framework.ExpectNoError(framework.WaitForReadyReplicaSet(c, ns, rsName))
r.UID = rs.UID
@ -70,28 +72,28 @@ func (r *ReplicaSetUpgradeTest) Test(f *framework.Framework, done <-chan struct{
rsClient := c.AppsV1().ReplicaSets(ns)
// Block until upgrade is done
By(fmt.Sprintf("Waiting for upgrade to finish before checking replicaset %s", rsName))
ginkgo.By(fmt.Sprintf("Waiting for upgrade to finish before checking replicaset %s", rsName))
<-done
// Verify the RS is the same (survives) after the upgrade
By(fmt.Sprintf("Checking UID to verify replicaset %s survives upgrade", rsName))
ginkgo.By(fmt.Sprintf("Checking UID to verify replicaset %s survives upgrade", rsName))
upgradedRS, err := rsClient.Get(rsName, metav1.GetOptions{})
framework.ExpectNoError(err)
if upgradedRS.UID != r.UID {
framework.ExpectNoError(fmt.Errorf("expected same replicaset UID: %v got: %v", r.UID, upgradedRS.UID))
}
By(fmt.Sprintf("Waiting for replicaset %s to have all of its replicas ready after upgrade", rsName))
ginkgo.By(fmt.Sprintf("Waiting for replicaset %s to have all of its replicas ready after upgrade", rsName))
framework.ExpectNoError(framework.WaitForReadyReplicaSet(c, ns, rsName))
// Verify the upgraded RS is active by scaling up the RS to scaleNum and ensuring all pods are Ready
By(fmt.Sprintf("Scaling up replicaset %s to %d", rsName, scaleNum))
ginkgo.By(fmt.Sprintf("Scaling up replicaset %s to %d", rsName, scaleNum))
_, err = framework.UpdateReplicaSetWithRetries(c, ns, rsName, func(rs *apps.ReplicaSet) {
*rs.Spec.Replicas = scaleNum
})
framework.ExpectNoError(err)
By(fmt.Sprintf("Waiting for replicaset %s to have all of its replicas ready after scaling", rsName))
ginkgo.By(fmt.Sprintf("Waiting for replicaset %s to have all of its replicas ready after scaling", rsName))
framework.ExpectNoError(framework.WaitForReadyReplicaSet(c, ns, rsName))
}

View File

@ -17,8 +17,8 @@ limitations under the License.
package upgrades
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
@ -35,8 +35,10 @@ type StatefulSetUpgradeTest struct {
set *apps.StatefulSet
}
// Name returns the tracking name of the test.
func (StatefulSetUpgradeTest) Name() string { return "[sig-apps] statefulset-upgrade" }
// Skip returns true when this test can be skipped.
func (StatefulSetUpgradeTest) Skip(upgCtx upgrades.UpgradeContext) bool {
minVersion := version.MustParseSemantic("1.5.0")
@ -65,50 +67,50 @@ func (t *StatefulSetUpgradeTest) Setup(f *framework.Framework) {
t.tester = framework.NewStatefulSetTester(f.ClientSet)
t.tester.PauseNewPods(t.set)
By("Creating service " + headlessSvcName + " in namespace " + ns)
ginkgo.By("Creating service " + headlessSvcName + " in namespace " + ns)
_, err := f.ClientSet.CoreV1().Services(ns).Create(t.service)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
By("Creating statefulset " + ssName + " in namespace " + ns)
ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns)
*(t.set.Spec.Replicas) = 3
_, err = f.ClientSet.AppsV1().StatefulSets(ns).Create(t.set)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
By("Saturating stateful set " + t.set.Name)
ginkgo.By("Saturating stateful set " + t.set.Name)
t.tester.Saturate(t.set)
t.verify()
t.restart()
t.verify()
}
// Waits for the upgrade to complete and verifies the StatefulSet basic functionality
// Test waits for the upgrade to complete and verifies the StatefulSet basic functionality
func (t *StatefulSetUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) {
<-done
t.verify()
}
// Deletes all StatefulSets
// Teardown deletes all StatefulSets
func (t *StatefulSetUpgradeTest) Teardown(f *framework.Framework) {
framework.DeleteAllStatefulSets(f.ClientSet, t.set.Name)
}
func (t *StatefulSetUpgradeTest) verify() {
By("Verifying statefulset mounted data directory is usable")
ginkgo.By("Verifying statefulset mounted data directory is usable")
framework.ExpectNoError(t.tester.CheckMount(t.set, "/data"))
By("Verifying statefulset provides a stable hostname for each pod")
ginkgo.By("Verifying statefulset provides a stable hostname for each pod")
framework.ExpectNoError(t.tester.CheckHostname(t.set))
By("Verifying statefulset set proper service name")
ginkgo.By("Verifying statefulset set proper service name")
framework.ExpectNoError(t.tester.CheckServiceName(t.set, t.set.Spec.ServiceName))
cmd := "echo $(hostname) > /data/hostname; sync;"
By("Running " + cmd + " in all stateful pods")
ginkgo.By("Running " + cmd + " in all stateful pods")
framework.ExpectNoError(t.tester.ExecInStatefulPods(t.set, cmd))
}
func (t *StatefulSetUpgradeTest) restart() {
By("Restarting statefulset " + t.set.Name)
ginkgo.By("Restarting statefulset " + t.set.Name)
t.tester.Restart(t.set)
t.tester.WaitForRunningAndReady(*t.set.Spec.Replicas, t.set)
}

View File

@ -25,8 +25,8 @@ import (
"sync"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/version"
@ -60,7 +60,7 @@ func (CassandraUpgradeTest) Skip(upgCtx UpgradeContext) bool {
}
func cassandraKubectlCreate(ns, file string) {
input := string(testfiles.ReadOrDie(filepath.Join(cassandraManifestPath, file), Fail))
input := string(testfiles.ReadOrDie(filepath.Join(cassandraManifestPath, file), ginkgo.Fail))
framework.RunKubectlOrDieInput(input, "create", "-f", "-", fmt.Sprintf("--namespace=%s", ns))
}
@ -75,16 +75,16 @@ func (t *CassandraUpgradeTest) Setup(f *framework.Framework) {
statefulsetTimeout := 10 * time.Minute
t.ssTester = framework.NewStatefulSetTester(f.ClientSet)
By("Creating a PDB")
ginkgo.By("Creating a PDB")
cassandraKubectlCreate(ns, "pdb.yaml")
By("Creating a Cassandra StatefulSet")
ginkgo.By("Creating a Cassandra StatefulSet")
t.ssTester.CreateStatefulSet(cassandraManifestPath, ns)
By("Creating a cassandra-test-server deployment")
ginkgo.By("Creating a cassandra-test-server deployment")
cassandraKubectlCreate(ns, "tester.yaml")
By("Getting the ingress IPs from the services")
ginkgo.By("Getting the ingress IPs from the services")
err := wait.PollImmediate(statefulsetPoll, statefulsetTimeout, func() (bool, error) {
if t.ip = t.getServiceIP(f, ns, "test-server"); t.ip == "" {
return false, nil
@ -95,18 +95,18 @@ func (t *CassandraUpgradeTest) Setup(f *framework.Framework) {
}
return true, nil
})
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.Logf("Service endpoint is up")
By("Adding 2 dummy users")
Expect(t.addUser("Alice")).NotTo(HaveOccurred())
Expect(t.addUser("Bob")).NotTo(HaveOccurred())
ginkgo.By("Adding 2 dummy users")
gomega.Expect(t.addUser("Alice")).NotTo(gomega.HaveOccurred())
gomega.Expect(t.addUser("Bob")).NotTo(gomega.HaveOccurred())
t.successfulWrites = 2
By("Verifying that the users exist")
ginkgo.By("Verifying that the users exist")
users, err := t.listUsers()
Expect(err).NotTo(HaveOccurred())
Expect(len(users)).To(Equal(2))
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Expect(len(users)).To(gomega.Equal(2))
}
// listUsers gets a list of users from the db via the tester service.
@ -151,7 +151,7 @@ func (t *CassandraUpgradeTest) addUser(name string) error {
// getServiceIP is a helper method to extract the Ingress IP from the service.
func (t *CassandraUpgradeTest) getServiceIP(f *framework.Framework, ns, svcName string) string {
svc, err := f.ClientSet.CoreV1().Services(ns).Get(svcName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ingress := svc.Status.LoadBalancer.Ingress
if len(ingress) == 0 {
return ""
@ -165,7 +165,7 @@ func (t *CassandraUpgradeTest) getServiceIP(f *framework.Framework, ns, svcName
// ratio is over a certain threshold (0.75). We also verify that we get
// at least the same number of rows back as we successfully wrote.
func (t *CassandraUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {
By("Continuously polling the database during upgrade.")
ginkgo.By("Continuously polling the database during upgrade.")
var (
success, failures, writeAttempts, lastUserCount int
mu sync.Mutex
@ -199,19 +199,19 @@ func (t *CassandraUpgradeTest) Test(f *framework.Framework, done <-chan struct{}
}, 10*time.Millisecond, done)
framework.Logf("got %d users; want >=%d", lastUserCount, t.successfulWrites)
Expect(lastUserCount >= t.successfulWrites).To(BeTrue())
gomega.Expect(lastUserCount >= t.successfulWrites).To(gomega.BeTrue())
ratio := float64(success) / float64(success+failures)
framework.Logf("Successful gets %d/%d=%v", success, success+failures, ratio)
ratio = float64(t.successfulWrites) / float64(writeAttempts)
framework.Logf("Successful writes %d/%d=%v", t.successfulWrites, writeAttempts, ratio)
framework.Logf("Errors: %v", errors)
// TODO(maisem): tweak this value once we have a few test runs.
Expect(ratio > 0.75).To(BeTrue())
gomega.Expect(ratio > 0.75).To(gomega.BeTrue())
}
// Teardown does one final check of the data's availability.
func (t *CassandraUpgradeTest) Teardown(f *framework.Framework) {
users, err := t.listUsers()
Expect(err).NotTo(HaveOccurred())
Expect(len(users) >= t.successfulWrites).To(BeTrue())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Expect(len(users) >= t.successfulWrites).To(gomega.BeTrue())
}
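
The cassandraKubectlCreate helper above (and the kubectlCreate and mysqlKubectlCreate helpers in later files) is where the bare Fail symbol, previously provided by the dot import, becomes the qualified ginkgo.Fail passed to testfiles.ReadOrDie. A minimal sketch of that call shape, assuming the same framework and testfiles packages these files already import and an invented exampleManifestPath:

package upgrades

import (
	"fmt"
	"path/filepath"

	"github.com/onsi/ginkgo"

	"k8s.io/kubernetes/test/e2e/framework"
	"k8s.io/kubernetes/test/e2e/framework/testfiles"
)

// exampleManifestPath is an invented path used only for this sketch.
const exampleManifestPath = "test/e2e/testing-manifests/statefulset/example"

// exampleKubectlCreate mirrors the helpers in this commit: the manifest is read
// with an explicit failure handler (ginkgo.Fail instead of the bare Fail) and
// piped to kubectl through the framework helper.
func exampleKubectlCreate(ns, file string) {
	input := string(testfiles.ReadOrDie(filepath.Join(exampleManifestPath, file), ginkgo.Fail))
	framework.RunKubectlOrDieInput(input, "create", "-f", "-", fmt.Sprintf("--namespace=%s", ns))
}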

View File

@ -24,7 +24,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
"k8s.io/apimachinery/pkg/util/uuid"
)
@ -34,6 +34,7 @@ type ConfigMapUpgradeTest struct {
configMap *v1.ConfigMap
}
// Name returns the tracking name of the test.
func (ConfigMapUpgradeTest) Name() string {
return "[sig-storage] [sig-api-machinery] configmap-upgrade"
}
@ -54,13 +55,13 @@ func (t *ConfigMapUpgradeTest) Setup(f *framework.Framework) {
},
}
By("Creating a ConfigMap")
ginkgo.By("Creating a ConfigMap")
var err error
if t.configMap, err = f.ClientSet.CoreV1().ConfigMaps(ns.Name).Create(t.configMap); err != nil {
framework.Failf("unable to create test ConfigMap %s: %v", t.configMap.Name, err)
}
By("Making sure the ConfigMap is consumable")
ginkgo.By("Making sure the ConfigMap is consumable")
t.testPod(f)
}
@ -68,7 +69,7 @@ func (t *ConfigMapUpgradeTest) Setup(f *framework.Framework) {
// pod can still consume the ConfigMap.
func (t *ConfigMapUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {
<-done
By("Consuming the ConfigMap after upgrade")
ginkgo.By("Consuming the ConfigMap after upgrade")
t.testPod(f)
}

View File

@ -25,8 +25,8 @@ import (
"sync"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/version"
@ -37,14 +37,17 @@ import (
const manifestPath = "test/e2e/testing-manifests/statefulset/etcd"
// EtcdUpgradeTest tests that etcd is writable before and after a cluster upgrade.
type EtcdUpgradeTest struct {
ip string
successfulWrites int
ssTester *framework.StatefulSetTester
}
// Name returns the tracking name of the test.
func (EtcdUpgradeTest) Name() string { return "etcd-upgrade" }
// Skip returns true when this test can be skipped.
func (EtcdUpgradeTest) Skip(upgCtx UpgradeContext) bool {
minVersion := version.MustParseSemantic("1.6.0")
for _, vCtx := range upgCtx.Versions {
@ -56,26 +59,27 @@ func (EtcdUpgradeTest) Skip(upgCtx UpgradeContext) bool {
}
func kubectlCreate(ns, file string) {
input := string(testfiles.ReadOrDie(filepath.Join(manifestPath, file), Fail))
input := string(testfiles.ReadOrDie(filepath.Join(manifestPath, file), ginkgo.Fail))
framework.RunKubectlOrDieInput(input, "create", "-f", "-", fmt.Sprintf("--namespace=%s", ns))
}
// Setup creates etcd statefulset and then verifies that the etcd is writable.
func (t *EtcdUpgradeTest) Setup(f *framework.Framework) {
ns := f.Namespace.Name
statefulsetPoll := 30 * time.Second
statefulsetTimeout := 10 * time.Minute
t.ssTester = framework.NewStatefulSetTester(f.ClientSet)
By("Creating a PDB")
ginkgo.By("Creating a PDB")
kubectlCreate(ns, "pdb.yaml")
By("Creating an etcd StatefulSet")
ginkgo.By("Creating an etcd StatefulSet")
t.ssTester.CreateStatefulSet(manifestPath, ns)
By("Creating an etcd--test-server deployment")
ginkgo.By("Creating an etcd--test-server deployment")
kubectlCreate(ns, "tester.yaml")
By("Getting the ingress IPs from the services")
ginkgo.By("Getting the ingress IPs from the services")
err := wait.PollImmediate(statefulsetPoll, statefulsetTimeout, func() (bool, error) {
if t.ip = t.getServiceIP(f, ns, "test-server"); t.ip == "" {
return false, nil
@ -86,18 +90,18 @@ func (t *EtcdUpgradeTest) Setup(f *framework.Framework) {
}
return true, nil
})
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.Logf("Service endpoint is up")
By("Adding 2 dummy users")
Expect(t.addUser("Alice")).NotTo(HaveOccurred())
Expect(t.addUser("Bob")).NotTo(HaveOccurred())
ginkgo.By("Adding 2 dummy users")
gomega.Expect(t.addUser("Alice")).NotTo(gomega.HaveOccurred())
gomega.Expect(t.addUser("Bob")).NotTo(gomega.HaveOccurred())
t.successfulWrites = 2
By("Verifying that the users exist")
ginkgo.By("Verifying that the users exist")
users, err := t.listUsers()
Expect(err).NotTo(HaveOccurred())
Expect(len(users)).To(Equal(2))
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Expect(len(users)).To(gomega.Equal(2))
}
func (t *EtcdUpgradeTest) listUsers() ([]string, error) {
@ -139,7 +143,7 @@ func (t *EtcdUpgradeTest) addUser(name string) error {
func (t *EtcdUpgradeTest) getServiceIP(f *framework.Framework, ns, svcName string) string {
svc, err := f.ClientSet.CoreV1().Services(ns).Get(svcName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ingress := svc.Status.LoadBalancer.Ingress
if len(ingress) == 0 {
return ""
@ -147,8 +151,9 @@ func (t *EtcdUpgradeTest) getServiceIP(f *framework.Framework, ns, svcName strin
return ingress[0].IP
}
// Test waits for upgrade to complete and verifies if etcd is writable.
func (t *EtcdUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {
By("Continuously polling the database during upgrade.")
ginkgo.By("Continuously polling the database during upgrade.")
var (
success, failures, writeAttempts, lastUserCount int
mu sync.Mutex
@ -182,19 +187,19 @@ func (t *EtcdUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upg
}, 10*time.Millisecond, done)
framework.Logf("got %d users; want >=%d", lastUserCount, t.successfulWrites)
Expect(lastUserCount >= t.successfulWrites).To(BeTrue())
gomega.Expect(lastUserCount >= t.successfulWrites).To(gomega.BeTrue())
ratio := float64(success) / float64(success+failures)
framework.Logf("Successful gets %d/%d=%v", success, success+failures, ratio)
ratio = float64(t.successfulWrites) / float64(writeAttempts)
framework.Logf("Successful writes %d/%d=%v", t.successfulWrites, writeAttempts, ratio)
framework.Logf("Errors: %v", errors)
// TODO(maisem): tweak this value once we have a few test runs.
Expect(ratio > 0.75).To(BeTrue())
gomega.Expect(ratio > 0.75).To(gomega.BeTrue())
}
// Teardown does one final check of the data's availability.
func (t *EtcdUpgradeTest) Teardown(f *framework.Framework) {
users, err := t.listUsers()
Expect(err).NotTo(HaveOccurred())
Expect(len(users) >= t.successfulWrites).To(BeTrue())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Expect(len(users) >= t.successfulWrites).To(gomega.BeTrue())
}

View File

@ -24,7 +24,7 @@ import (
"k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
)
// HPAUpgradeTest tests that HPA rescales target resource correctly before and after a cluster upgrade.
@ -33,9 +33,10 @@ type HPAUpgradeTest struct {
hpa *autoscalingv1.HorizontalPodAutoscaler
}
// Name returns the tracking name of the test.
func (HPAUpgradeTest) Name() string { return "hpa-upgrade" }
// Creates a resource consumer and an HPA object that autoscales the consumer.
// Setup creates a resource consumer and an HPA object that autoscales the consumer.
func (t *HPAUpgradeTest) Setup(f *framework.Framework) {
t.rc = common.NewDynamicResourceConsumer(
"res-cons-upgrade",
@ -63,7 +64,7 @@ func (t *HPAUpgradeTest) Setup(f *framework.Framework) {
// Test waits for upgrade to complete and verifies if HPA works correctly.
func (t *HPAUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {
// Block until upgrade is done
By(fmt.Sprintf("Waiting for upgrade to finish before checking HPA"))
ginkgo.By(fmt.Sprintf("Waiting for upgrade to finish before checking HPA"))
<-done
t.test()
}
@ -79,19 +80,19 @@ func (t *HPAUpgradeTest) test() {
const timeToWait = 15 * time.Minute
t.rc.Resume()
By(fmt.Sprintf("HPA scales to 1 replica: consume 10 millicores, target per pod 100 millicores, min pods 1."))
ginkgo.By(fmt.Sprintf("HPA scales to 1 replica: consume 10 millicores, target per pod 100 millicores, min pods 1."))
t.rc.ConsumeCPU(10) /* millicores */
By(fmt.Sprintf("HPA waits for 1 replica"))
ginkgo.By(fmt.Sprintf("HPA waits for 1 replica"))
t.rc.WaitForReplicas(1, timeToWait)
By(fmt.Sprintf("HPA scales to 3 replicas: consume 250 millicores, target per pod 100 millicores."))
ginkgo.By(fmt.Sprintf("HPA scales to 3 replicas: consume 250 millicores, target per pod 100 millicores."))
t.rc.ConsumeCPU(250) /* millicores */
By(fmt.Sprintf("HPA waits for 3 replicas"))
ginkgo.By(fmt.Sprintf("HPA waits for 3 replicas"))
t.rc.WaitForReplicas(3, timeToWait)
By(fmt.Sprintf("HPA scales to 5 replicas: consume 700 millicores, target per pod 100 millicores, max pods 5."))
ginkgo.By(fmt.Sprintf("HPA scales to 5 replicas: consume 700 millicores, target per pod 100 millicores, max pods 5."))
t.rc.ConsumeCPU(700) /* millicores */
By(fmt.Sprintf("HPA waits for 5 replicas"))
ginkgo.By(fmt.Sprintf("HPA waits for 5 replicas"))
t.rc.WaitForReplicas(5, timeToWait)
// We need to pause background goroutines as during upgrade master is unavailable and requests issued by them fail.

View File

@ -24,7 +24,7 @@ import (
"reflect"
"github.com/davecgh/go-spew/spew"
. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
compute "google.golang.org/api/compute/v1"
extensions "k8s.io/api/extensions/v1beta1"
@ -61,10 +61,11 @@ type GCPResourceStore struct {
TpsList []*compute.TargetHttpsProxy
SslList []*compute.SslCertificate
BeList []*compute.BackendService
Ip *compute.Address
IP *compute.Address
IgList []*compute.InstanceGroup
}
// Name returns the tracking name of the test.
func (IngressUpgradeTest) Name() string { return "ingress-upgrade" }
// Setup creates a GLBC, allocates an ip, and an ingress resource,
@ -96,17 +97,17 @@ func (t *IngressUpgradeTest) Setup(f *framework.Framework) {
t.ip = t.gceController.CreateStaticIP(t.ipName)
// Create a working basic Ingress
By(fmt.Sprintf("allocated static ip %v: %v through the GCE cloud provider", t.ipName, t.ip))
ginkgo.By(fmt.Sprintf("allocated static ip %v: %v through the GCE cloud provider", t.ipName, t.ip))
jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "static-ip-2"), ns.Name, map[string]string{
ingress.IngressStaticIPKey: t.ipName,
ingress.IngressAllowHTTPKey: "false",
}, map[string]string{})
t.jig.SetHTTPS("tls-secret", "ingress.test.com")
By("waiting for Ingress to come up with ip: " + t.ip)
ginkgo.By("waiting for Ingress to come up with ip: " + t.ip)
framework.ExpectNoError(framework.PollURL(fmt.Sprintf("https://%v/%v", t.ip, path), host, framework.LoadBalancerPollTimeout, t.jig.PollInterval, t.httpClient, false))
By("keeping track of GCP resources created by Ingress")
ginkgo.By("keeping track of GCP resources created by Ingress")
t.resourceStore = &GCPResourceStore{}
t.populateGCPResourceStore(t.resourceStore)
}
@ -134,18 +135,18 @@ func (t *IngressUpgradeTest) Test(f *framework.Framework, done <-chan struct{},
// Teardown cleans up any remaining resources.
func (t *IngressUpgradeTest) Teardown(f *framework.Framework) {
if CurrentGinkgoTestDescription().Failed {
if ginkgo.CurrentGinkgoTestDescription().Failed {
framework.DescribeIng(t.gceController.Ns)
}
if t.jig.Ingress != nil {
By("Deleting ingress")
ginkgo.By("Deleting ingress")
t.jig.TryDeleteIngress()
} else {
By("No ingress created, no cleanup necessary")
ginkgo.By("No ingress created, no cleanup necessary")
}
By("Cleaning up cloud resources")
ginkgo.By("Cleaning up cloud resources")
framework.ExpectNoError(t.gceController.CleanupGCEIngressController())
}
@ -171,20 +172,20 @@ func (t *IngressUpgradeTest) Skip(upgCtx UpgradeContext) bool {
func (t *IngressUpgradeTest) verify(f *framework.Framework, done <-chan struct{}, testDuringDisruption bool) {
if testDuringDisruption {
By("continuously hitting the Ingress IP")
ginkgo.By("continuously hitting the Ingress IP")
wait.Until(func() {
framework.ExpectNoError(framework.PollURL(fmt.Sprintf("https://%v/%v", t.ip, path), host, framework.LoadBalancerPollTimeout, t.jig.PollInterval, t.httpClient, false))
}, t.jig.PollInterval, done)
} else {
By("waiting for upgrade to finish without checking if Ingress remains up")
ginkgo.By("waiting for upgrade to finish without checking if Ingress remains up")
<-done
}
By("hitting the Ingress IP " + t.ip)
ginkgo.By("hitting the Ingress IP " + t.ip)
framework.ExpectNoError(framework.PollURL(fmt.Sprintf("https://%v/%v", t.ip, path), host, framework.LoadBalancerPollTimeout, t.jig.PollInterval, t.httpClient, false))
// We want to manually trigger a sync because then we can easily verify
// a correct sync completed after update.
By("updating ingress spec to manually trigger a sync")
ginkgo.By("updating ingress spec to manually trigger a sync")
t.jig.Update(func(ing *extensions.Ingress) {
ing.Spec.Rules[0].IngressRuleValue.HTTP.Paths = append(
ing.Spec.Rules[0].IngressRuleValue.HTTP.Paths,
@ -197,7 +198,7 @@ func (t *IngressUpgradeTest) verify(f *framework.Framework, done <-chan struct{}
// WaitForIngress() tests that all paths are pinged, which is how we know
// everything is synced with the cloud.
t.jig.WaitForIngress(false)
By("comparing GCP resources post-upgrade")
ginkgo.By("comparing GCP resources post-upgrade")
postUpgradeResourceStore := &GCPResourceStore{}
t.populateGCPResourceStore(postUpgradeResourceStore)
@ -238,7 +239,7 @@ func (t *IngressUpgradeTest) populateGCPResourceStore(resourceStore *GCPResource
resourceStore.TpsList = cont.ListTargetHttpsProxies()
resourceStore.SslList = cont.ListSslCertificates()
resourceStore.BeList = cont.ListGlobalBackendServices()
resourceStore.Ip = cont.GetGlobalAddress(t.ipName)
resourceStore.IP = cont.GetGlobalAddress(t.ipName)
resourceStore.IgList = cont.ListInstanceGroups()
}
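
golint applies the same initialism rule to struct fields, which is why GCPResourceStore.Ip becomes IP and the assignment in populateGCPResourceStore changes with it. A stripped-down, hypothetical sketch of the pattern (ResourceSnapshot and capture are invented names):

package upgrades

import compute "google.golang.org/api/compute/v1"

// ResourceSnapshot is an invented stand-in for GCPResourceStore, showing the
// golint-preferred field spelling IP rather than Ip.
type ResourceSnapshot struct {
	IP *compute.Address
}

// capture shows the matching change at the assignment site
// (resourceStore.Ip = ... becomes resourceStore.IP = ...).
func capture(addr *compute.Address) *ResourceSnapshot {
	return &ResourceSnapshot{IP: addr}
}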

View File

@ -28,8 +28,8 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
const (
@ -43,12 +43,13 @@ const (
type KubeProxyUpgradeTest struct {
}
// Name returns the tracking name of the test.
func (KubeProxyUpgradeTest) Name() string { return "[sig-network] kube-proxy-upgrade" }
// Setup verifies kube-proxy static pods are running before upgrade.
func (t *KubeProxyUpgradeTest) Setup(f *framework.Framework) {
By("Waiting for kube-proxy static pods running and ready")
Expect(waitForKubeProxyStaticPodsRunning(f.ClientSet)).NotTo(HaveOccurred())
ginkgo.By("Waiting for kube-proxy static pods running and ready")
gomega.Expect(waitForKubeProxyStaticPodsRunning(f.ClientSet)).NotTo(gomega.HaveOccurred())
}
// Test validates if kube-proxy is migrated from static pods to DaemonSet.
@ -56,14 +57,14 @@ func (t *KubeProxyUpgradeTest) Test(f *framework.Framework, done <-chan struct{}
c := f.ClientSet
// Block until upgrade is done.
By("Waiting for upgrade to finish")
ginkgo.By("Waiting for upgrade to finish")
<-done
By("Waiting for kube-proxy static pods disappear")
Expect(waitForKubeProxyStaticPodsDisappear(c)).NotTo(HaveOccurred())
ginkgo.By("Waiting for kube-proxy static pods disappear")
gomega.Expect(waitForKubeProxyStaticPodsDisappear(c)).NotTo(gomega.HaveOccurred())
By("Waiting for kube-proxy DaemonSet running and ready")
Expect(waitForKubeProxyDaemonSetRunning(c)).NotTo(HaveOccurred())
ginkgo.By("Waiting for kube-proxy DaemonSet running and ready")
gomega.Expect(waitForKubeProxyDaemonSetRunning(c)).NotTo(gomega.HaveOccurred())
}
// Teardown does nothing.
@ -74,12 +75,13 @@ func (t *KubeProxyUpgradeTest) Teardown(f *framework.Framework) {
type KubeProxyDowngradeTest struct {
}
// Name returns the tracking name of the test.
func (KubeProxyDowngradeTest) Name() string { return "[sig-network] kube-proxy-downgrade" }
// Setup verifies kube-proxy DaemonSet is running before upgrade.
func (t *KubeProxyDowngradeTest) Setup(f *framework.Framework) {
By("Waiting for kube-proxy DaemonSet running and ready")
Expect(waitForKubeProxyDaemonSetRunning(f.ClientSet)).NotTo(HaveOccurred())
ginkgo.By("Waiting for kube-proxy DaemonSet running and ready")
gomega.Expect(waitForKubeProxyDaemonSetRunning(f.ClientSet)).NotTo(gomega.HaveOccurred())
}
// Test validates if kube-proxy is migrated from DaemonSet to static pods.
@ -87,14 +89,14 @@ func (t *KubeProxyDowngradeTest) Test(f *framework.Framework, done <-chan struct
c := f.ClientSet
// Block until upgrade is done.
By("Waiting for upgrade to finish")
ginkgo.By("Waiting for upgrade to finish")
<-done
By("Waiting for kube-proxy DaemonSet disappear")
Expect(waitForKubeProxyDaemonSetDisappear(c)).NotTo(HaveOccurred())
ginkgo.By("Waiting for kube-proxy DaemonSet disappear")
gomega.Expect(waitForKubeProxyDaemonSetDisappear(c)).NotTo(gomega.HaveOccurred())
By("Waiting for kube-proxy static pods running and ready")
Expect(waitForKubeProxyStaticPodsRunning(c)).NotTo(HaveOccurred())
ginkgo.By("Waiting for kube-proxy static pods running and ready")
gomega.Expect(waitForKubeProxyStaticPodsRunning(c)).NotTo(gomega.HaveOccurred())
}
// Teardown does nothing.

View File

@ -25,8 +25,8 @@ import (
"strconv"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/version"
@ -37,17 +37,19 @@ import (
const mysqlManifestPath = "test/e2e/testing-manifests/statefulset/mysql-upgrade"
// MySqlUpgradeTest implements an upgrade test harness that polls a replicated sql database.
type MySqlUpgradeTest struct {
// MySQLUpgradeTest implements an upgrade test harness that polls a replicated sql database.
type MySQLUpgradeTest struct {
ip string
successfulWrites int
nextWrite int
ssTester *framework.StatefulSetTester
}
func (MySqlUpgradeTest) Name() string { return "mysql-upgrade" }
// Name returns the tracking name of the test.
func (MySQLUpgradeTest) Name() string { return "mysql-upgrade" }
func (MySqlUpgradeTest) Skip(upgCtx UpgradeContext) bool {
// Skip returns true when this test can be skipped.
func (MySQLUpgradeTest) Skip(upgCtx UpgradeContext) bool {
minVersion := version.MustParseSemantic("1.5.0")
for _, vCtx := range upgCtx.Versions {
@ -59,13 +61,13 @@ func (MySqlUpgradeTest) Skip(upgCtx UpgradeContext) bool {
}
func mysqlKubectlCreate(ns, file string) {
input := string(testfiles.ReadOrDie(filepath.Join(mysqlManifestPath, file), Fail))
input := string(testfiles.ReadOrDie(filepath.Join(mysqlManifestPath, file), ginkgo.Fail))
framework.RunKubectlOrDieInput(input, "create", "-f", "-", fmt.Sprintf("--namespace=%s", ns))
}
func (t *MySqlUpgradeTest) getServiceIP(f *framework.Framework, ns, svcName string) string {
func (t *MySQLUpgradeTest) getServiceIP(f *framework.Framework, ns, svcName string) string {
svc, err := f.ClientSet.CoreV1().Services(ns).Get(svcName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ingress := svc.Status.LoadBalancer.Ingress
if len(ingress) == 0 {
return ""
@ -77,22 +79,22 @@ func (t *MySqlUpgradeTest) getServiceIP(f *framework.Framework, ns, svcName stri
// from the db. It then connects to the db with the write Service and populates the db with a table
// and a few entries. Finally, it connects to the db with the read Service, and confirms the data is
// available. The db connections are left open to be used later in the test.
func (t *MySqlUpgradeTest) Setup(f *framework.Framework) {
func (t *MySQLUpgradeTest) Setup(f *framework.Framework) {
ns := f.Namespace.Name
statefulsetPoll := 30 * time.Second
statefulsetTimeout := 10 * time.Minute
t.ssTester = framework.NewStatefulSetTester(f.ClientSet)
By("Creating a configmap")
ginkgo.By("Creating a configmap")
mysqlKubectlCreate(ns, "configmap.yaml")
By("Creating a mysql StatefulSet")
ginkgo.By("Creating a mysql StatefulSet")
t.ssTester.CreateStatefulSet(mysqlManifestPath, ns)
By("Creating a mysql-test-server deployment")
ginkgo.By("Creating a mysql-test-server deployment")
mysqlKubectlCreate(ns, "tester.yaml")
By("Getting the ingress IPs from the test-service")
ginkgo.By("Getting the ingress IPs from the test-service")
err := wait.PollImmediate(statefulsetPoll, statefulsetTimeout, func() (bool, error) {
if t.ip = t.getServiceIP(f, ns, "test-server"); t.ip == "" {
return false, nil
@ -103,24 +105,24 @@ func (t *MySqlUpgradeTest) Setup(f *framework.Framework) {
}
return true, nil
})
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.Logf("Service endpoint is up")
By("Adding 2 names to the database")
Expect(t.addName(strconv.Itoa(t.nextWrite))).NotTo(HaveOccurred())
Expect(t.addName(strconv.Itoa(t.nextWrite))).NotTo(HaveOccurred())
ginkgo.By("Adding 2 names to the database")
gomega.Expect(t.addName(strconv.Itoa(t.nextWrite))).NotTo(gomega.HaveOccurred())
gomega.Expect(t.addName(strconv.Itoa(t.nextWrite))).NotTo(gomega.HaveOccurred())
By("Verifying that the 2 names have been inserted")
ginkgo.By("Verifying that the 2 names have been inserted")
count, err := t.countNames()
Expect(err).NotTo(HaveOccurred())
Expect(count).To(Equal(2))
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Expect(count).To(gomega.Equal(2))
}
// Test continually polls the db using the read and write connections, inserting data, and checking
// that all the data is readable.
func (t *MySqlUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {
func (t *MySQLUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {
var writeSuccess, readSuccess, writeFailure, readFailure int
By("Continuously polling the database during upgrade.")
ginkgo.By("Continuously polling the database during upgrade.")
go wait.Until(func() {
_, err := t.countNames()
if err != nil {
@ -162,14 +164,14 @@ func (t *MySqlUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, up
}
// Teardown performs one final check of the data's availability.
func (t *MySqlUpgradeTest) Teardown(f *framework.Framework) {
func (t *MySQLUpgradeTest) Teardown(f *framework.Framework) {
count, err := t.countNames()
Expect(err).NotTo(HaveOccurred())
Expect(count >= t.successfulWrites).To(BeTrue())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Expect(count >= t.successfulWrites).To(gomega.BeTrue())
}
// addName adds a new value to the db.
func (t *MySqlUpgradeTest) addName(name string) error {
func (t *MySQLUpgradeTest) addName(name string) error {
val := map[string][]string{"name": {name}}
t.nextWrite++
r, err := http.PostForm(fmt.Sprintf("http://%s:8080/addName", t.ip), val)
@ -189,7 +191,7 @@ func (t *MySqlUpgradeTest) addName(name string) error {
// countNames checks to make sure the values in testing.users are available, and returns
// the count of them.
func (t *MySqlUpgradeTest) countNames() (int, error) {
func (t *MySQLUpgradeTest) countNames() (int, error) {
r, err := http.Get(fmt.Sprintf("http://%s:8080/countNames", t.ip))
if err != nil {
return 0, err

View File

@ -26,8 +26,8 @@ import (
"k8s.io/kubernetes/test/e2e/scheduling"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
// NvidiaGPUUpgradeTest tests that gpu resource is available before and after
@ -35,12 +35,13 @@ import (
type NvidiaGPUUpgradeTest struct {
}
// Name returns the tracking name of the test.
func (NvidiaGPUUpgradeTest) Name() string { return "nvidia-gpu-upgrade [sig-node] [sig-scheduling]" }
// Setup creates a job requesting gpu.
func (t *NvidiaGPUUpgradeTest) Setup(f *framework.Framework) {
scheduling.SetupNVIDIAGPUNode(f, false)
By("Creating a job requesting gpu")
ginkgo.By("Creating a job requesting gpu")
t.startJob(f)
}
@ -48,13 +49,13 @@ func (t *NvidiaGPUUpgradeTest) Setup(f *framework.Framework) {
// cuda pod started by the gpu job can successfully finish.
func (t *NvidiaGPUUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {
<-done
By("Verifying gpu job success")
ginkgo.By("Verifying gpu job success")
t.verifyJobPodSuccess(f)
if upgrade == MasterUpgrade || upgrade == ClusterUpgrade {
// MasterUpgrade should be totally hitless.
job, err := framework.GetJob(f.ClientSet, f.Namespace.Name, "cuda-add")
Expect(err).NotTo(HaveOccurred())
Expect(job.Status.Failed).To(BeZero(), "Job pods failed during master upgrade: %v", job.Status.Failed)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Expect(job.Status.Failed).To(gomega.BeZero(), "Job pods failed during master upgrade: %v", job.Status.Failed)
}
}
@ -85,12 +86,12 @@ func (t *NvidiaGPUUpgradeTest) startJob(f *framework.Framework) {
}
ns := f.Namespace.Name
_, err := framework.CreateJob(f.ClientSet, ns, testJob)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.Logf("Created job %v", testJob)
By("Waiting for gpu job pod start")
ginkgo.By("Waiting for gpu job pod start")
err = framework.WaitForAllJobPodsRunning(f.ClientSet, ns, testJob.Name, 1)
Expect(err).NotTo(HaveOccurred())
By("Done with gpu job pod start")
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By("Done with gpu job pod start")
}
// verifyJobPodSuccess verifies that the started cuda pod successfully passes.
@ -98,9 +99,9 @@ func (t *NvidiaGPUUpgradeTest) verifyJobPodSuccess(f *framework.Framework) {
// Wait for client pod to complete.
ns := f.Namespace.Name
err := framework.WaitForAllJobPodsRunning(f.ClientSet, f.Namespace.Name, "cuda-add", 1)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
pods, err := framework.GetJobPods(f.ClientSet, f.Namespace.Name, "cuda-add")
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
createdPod := pods.Items[0].Name
framework.Logf("Created pod %v", createdPod)
f.PodClient().WaitForSuccess(createdPod, 5*time.Minute)
@ -108,5 +109,5 @@ func (t *NvidiaGPUUpgradeTest) verifyJobPodSuccess(f *framework.Framework) {
framework.ExpectNoError(err, "Should be able to get pod logs")
framework.Logf("Got pod logs: %v", logs)
regex := regexp.MustCompile("PASSED")
Expect(regex.MatchString(logs)).To(BeTrue())
gomega.Expect(regex.MatchString(logs)).To(gomega.BeTrue())
}

View File

@ -25,7 +25,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
)
// SecretUpgradeTest test that a secret is available before and after
@ -34,6 +34,7 @@ type SecretUpgradeTest struct {
secret *v1.Secret
}
// Name returns the tracking name of the test.
func (SecretUpgradeTest) Name() string { return "[sig-storage] [sig-api-machinery] secret-upgrade" }
// Setup creates a secret and then verifies that a pod can consume it.
@ -52,13 +53,13 @@ func (t *SecretUpgradeTest) Setup(f *framework.Framework) {
},
}
By("Creating a secret")
ginkgo.By("Creating a secret")
var err error
if t.secret, err = f.ClientSet.CoreV1().Secrets(ns.Name).Create(t.secret); err != nil {
framework.Failf("unable to create test secret %s: %v", t.secret.Name, err)
}
By("Making sure the secret is consumable")
ginkgo.By("Making sure the secret is consumable")
t.testPod(f)
}
@ -66,7 +67,7 @@ func (t *SecretUpgradeTest) Setup(f *framework.Framework) {
// pod can still consume the secret.
func (t *SecretUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {
<-done
By("Consuming the secret after upgrade")
ginkgo.By("Consuming the secret after upgrade")
t.testPod(f)
}

View File

@ -21,7 +21,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
)
// ServiceUpgradeTest tests that a service is available before and
@ -34,6 +34,7 @@ type ServiceUpgradeTest struct {
svcPort int
}
// Name returns the tracking name of the test.
func (ServiceUpgradeTest) Name() string { return "service-upgrade" }
func shouldTestPDBs() bool { return framework.ProviderIs("gce", "gke") }
@ -45,7 +46,7 @@ func (t *ServiceUpgradeTest) Setup(f *framework.Framework) {
ns := f.Namespace
By("creating a TCP service " + serviceName + " with type=LoadBalancer in namespace " + ns.Name)
ginkgo.By("creating a TCP service " + serviceName + " with type=LoadBalancer in namespace " + ns.Name)
tcpService := jig.CreateTCPServiceOrFail(ns.Name, func(s *v1.Service) {
s.Spec.Type = v1.ServiceTypeLoadBalancer
})
@ -56,16 +57,16 @@ func (t *ServiceUpgradeTest) Setup(f *framework.Framework) {
tcpIngressIP := framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])
svcPort := int(tcpService.Spec.Ports[0].Port)
By("creating pod to be part of service " + serviceName)
ginkgo.By("creating pod to be part of service " + serviceName)
rc := jig.RunOrFail(ns.Name, jig.AddRCAntiAffinity)
if shouldTestPDBs() {
By("creating a PodDisruptionBudget to cover the ReplicationController")
ginkgo.By("creating a PodDisruptionBudget to cover the ReplicationController")
jig.CreatePDBOrFail(ns.Name, rc)
}
// Hit it once before considering ourselves ready
By("hitting the pod through the service's LoadBalancer")
ginkgo.By("hitting the pod through the service's LoadBalancer")
jig.TestReachableHTTP(tcpIngressIP, svcPort, framework.LoadBalancerLagTimeoutDefault)
t.jig = jig
@ -95,18 +96,18 @@ func (t *ServiceUpgradeTest) Teardown(f *framework.Framework) {
func (t *ServiceUpgradeTest) test(f *framework.Framework, done <-chan struct{}, testDuringDisruption bool) {
if testDuringDisruption {
// Continuous validation
By("continuously hitting the pod through the service's LoadBalancer")
ginkgo.By("continuously hitting the pod through the service's LoadBalancer")
wait.Until(func() {
t.jig.TestReachableHTTP(t.tcpIngressIP, t.svcPort, framework.LoadBalancerLagTimeoutDefault)
}, framework.Poll, done)
} else {
// Block until upgrade is done
By("waiting for upgrade to finish without checking if service remains up")
ginkgo.By("waiting for upgrade to finish without checking if service remains up")
<-done
}
// Sanity check and hit it once more
By("hitting the pod through the service's LoadBalancer")
ginkgo.By("hitting the pod through the service's LoadBalancer")
t.jig.TestReachableHTTP(t.tcpIngressIP, t.svcPort, framework.LoadBalancerLagTimeoutDefault)
t.jig.SanityCheckService(t.tcpService, v1.ServiceTypeLoadBalancer)
}

View File

@ -21,8 +21,8 @@ import (
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
"k8s.io/kubernetes/test/e2e/upgrades"
)
@ -33,6 +33,7 @@ type PersistentVolumeUpgradeTest struct {
pvc *v1.PersistentVolumeClaim
}
// Name returns the tracking name of the test.
func (PersistentVolumeUpgradeTest) Name() string { return "[sig-storage] persistent-volume-upgrade" }
const (
@ -55,7 +56,7 @@ func (t *PersistentVolumeUpgradeTest) Setup(f *framework.Framework) {
ns := f.Namespace.Name
By("Initializing PV source")
ginkgo.By("Initializing PV source")
t.pvSource, _ = framework.CreateGCEVolume()
pvConfig := framework.PersistentVolumeConfig{
NamePrefix: "pv-upgrade",
@ -65,12 +66,12 @@ func (t *PersistentVolumeUpgradeTest) Setup(f *framework.Framework) {
emptyStorageClass := ""
pvcConfig := framework.PersistentVolumeClaimConfig{StorageClassName: &emptyStorageClass}
By("Creating the PV and PVC")
ginkgo.By("Creating the PV and PVC")
t.pv, t.pvc, err = framework.CreatePVPVC(f.ClientSet, pvConfig, pvcConfig, ns, true)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(framework.WaitOnPVandPVC(f.ClientSet, ns, t.pv, t.pvc))
By("Consuming the PV before upgrade")
ginkgo.By("Consuming the PV before upgrade")
t.testPod(f, pvWriteCmd+";"+pvReadCmd)
}
@ -78,7 +79,7 @@ func (t *PersistentVolumeUpgradeTest) Setup(f *framework.Framework) {
// and that the volume data persists.
func (t *PersistentVolumeUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) {
<-done
By("Consuming the PV after upgrade")
ginkgo.By("Consuming the PV after upgrade")
t.testPod(f, pvReadCmd)
}

View File

@ -27,8 +27,8 @@ import (
"k8s.io/kubernetes/test/e2e/storage/utils"
"k8s.io/kubernetes/test/e2e/upgrades"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
const devicePath = "/mnt/volume1"
@ -43,10 +43,12 @@ type VolumeModeDowngradeTest struct {
pod *v1.Pod
}
// Name returns the tracking name of the test.
func (VolumeModeDowngradeTest) Name() string {
return "[sig-storage] volume-mode-downgrade"
}
// Skip returns true when this test can be skipped.
func (t *VolumeModeDowngradeTest) Skip(upgCtx upgrades.UpgradeContext) bool {
if !framework.ProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure") {
return true
@ -72,7 +74,7 @@ func (t *VolumeModeDowngradeTest) Setup(f *framework.Framework) {
cs := f.ClientSet
ns := f.Namespace.Name
By("Creating a PVC")
ginkgo.By("Creating a PVC")
block := v1.PersistentVolumeBlock
pvcConfig := framework.PersistentVolumeClaimConfig{
StorageClassName: nil,
@ -80,46 +82,46 @@ func (t *VolumeModeDowngradeTest) Setup(f *framework.Framework) {
}
t.pvc = framework.MakePersistentVolumeClaim(pvcConfig, ns)
t.pvc, err = framework.CreatePVC(cs, ns, t.pvc)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, ns, t.pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
t.pvc, err = cs.CoreV1().PersistentVolumeClaims(t.pvc.Namespace).Get(t.pvc.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
t.pv, err = cs.CoreV1().PersistentVolumes().Get(t.pvc.Spec.VolumeName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
By("Consuming the PVC before downgrade")
ginkgo.By("Consuming the PVC before downgrade")
t.pod, err = framework.CreateSecPod(cs, ns, []*v1.PersistentVolumeClaim{t.pvc}, false, "", false, false, framework.SELinuxLabel, nil, framework.PodStartTimeout)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
By("Checking if PV exists as expected volume mode")
ginkgo.By("Checking if PV exists as expected volume mode")
utils.CheckVolumeModeOfPath(t.pod, block, devicePath)
By("Checking if read/write to PV works properly")
ginkgo.By("Checking if read/write to PV works properly")
utils.CheckReadWriteToPath(t.pod, block, devicePath)
}
// Test waits for the downgrade to complete, and then verifies that a pod can no
// longer consume the PV, as it is neither mapped nor mounted into the pod.
func (t *VolumeModeDowngradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) {
By("Waiting for downgrade to finish")
ginkgo.By("Waiting for downgrade to finish")
<-done
By("Verifying that nothing exists at the device path in the pod")
ginkgo.By("Verifying that nothing exists at the device path in the pod")
utils.VerifyExecInPodFail(t.pod, fmt.Sprintf("test -e %s", devicePath), 1)
}
// Teardown cleans up any remaining resources.
func (t *VolumeModeDowngradeTest) Teardown(f *framework.Framework) {
By("Deleting the pod")
ginkgo.By("Deleting the pod")
framework.ExpectNoError(framework.DeletePodWithWait(f, f.ClientSet, t.pod))
By("Deleting the PVC")
ginkgo.By("Deleting the PVC")
framework.ExpectNoError(f.ClientSet.CoreV1().PersistentVolumeClaims(t.pvc.Namespace).Delete(t.pvc.Name, nil))
By("Waiting for the PV to be deleted")
ginkgo.By("Waiting for the PV to be deleted")
framework.ExpectNoError(framework.WaitForPersistentVolumeDeleted(f.ClientSet, t.pv.Name, 5*time.Second, 20*time.Minute))
}
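
The two comments added above (Name, Skip) follow the golint rule that a doc comment on an exported identifier must begin with that identifier's name. A tiny sketch of the convention, using a hypothetical Widget type rather than anything from this PR:

package example

// Widget is a hypothetical type used only to illustrate the golint rule
// behind the added comments: a doc comment on an exported identifier must
// start with the identifier's name.
type Widget struct{}

// Name returns the tracking name of the widget.
func (Widget) Name() string { return "widget" }

// Skip reports whether the widget should be skipped; it mirrors the shape
// of the comments added in this file, not any real upgrade-test API.
func (Widget) Skip() bool { return false }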

View File

@ -19,8 +19,8 @@ package upgrades
import (
"fmt"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
@ -32,7 +32,7 @@ import (
imageutils "k8s.io/kubernetes/test/utils/image"
)
// SecretUpgradeTest tests that a pod with sysctls runs before and after an upgrade. During
// SysctlUpgradeTest tests that a pod with sysctls runs before and after an upgrade. During
// a master upgrade, the exact pod is expected to stay running. A pod with unsafe sysctls is
// expected to keep failing before and after the upgrade.
type SysctlUpgradeTest struct {
@ -53,19 +53,19 @@ func (t *SysctlUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, u
<-done
switch upgrade {
case MasterUpgrade, ClusterUpgrade:
By("Checking the safe sysctl pod keeps running on master upgrade")
ginkgo.By("Checking the safe sysctl pod keeps running on master upgrade")
pod, err := f.ClientSet.CoreV1().Pods(t.validPod.Namespace).Get(t.validPod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
Expect(pod.Status.Phase).To(Equal(v1.PodRunning))
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Expect(pod.Status.Phase).To(gomega.Equal(v1.PodRunning))
}
By("Checking the old unsafe sysctl pod was not suddenly started during an upgrade")
ginkgo.By("Checking the old unsafe sysctl pod was not suddenly started during an upgrade")
pod, err := f.ClientSet.CoreV1().Pods(t.invalidPod.Namespace).Get(t.invalidPod.Name, metav1.GetOptions{})
if err != nil && !errors.IsNotFound(err) {
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
if err == nil {
Expect(pod.Status.Phase).NotTo(Equal(v1.PodRunning))
gomega.Expect(pod.Status.Phase).NotTo(gomega.Equal(v1.PodRunning))
}
t.verifySafeSysctlWork(f)
@ -78,15 +78,15 @@ func (t *SysctlUpgradeTest) Teardown(f *framework.Framework) {
}
func (t *SysctlUpgradeTest) verifySafeSysctlWork(f *framework.Framework) *v1.Pod {
By("Creating a pod with safe sysctls")
ginkgo.By("Creating a pod with safe sysctls")
safeSysctl := "net.ipv4.ip_local_port_range"
safeSysctlValue := "1024 1042"
validPod := sysctlTestPod("valid-sysctls", map[string]string{safeSysctl: safeSysctlValue})
validPod = f.PodClient().Create(t.validPod)
By("Making sure the valid pod launches")
ginkgo.By("Making sure the valid pod launches")
ev, err := f.PodClient().WaitForErrorEventOrSuccess(t.validPod)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
if ev != nil && ev.Reason == sysctl.UnsupportedReason {
framework.Skipf("No sysctl support in Docker <1.12")
}
@ -96,19 +96,19 @@ func (t *SysctlUpgradeTest) verifySafeSysctlWork(f *framework.Framework) *v1.Pod
}
func (t *SysctlUpgradeTest) verifyUnsafeSysctlsAreRejected(f *framework.Framework) *v1.Pod {
By("Creating a pod with unsafe sysctls")
ginkgo.By("Creating a pod with unsafe sysctls")
invalidPod := sysctlTestPod("valid-sysctls-"+string(uuid.NewUUID()), map[string]string{
"fs.mount-max": "1000000",
})
invalidPod = f.PodClient().Create(invalidPod)
By("Making sure the invalid pod failed")
ginkgo.By("Making sure the invalid pod failed")
ev, err := f.PodClient().WaitForErrorEventOrSuccess(invalidPod)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
if ev != nil && ev.Reason == sysctl.UnsupportedReason {
framework.Skipf("No sysctl support in Docker <1.12")
}
Expect(ev.Reason).To(Equal(sysctl.ForbiddenReason))
gomega.Expect(ev.Reason).To(gomega.Equal(sysctl.ForbiddenReason))
return invalidPod
}
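
One detail worth calling out in the sysctl hunks is the error handling around the Get call, which tolerates a NotFound error but fails on anything else. A minimal sketch of that shape, assuming it runs inside a suite where gomega's fail handler is registered; tolerateNotFound and getErr are hypothetical names, not code from this PR.

package example

import (
	"github.com/onsi/gomega"
	"k8s.io/apimachinery/pkg/api/errors"
)

// tolerateNotFound is a hypothetical helper; getErr stands in for the
// error returned by the Pods(...).Get call in the sysctl test. A NotFound
// error is acceptable (the pod may legitimately be gone after the
// upgrade); any other error fails the gomega assertion.
func tolerateNotFound(getErr error) {
	if getErr != nil && !errors.IsNotFound(getErr) {
		gomega.Expect(getErr).NotTo(gomega.HaveOccurred())
	}
}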