Refactored framework deployment utils

This continues the refactoring of framework/deployment_utils.go
into the framework/deployment package.

Signed-off-by: Jorge Alarcon Ochoa <alarcj137@gmail.com>
k3s-v1.15.3
Jorge Alarcon Ochoa 2019-05-02 19:44:51 -04:00
parent 6f1fd17b7a
commit dc619067d9
20 changed files with 318 additions and 254 deletions
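
For callers, the migration is mechanical: import the new package under an alias and swap the receiving package on each helper call; assertions such as ExpectNoError stay in the core framework package. A minimal sketch of the pattern, assuming a test already holds a clientset and namespace (the ensureDeployment helper and its message strings are illustrative, not part of this commit):

package e2e

import (
	apps "k8s.io/api/apps/v1"
	clientset "k8s.io/client-go/kubernetes"

	"k8s.io/kubernetes/test/e2e/framework"
	e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
)

// ensureDeployment creates a deployment and waits for its first rollout,
// using the fixtures and wait helpers from their new e2edeploy home.
func ensureDeployment(c clientset.Interface, ns, name, image string, replicas int32) {
	labels := map[string]string{"name": name}
	// Previously framework.NewDeployment(...).
	d := e2edeploy.NewDeployment(name, replicas, labels, name, image, apps.RollingUpdateDeploymentStrategyType)
	deployment, err := c.AppsV1().Deployments(ns).Create(d)
	framework.ExpectNoError(err, "creating deployment %s in namespace %s", name, ns)
	// Previously framework.WaitForDeploymentRevisionAndImage / WaitForDeploymentComplete.
	err = e2edeploy.WaitForDeploymentRevisionAndImage(c, ns, name, "1", image)
	framework.ExpectNoError(err, "waiting for the deployment of image %s in %s in %s to complete", image, name, ns)
	err = e2edeploy.WaitForDeploymentComplete(c, deployment)
	framework.ExpectNoError(err, "waiting for deployment %s in namespace %s to complete", name, ns)
}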

View File

@@ -80,6 +80,7 @@ go_library(
"//staging/src/k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1:go_default_library",
"//test/e2e/apps:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/deployment:go_default_library",
"//test/e2e/framework/metrics:go_default_library",
"//test/utils:go_default_library",
"//test/utils/crd:go_default_library",

View File

@@ -40,6 +40,7 @@ import (
aggregatorclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
rbacv1beta1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1beta1"
"k8s.io/kubernetes/test/e2e/framework"
e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
imageutils "k8s.io/kubernetes/test/utils/image"
samplev1alpha1 "k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1"
"k8s.io/utils/pointer"
@@ -262,9 +263,9 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl
}
deployment, err := client.AppsV1().Deployments(namespace).Create(d)
framework.ExpectNoError(err, "creating deployment %s in namespace %s", deploymentName, namespace)
err = framework.WaitForDeploymentRevisionAndImage(client, namespace, deploymentName, "1", image)
err = e2edeploy.WaitForDeploymentRevisionAndImage(client, namespace, deploymentName, "1", image)
framework.ExpectNoError(err, "waiting for the deployment of image %s in %s in %s to complete", image, deploymentName, namespace)
err = framework.WaitForDeploymentRevisionAndImage(client, namespace, deploymentName, "1", etcdImage)
err = e2edeploy.WaitForDeploymentRevisionAndImage(client, namespace, deploymentName, "1", etcdImage)
framework.ExpectNoError(err, "waiting for the deployment of image %s in %s to complete", etcdImage, deploymentName, namespace)
// kubectl create -f service.yaml
@@ -321,7 +322,7 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl
// kubectl get deployments -n <aggregated-api-namespace> && status == Running
// NOTE: aggregated apis should generally be set up in their own namespace (<aggregated-api-namespace>). As the test framework
// is setting up a new namespace, we are just using that.
err = framework.WaitForDeploymentComplete(client, deployment)
err = e2edeploy.WaitForDeploymentComplete(client, deployment)
framework.ExpectNoError(err, "deploying extension apiserver in namespace %s", namespace)
// kubectl create -f apiservice.yaml

View File

@@ -32,6 +32,7 @@ import (
"k8s.io/client-go/dynamic"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
"k8s.io/kubernetes/test/utils/crd"
imageutils "k8s.io/kubernetes/test/utils/image"
"k8s.io/utils/pointer"
@@ -254,9 +255,9 @@ func deployCustomResourceWebhookAndService(f *framework.Framework, image string,
deployment, err := client.AppsV1().Deployments(namespace).Create(d)
framework.ExpectNoError(err, "creating deployment %s in namespace %s", deploymentCRDName, namespace)
ginkgo.By("Wait for the deployment to be ready")
err = framework.WaitForDeploymentRevisionAndImage(client, namespace, deploymentCRDName, "1", image)
err = e2edeploy.WaitForDeploymentRevisionAndImage(client, namespace, deploymentCRDName, "1", image)
framework.ExpectNoError(err, "waiting for the deployment of image %s in %s in %s to complete", image, deploymentName, namespace)
err = framework.WaitForDeploymentComplete(client, deployment)
err = e2edeploy.WaitForDeploymentComplete(client, deployment)
framework.ExpectNoError(err, "waiting for the deployment status valid", image, deploymentCRDName, namespace)
ginkgo.By("Deploying the webhook service")

View File

@@ -38,6 +38,7 @@ import (
"k8s.io/client-go/dynamic"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
"k8s.io/kubernetes/test/utils/crd"
imageutils "k8s.io/kubernetes/test/utils/image"
"k8s.io/utils/pointer"
@@ -352,9 +353,9 @@ func deployWebhookAndService(f *framework.Framework, image string, context *cert
deployment, err := client.AppsV1().Deployments(namespace).Create(d)
framework.ExpectNoError(err, "creating deployment %s in namespace %s", deploymentName, namespace)
ginkgo.By("Wait for the deployment to be ready")
err = framework.WaitForDeploymentRevisionAndImage(client, namespace, deploymentName, "1", image)
err = e2edeploy.WaitForDeploymentRevisionAndImage(client, namespace, deploymentName, "1", image)
framework.ExpectNoError(err, "waiting for the deployment of image %s in %s in %s to complete", image, deploymentName, namespace)
err = framework.WaitForDeploymentComplete(client, deployment)
err = e2edeploy.WaitForDeploymentComplete(client, deployment)
framework.ExpectNoError(err, "waiting for the deployment status valid", image, deploymentName, namespace)
ginkgo.By("Deploying the webhook service")

View File

@@ -39,7 +39,7 @@ import (
appsinternal "k8s.io/kubernetes/pkg/apis/apps"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
"k8s.io/kubernetes/test/e2e/framework"
frameworkdeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
"k8s.io/kubernetes/test/e2e/framework/replicaset"
testutil "k8s.io/kubernetes/test/utils"
utilpointer "k8s.io/utils/pointer"
@@ -231,16 +231,16 @@ func testDeleteDeployment(f *framework.Framework) {
podLabels := map[string]string{"name": NginxImageName}
replicas := int32(1)
framework.Logf("Creating simple deployment %s", deploymentName)
d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, apps.RollingUpdateDeploymentStrategyType)
d := e2edeploy.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, apps.RollingUpdateDeploymentStrategyType)
d.Annotations = map[string]string{"test": "should-copy-to-replica-set", v1.LastAppliedConfigAnnotation: "should-not-copy-to-replica-set"}
deploy, err := c.AppsV1().Deployments(ns).Create(d)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// Wait for it to be updated to revision 1
err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", NginxImage)
err = e2edeploy.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", NginxImage)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
err = framework.WaitForDeploymentComplete(c, deploy)
err = e2edeploy.WaitForDeploymentComplete(c, deploy)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
@@ -278,17 +278,17 @@ func testRollingUpdateDeployment(f *framework.Framework) {
// Create a deployment to delete nginx pods and instead bring up redis pods.
deploymentName := "test-rolling-update-deployment"
framework.Logf("Creating deployment %q", deploymentName)
d := framework.NewDeployment(deploymentName, replicas, deploymentPodLabels, RedisImageName, RedisImage, apps.RollingUpdateDeploymentStrategyType)
d := e2edeploy.NewDeployment(deploymentName, replicas, deploymentPodLabels, RedisImageName, RedisImage, apps.RollingUpdateDeploymentStrategyType)
deploy, err := c.AppsV1().Deployments(ns).Create(d)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// Wait for it to be updated to revision 3546343826724305833.
framework.Logf("Ensuring deployment %q gets the next revision from the one the adopted replica set %q has", deploy.Name, rs.Name)
err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "3546343826724305833", RedisImage)
err = e2edeploy.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "3546343826724305833", RedisImage)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.Logf("Ensuring status for deployment %q is the expected", deploy.Name)
err = framework.WaitForDeploymentComplete(c, deploy)
err = e2edeploy.WaitForDeploymentComplete(c, deploy)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// There should be 1 old RS (nginx-controller, which is adopted)
@@ -307,28 +307,28 @@ func testRecreateDeployment(f *framework.Framework) {
// Create a deployment that brings up redis pods.
deploymentName := "test-recreate-deployment"
framework.Logf("Creating deployment %q", deploymentName)
d := framework.NewDeployment(deploymentName, int32(1), map[string]string{"name": "sample-pod-3"}, RedisImageName, RedisImage, apps.RecreateDeploymentStrategyType)
d := e2edeploy.NewDeployment(deploymentName, int32(1), map[string]string{"name": "sample-pod-3"}, RedisImageName, RedisImage, apps.RecreateDeploymentStrategyType)
deployment, err := c.AppsV1().Deployments(ns).Create(d)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// Wait for it to be updated to revision 1
framework.Logf("Waiting deployment %q to be updated to revision 1", deploymentName)
err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", RedisImage)
err = e2edeploy.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", RedisImage)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.Logf("Waiting deployment %q to complete", deploymentName)
gomega.Expect(framework.WaitForDeploymentComplete(c, deployment)).NotTo(gomega.HaveOccurred())
gomega.Expect(e2edeploy.WaitForDeploymentComplete(c, deployment)).NotTo(gomega.HaveOccurred())
// Update deployment to delete redis pods and bring up nginx pods.
framework.Logf("Triggering a new rollout for deployment %q", deploymentName)
deployment, err = frameworkdeployment.UpdateDeploymentWithRetries(c, ns, deploymentName, func(update *apps.Deployment) {
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deploymentName, func(update *apps.Deployment) {
update.Spec.Template.Spec.Containers[0].Name = NginxImageName
update.Spec.Template.Spec.Containers[0].Image = NginxImage
})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.Logf("Watching deployment %q to verify that new pods will not run with olds pods", deploymentName)
gomega.Expect(framework.WatchRecreateDeployment(c, deployment)).NotTo(gomega.HaveOccurred())
gomega.Expect(e2edeploy.WatchRecreateDeployment(c, deployment)).NotTo(gomega.HaveOccurred())
}
// testDeploymentCleanUpPolicy tests that deployment supports cleanup policy
@@ -391,13 +391,13 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) {
}
}
}()
d := framework.NewDeployment(deploymentName, replicas, deploymentPodLabels, RedisImageName, RedisImage, apps.RollingUpdateDeploymentStrategyType)
d := e2edeploy.NewDeployment(deploymentName, replicas, deploymentPodLabels, RedisImageName, RedisImage, apps.RollingUpdateDeploymentStrategyType)
d.Spec.RevisionHistoryLimit = revisionHistoryLimit
_, err = c.AppsV1().Deployments(ns).Create(d)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By(fmt.Sprintf("Waiting for deployment %s history to be cleaned up", deploymentName))
err = framework.WaitForDeploymentOldRSsNum(c, ns, deploymentName, int(*revisionHistoryLimit))
err = e2edeploy.WaitForDeploymentOldRSsNum(c, ns, deploymentName, int(*revisionHistoryLimit))
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
@@ -432,7 +432,7 @@ func testRolloverDeployment(f *framework.Framework) {
deploymentImage := "gcr.io/google_samples/gb-redisslave:nonexistent"
deploymentStrategyType := apps.RollingUpdateDeploymentStrategyType
framework.Logf("Creating deployment %q", deploymentName)
newDeployment := framework.NewDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType)
newDeployment := e2edeploy.NewDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType)
newDeployment.Spec.Strategy.RollingUpdate = &apps.RollingUpdateDeployment{
MaxUnavailable: intOrStrP(0),
MaxSurge: intOrStrP(1),
@@ -446,10 +446,10 @@ func testRolloverDeployment(f *framework.Framework) {
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.Logf("Make sure deployment %q performs scaling operations", deploymentName)
// Make sure the deployment starts to scale up and down replica sets by checking if its updated replicas >= 1
err = framework.WaitForDeploymentUpdatedReplicasGTE(c, ns, deploymentName, deploymentReplicas, deployment.Generation)
err = e2edeploy.WaitForDeploymentUpdatedReplicasGTE(c, ns, deploymentName, deploymentReplicas, deployment.Generation)
// Check if it's updated to revision 1 correctly
framework.Logf("Check revision of new replica set for deployment %q", deploymentName)
err = framework.CheckDeploymentRevisionAndImage(c, ns, deploymentName, "1", deploymentImage)
err = e2edeploy.CheckDeploymentRevisionAndImage(c, ns, deploymentName, "1", deploymentImage)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.Logf("Ensure that both replica sets have 1 created replica")
@@ -463,7 +463,7 @@ func testRolloverDeployment(f *framework.Framework) {
// The deployment is stuck, update it to rollover the above 2 ReplicaSets and bring up redis pods.
framework.Logf("Rollover old replica sets for deployment %q with new image update", deploymentName)
updatedDeploymentImageName, updatedDeploymentImage := RedisImageName, RedisImage
deployment, err = frameworkdeployment.UpdateDeploymentWithRetries(c, ns, newDeployment.Name, func(update *apps.Deployment) {
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, newDeployment.Name, func(update *apps.Deployment) {
update.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImageName
update.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage
})
@@ -471,16 +471,16 @@ func testRolloverDeployment(f *framework.Framework) {
// Use observedGeneration to determine if the controller noticed the pod template update.
framework.Logf("Wait deployment %q to be observed by the deployment controller", deploymentName)
err = framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
err = e2edeploy.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// Wait for it to be updated to revision 2
framework.Logf("Wait for revision update of deployment %q to 2", deploymentName)
err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "2", updatedDeploymentImage)
err = e2edeploy.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "2", updatedDeploymentImage)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.Logf("Make sure deployment %q is complete", deploymentName)
err = framework.WaitForDeploymentCompleteAndCheckRolling(c, deployment)
err = e2edeploy.WaitForDeploymentCompleteAndCheckRolling(c, deployment)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.Logf("Ensure that both old replica sets have no replicas")
@@ -515,17 +515,17 @@ func testRollbackDeployment(f *framework.Framework) {
deploymentImage := NginxImage
deploymentStrategyType := apps.RollingUpdateDeploymentStrategyType
framework.Logf("Creating deployment %s", deploymentName)
d := framework.NewDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType)
d := e2edeploy.NewDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType)
createAnnotation := map[string]string{"action": "create", "author": "node"}
d.Annotations = createAnnotation
deploy, err := c.AppsV1().Deployments(ns).Create(d)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// Wait for it to be updated to revision 1
err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", deploymentImage)
err = e2edeploy.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", deploymentImage)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
err = framework.WaitForDeploymentComplete(c, deploy)
err = e2edeploy.WaitForDeploymentComplete(c, deploy)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// Current newRS annotation should be "create"
@@ -536,7 +536,7 @@ func testRollbackDeployment(f *framework.Framework) {
updatedDeploymentImage := RedisImage
updatedDeploymentImageName := RedisImageName
updateAnnotation := map[string]string{"action": "update", "log": "I need to update it"}
deployment, err := frameworkdeployment.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *apps.Deployment) {
deployment, err := e2edeploy.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *apps.Deployment) {
update.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImageName
update.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage
update.Annotations = updateAnnotation
@@ -544,14 +544,14 @@ func testRollbackDeployment(f *framework.Framework) {
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// Use observedGeneration to determine if the controller noticed the pod template update.
err = framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
err = e2edeploy.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// Wait for it to be updated to revision 2
err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "2", updatedDeploymentImage)
err = e2edeploy.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "2", updatedDeploymentImage)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
err = framework.WaitForDeploymentCompleteAndCheckRolling(c, deployment)
err = e2edeploy.WaitForDeploymentCompleteAndCheckRolling(c, deployment)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// Current newRS annotation should be "update"
@@ -566,15 +566,15 @@ func testRollbackDeployment(f *framework.Framework) {
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// Wait for the deployment to start rolling back
err = framework.WaitForDeploymentRollbackCleared(c, ns, deploymentName)
err = e2edeploy.WaitForDeploymentRollbackCleared(c, ns, deploymentName)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// TODO: report RollbackDone in deployment status and check it here
// Wait for it to be updated to revision 3
err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "3", deploymentImage)
err = e2edeploy.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "3", deploymentImage)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
err = framework.WaitForDeploymentCompleteAndCheckRolling(c, deployment)
err = e2edeploy.WaitForDeploymentCompleteAndCheckRolling(c, deployment)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// Current newRS annotation should be "create", after the rollback
@@ -588,14 +588,14 @@ func testRollbackDeployment(f *framework.Framework) {
err = c.ExtensionsV1beta1().Deployments(ns).Rollback(rollback)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
err = framework.WaitForDeploymentRollbackCleared(c, ns, deploymentName)
err = e2edeploy.WaitForDeploymentRollbackCleared(c, ns, deploymentName)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// Wait for it to be updated to revision 4
err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "4", updatedDeploymentImage)
err = e2edeploy.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "4", updatedDeploymentImage)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
err = framework.WaitForDeploymentCompleteAndCheckRolling(c, deployment)
err = e2edeploy.WaitForDeploymentCompleteAndCheckRolling(c, deployment)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// Current newRS annotation should be "update", after the rollback
@@ -611,13 +611,13 @@ func testRollbackDeployment(f *framework.Framework) {
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// Wait for the deployment to start rolling back
err = framework.WaitForDeploymentRollbackCleared(c, ns, deploymentName)
err = e2edeploy.WaitForDeploymentRollbackCleared(c, ns, deploymentName)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// TODO: report RollbackRevisionNotFound in deployment status and check it here
// The pod template shouldn't change since there's no revision 10
// Check if it's still revision 4 and still has the old pod template
err = framework.CheckDeploymentRevisionAndImage(c, ns, deploymentName, "4", updatedDeploymentImage)
err = e2edeploy.CheckDeploymentRevisionAndImage(c, ns, deploymentName, "4", updatedDeploymentImage)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// 6. Update the deploymentRollback to rollback to revision 4
@@ -629,13 +629,13 @@ func testRollbackDeployment(f *framework.Framework) {
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// Wait for the deployment to start rolling back
err = framework.WaitForDeploymentRollbackCleared(c, ns, deploymentName)
err = e2edeploy.WaitForDeploymentRollbackCleared(c, ns, deploymentName)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// TODO: report RollbackTemplateUnchanged in deployment status and check it here
// The pod template shouldn't change since it's already revision 4
// Check if it's still revision 4 and still has the old pod template
err = framework.CheckDeploymentRevisionAndImage(c, ns, deploymentName, "4", updatedDeploymentImage)
err = e2edeploy.CheckDeploymentRevisionAndImage(c, ns, deploymentName, "4", updatedDeploymentImage)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
@@ -664,7 +664,7 @@ func testIterativeDeployments(f *framework.Framework) {
// Create a nginx deployment.
deploymentName := "nginx"
thirty := int32(30)
d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, apps.RollingUpdateDeploymentStrategyType)
d := e2edeploy.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, apps.RollingUpdateDeploymentStrategyType)
d.Spec.ProgressDeadlineSeconds = &thirty
d.Spec.RevisionHistoryLimit = &two
d.Spec.Template.Spec.TerminationGracePeriodSeconds = &zero
@@ -682,7 +682,7 @@ func testIterativeDeployments(f *framework.Framework) {
case n < 0.2:
// trigger a new deployment
framework.Logf("%02d: triggering a new rollout for deployment %q", i, deployment.Name)
deployment, err = frameworkdeployment.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
newEnv := v1.EnvVar{Name: "A", Value: fmt.Sprintf("%d", i)}
update.Spec.Template.Spec.Containers[0].Env = append(update.Spec.Template.Spec.Containers[0].Env, newEnv)
randomScale(update, i)
@@ -692,7 +692,7 @@ func testIterativeDeployments(f *framework.Framework) {
case n < 0.4:
// rollback to the previous version
framework.Logf("%02d: rolling back a rollout for deployment %q", i, deployment.Name)
deployment, err = frameworkdeployment.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
if update.Annotations == nil {
update.Annotations = make(map[string]string)
}
@@ -703,7 +703,7 @@ func testIterativeDeployments(f *framework.Framework) {
case n < 0.6:
// just scaling
framework.Logf("%02d: scaling deployment %q", i, deployment.Name)
deployment, err = frameworkdeployment.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
randomScale(update, i)
})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
@@ -712,14 +712,14 @@ func testIterativeDeployments(f *framework.Framework) {
// toggling the deployment
if deployment.Spec.Paused {
framework.Logf("%02d: pausing deployment %q", i, deployment.Name)
deployment, err = frameworkdeployment.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
update.Spec.Paused = true
randomScale(update, i)
})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
} else {
framework.Logf("%02d: resuming deployment %q", i, deployment.Name)
deployment, err = frameworkdeployment.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
update.Spec.Paused = false
randomScale(update, i)
})
@@ -756,19 +756,19 @@ func testIterativeDeployments(f *framework.Framework) {
deployment, err = c.AppsV1().Deployments(ns).Get(deployment.Name, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
if deployment.Spec.Paused {
deployment, err = frameworkdeployment.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
update.Spec.Paused = false
})
}
framework.Logf("Waiting for deployment %q to be observed by the controller", deploymentName)
gomega.Expect(framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)).NotTo(gomega.HaveOccurred())
gomega.Expect(e2edeploy.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)).NotTo(gomega.HaveOccurred())
framework.Logf("Waiting for deployment %q status", deploymentName)
gomega.Expect(framework.WaitForDeploymentComplete(c, deployment)).NotTo(gomega.HaveOccurred())
gomega.Expect(e2edeploy.WaitForDeploymentComplete(c, deployment)).NotTo(gomega.HaveOccurred())
framework.Logf("Checking deployment %q for a complete condition", deploymentName)
gomega.Expect(framework.WaitForDeploymentWithCondition(c, ns, deploymentName, deploymentutil.NewRSAvailableReason, apps.DeploymentProgressing)).NotTo(gomega.HaveOccurred())
gomega.Expect(e2edeploy.WaitForDeploymentWithCondition(c, ns, deploymentName, deploymentutil.NewRSAvailableReason, apps.DeploymentProgressing)).NotTo(gomega.HaveOccurred())
}
func testDeploymentsControllerRef(f *framework.Framework) {
@@ -779,10 +779,10 @@ func testDeploymentsControllerRef(f *framework.Framework) {
framework.Logf("Creating Deployment %q", deploymentName)
podLabels := map[string]string{"name": NginxImageName}
replicas := int32(1)
d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, apps.RollingUpdateDeploymentStrategyType)
d := e2edeploy.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, apps.RollingUpdateDeploymentStrategyType)
deploy, err := c.AppsV1().Deployments(ns).Create(d)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
err = framework.WaitForDeploymentComplete(c, deploy)
err = e2edeploy.WaitForDeploymentComplete(c, deploy)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.Logf("Verifying Deployment %q has only one ReplicaSet", deploymentName)
@@ -806,10 +806,10 @@ func testDeploymentsControllerRef(f *framework.Framework) {
deploymentName = "test-adopt-deployment"
framework.Logf("Creating Deployment %q to adopt the ReplicaSet", deploymentName)
d = framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, apps.RollingUpdateDeploymentStrategyType)
d = e2edeploy.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, apps.RollingUpdateDeploymentStrategyType)
deploy, err = c.AppsV1().Deployments(ns).Create(d)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
err = framework.WaitForDeploymentComplete(c, deploy)
err = e2edeploy.WaitForDeploymentComplete(c, deploy)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.Logf("Waiting for the ReplicaSet to have the right controllerRef")
@@ -836,7 +836,7 @@ func testProportionalScalingDeployment(f *framework.Framework) {
// Create a nginx deployment.
deploymentName := "nginx-deployment"
d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, apps.RollingUpdateDeploymentStrategyType)
d := e2edeploy.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, apps.RollingUpdateDeploymentStrategyType)
d.Spec.Strategy.RollingUpdate = new(apps.RollingUpdateDeployment)
d.Spec.Strategy.RollingUpdate.MaxSurge = intOrStrP(3)
d.Spec.Strategy.RollingUpdate.MaxUnavailable = intOrStrP(2)
@@ -846,7 +846,7 @@ func testProportionalScalingDeployment(f *framework.Framework) {
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.Logf("Waiting for observed generation %d", deployment.Generation)
gomega.Expect(framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)).NotTo(gomega.HaveOccurred())
gomega.Expect(e2edeploy.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)).NotTo(gomega.HaveOccurred())
// Verify that the required pods have come up.
framework.Logf("Waiting for all required pods to come up")
@@ -854,7 +854,7 @@ func testProportionalScalingDeployment(f *framework.Framework) {
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error in waiting for pods to come up: %v", err)
framework.Logf("Waiting for deployment %q to complete", deployment.Name)
gomega.Expect(framework.WaitForDeploymentComplete(c, deployment)).NotTo(gomega.HaveOccurred())
gomega.Expect(e2edeploy.WaitForDeploymentComplete(c, deployment)).NotTo(gomega.HaveOccurred())
firstRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
@@ -862,13 +862,13 @@ func testProportionalScalingDeployment(f *framework.Framework) {
// Update the deployment with a non-existent image so that the new replica set
// will be blocked to simulate a partial rollout.
framework.Logf("Updating deployment %q with a non-existent image", deploymentName)
deployment, err = frameworkdeployment.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *apps.Deployment) {
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *apps.Deployment) {
update.Spec.Template.Spec.Containers[0].Image = "nginx:404"
})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.Logf("Waiting for observed generation %d", deployment.Generation)
gomega.Expect(framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)).NotTo(gomega.HaveOccurred())
gomega.Expect(e2edeploy.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)).NotTo(gomega.HaveOccurred())
// Checking state of first rollout's replicaset.
maxUnavailable, err := intstr.GetValueFromIntOrPercent(deployment.Spec.Strategy.RollingUpdate.MaxUnavailable, int(*(deployment.Spec.Replicas)), false)
@@ -922,7 +922,7 @@ func testProportionalScalingDeployment(f *framework.Framework) {
// Scale the deployment to 30 replicas.
newReplicas = int32(30)
framework.Logf("Scaling up the deployment %q from %d to %d", deploymentName, replicas, newReplicas)
deployment, err = frameworkdeployment.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
update.Spec.Replicas = &newReplicas
})
gomega.Expect(err).NotTo(gomega.HaveOccurred())

View File

@@ -54,6 +54,7 @@ go_library(
"//test/e2e/common:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/auth:go_default_library",
"//test/e2e/framework/deployment:go_default_library",
"//test/e2e/framework/job:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/utils:go_default_library",

View File

@@ -36,6 +36,7 @@ import (
restclient "k8s.io/client-go/rest"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/auth"
e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
@@ -202,7 +203,7 @@ var _ = SIGDescribe("Advanced Audit [DisabledForLargeClusters][Flaky]", func() {
It("should audit API calls to create, get, update, patch, delete, list, watch deployments.", func() {
podLabels := map[string]string{"name": "audit-deployment-pod"}
d := framework.NewDeployment("audit-deployment", int32(1), podLabels, "redis", imageutils.GetE2EImage(imageutils.Redis), apps.RecreateDeploymentStrategyType)
d := e2edeploy.NewDeployment("audit-deployment", int32(1), podLabels, "redis", imageutils.GetE2EImage(imageutils.Redis), apps.RecreateDeploymentStrategyType)
_, err := f.ClientSet.AppsV1().Deployments(namespace).Create(d)
framework.ExpectNoError(err, "failed to create audit-deployment")

View File

@@ -77,6 +77,7 @@ go_library(
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/tools/watch:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/deployment:go_default_library",
"//test/e2e/framework/replicaset:go_default_library",
"//test/e2e/framework/volume:go_default_library",
"//test/utils:go_default_library",

View File

@@ -32,6 +32,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/test/e2e/framework"
e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
"k8s.io/kubernetes/test/e2e/framework/replicaset"
testutils "k8s.io/kubernetes/test/utils"
@@ -476,7 +477,7 @@ func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, ns, name st
dpConfig := testutils.DeploymentConfig{
RCConfig: rcConfig,
}
framework.ExpectNoError(framework.RunDeployment(dpConfig))
framework.ExpectNoError(e2edeploy.RunDeployment(dpConfig))
break
case KindReplicaSet:
rsConfig := testutils.ReplicaSetConfig{

View File

@@ -7,7 +7,6 @@ go_library(
srcs = [
"cleanup.go",
"create.go",
"deployment_util.go",
"exec_util.go",
"flake_reporting_util.go",
"framework.go",
@@ -44,7 +43,6 @@ go_library(
"//pkg/apis/storage/v1/util:go_default_library",
"//pkg/client/conditions:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/deployment/util:go_default_library",
"//pkg/controller/nodelifecycle:go_default_library",
"//pkg/controller/service:go_default_library",
"//pkg/features:go_default_library",

View File

@@ -4,13 +4,27 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["utils.go"],
srcs = [
"fixtures.go",
"logging.go",
"wait.go",
],
importpath = "k8s.io/kubernetes/test/e2e/framework/deployment",
deps = [
"//pkg/controller/deployment/util:go_default_library",
"//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/tools/watch:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/utils:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
],
)

View File

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
package deployment
import (
"context"
@@ -32,54 +32,63 @@ import (
clientset "k8s.io/client-go/kubernetes"
watchtools "k8s.io/client-go/tools/watch"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
)
// WaitForDeploymentOldRSsNum waits for the deployment to clean up old ReplicaSets.
func WaitForDeploymentOldRSsNum(c clientset.Interface, ns, deploymentName string, desiredRSNum int) error {
var oldRSs []*apps.ReplicaSet
var d *apps.Deployment
// UpdateDeploymentWithRetries updates the specified deployment with retries.
func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string, applyUpdate testutils.UpdateDeploymentFunc) (*apps.Deployment, error) {
return testutils.UpdateDeploymentWithRetries(c, namespace, name, applyUpdate, e2elog.Logf, poll, pollShortTimeout)
}
pollErr := wait.PollImmediate(Poll, 5*time.Minute, func() (bool, error) {
deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
// CheckDeploymentRevisionAndImage checks if the input deployment's and its new replica set's revision and image are as expected.
func CheckDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName, revision, image string) error {
return testutils.CheckDeploymentRevisionAndImage(c, ns, deploymentName, revision, image)
}
// WatchRecreateDeployment watches Recreate deployments and ensures that no new pods run at the same time as old pods.
func WatchRecreateDeployment(c clientset.Interface, d *apps.Deployment) error {
if d.Spec.Strategy.Type != apps.RecreateDeploymentStrategyType {
return fmt.Errorf("deployment %q does not use a Recreate strategy: %s", d.Name, d.Spec.Strategy.Type)
}
w, err := c.AppsV1().Deployments(d.Namespace).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: d.Name, ResourceVersion: d.ResourceVersion}))
if err != nil {
return false, err
return err
}
d = deployment
_, oldRSs, err = deploymentutil.GetOldReplicaSets(deployment, c.AppsV1())
if err != nil {
return false, err
status := d.Status
condition := func(event watch.Event) (bool, error) {
d := event.Object.(*apps.Deployment)
status = d.Status
if d.Status.UpdatedReplicas > 0 && d.Status.Replicas != d.Status.UpdatedReplicas {
_, allOldRSs, err := deploymentutil.GetOldReplicaSets(d, c.AppsV1())
newRS, nerr := deploymentutil.GetNewReplicaSet(d, c.AppsV1())
if err == nil && nerr == nil {
e2elog.Logf("%+v", d)
logReplicaSetsOfDeployment(d, allOldRSs, newRS)
logPodsOfDeployment(c, d, append(allOldRSs, newRS))
}
return len(oldRSs) == desiredRSNum, nil
})
if pollErr == wait.ErrWaitTimeout {
pollErr = fmt.Errorf("%d old replica sets were not cleaned up for deployment %q", len(oldRSs)-desiredRSNum, deploymentName)
logReplicaSetsOfDeployment(d, oldRSs, nil)
return false, fmt.Errorf("deployment %q is running new pods alongside old pods: %#v", d.Name, status)
}
return pollErr
}
func logReplicaSetsOfDeployment(deployment *apps.Deployment, allOldRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet) {
testutils.LogReplicaSetsOfDeployment(deployment, allOldRSs, newRS, Logf)
}
return *(d.Spec.Replicas) == d.Status.Replicas &&
*(d.Spec.Replicas) == d.Status.UpdatedReplicas &&
d.Generation <= d.Status.ObservedGeneration, nil
}
// WaitForObservedDeployment waits for the specified deployment generation.
func WaitForObservedDeployment(c clientset.Interface, ns, deploymentName string, desiredGeneration int64) error {
return testutils.WaitForObservedDeployment(c, ns, deploymentName, desiredGeneration)
}
// WaitForDeploymentWithCondition waits for the specified deployment condition.
func WaitForDeploymentWithCondition(c clientset.Interface, ns, deploymentName, reason string, condType apps.DeploymentConditionType) error {
return testutils.WaitForDeploymentWithCondition(c, ns, deploymentName, reason, condType, Logf, Poll, PollLongTimeout)
}
// WaitForDeploymentRevisionAndImage waits for the deployment's and its new RS's revision and container image to match the given revision and image.
// Note that the deployment's revision and its new RS's revision should be updated shortly in most cases, but an overwhelmed RS controller
// may take longer to relabel the RS.
func WaitForDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName string, revision, image string) error {
return testutils.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, revision, image, Logf, Poll, PollLongTimeout)
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
defer cancel()
_, err = watchtools.UntilWithoutRetry(ctx, w, condition)
if err == wait.ErrWaitTimeout {
err = fmt.Errorf("deployment %q never completed: %#v", d.Name, status)
}
return err
}
// NewDeployment returns a deployment spec with the specified argument.
@@ -114,114 +123,14 @@ func NewDeployment(deploymentName string, replicas int32, podLabels map[string]s
}
}
// WaitForDeploymentComplete waits for the deployment to complete, and doesn't check whether the rolling update strategy is broken.
// The rolling update strategy is used only during a rolling update, and can be violated in other situations,
// such as shortly after a scaling event or right after the deployment is created.
func WaitForDeploymentComplete(c clientset.Interface, d *apps.Deployment) error {
return testutils.WaitForDeploymentComplete(c, d, Logf, Poll, PollLongTimeout)
}
// WaitForDeploymentCompleteAndCheckRolling waits for the deployment to complete, and checks that the rolling update strategy is not broken at any time.
// Rolling update strategy should not be broken during a rolling update.
func WaitForDeploymentCompleteAndCheckRolling(c clientset.Interface, d *apps.Deployment) error {
return testutils.WaitForDeploymentCompleteAndCheckRolling(c, d, Logf, Poll, PollLongTimeout)
}
// WaitForDeploymentUpdatedReplicasGTE waits for the given deployment to be observed by the controller and to have at least minUpdatedReplicas updatedReplicas.
func WaitForDeploymentUpdatedReplicasGTE(c clientset.Interface, ns, deploymentName string, minUpdatedReplicas int32, desiredGeneration int64) error {
return testutils.WaitForDeploymentUpdatedReplicasGTE(c, ns, deploymentName, minUpdatedReplicas, desiredGeneration, Poll, PollLongTimeout)
}
// WaitForDeploymentRollbackCleared waits until the given deployment has either started rolling back or no longer needs to roll back.
// Note that rollback should be cleared shortly, so we only wait for 1 minute here to fail early.
func WaitForDeploymentRollbackCleared(c clientset.Interface, ns, deploymentName string) error {
return testutils.WaitForDeploymentRollbackCleared(c, ns, deploymentName, Poll, PollShortTimeout)
}
// WatchRecreateDeployment watches Recreate deployments and ensures that no new pods run at the same time as old pods.
func WatchRecreateDeployment(c clientset.Interface, d *apps.Deployment) error {
if d.Spec.Strategy.Type != apps.RecreateDeploymentStrategyType {
return fmt.Errorf("deployment %q does not use a Recreate strategy: %s", d.Name, d.Spec.Strategy.Type)
}
w, err := c.AppsV1().Deployments(d.Namespace).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: d.Name, ResourceVersion: d.ResourceVersion}))
if err != nil {
return err
}
status := d.Status
condition := func(event watch.Event) (bool, error) {
d := event.Object.(*apps.Deployment)
status = d.Status
if d.Status.UpdatedReplicas > 0 && d.Status.Replicas != d.Status.UpdatedReplicas {
_, allOldRSs, err := deploymentutil.GetOldReplicaSets(d, c.AppsV1())
newRS, nerr := deploymentutil.GetNewReplicaSet(d, c.AppsV1())
if err == nil && nerr == nil {
Logf("%+v", d)
logReplicaSetsOfDeployment(d, allOldRSs, newRS)
logPodsOfDeployment(c, d, append(allOldRSs, newRS))
}
return false, fmt.Errorf("deployment %q is running new pods alongside old pods: %#v", d.Name, status)
}
return *(d.Spec.Replicas) == d.Status.Replicas &&
*(d.Spec.Replicas) == d.Status.UpdatedReplicas &&
d.Generation <= d.Status.ObservedGeneration, nil
}
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
defer cancel()
_, err = watchtools.UntilWithoutRetry(ctx, w, condition)
if err == wait.ErrWaitTimeout {
err = fmt.Errorf("deployment %q never completed: %#v", d.Name, status)
}
return err
}
// RunDeployment runs a deployment with the specified config.
func RunDeployment(config testutils.DeploymentConfig) error {
ginkgo.By(fmt.Sprintf("creating deployment %s in namespace %s", config.Name, config.Namespace))
config.NodeDumpFunc = DumpNodeDebugInfo
config.ContainerDumpFunc = LogFailedContainers
return testutils.RunDeployment(config)
}
func logPodsOfDeployment(c clientset.Interface, deployment *apps.Deployment, rsList []*apps.ReplicaSet) {
testutils.LogPodsOfDeployment(c, deployment, rsList, Logf)
}
// WaitForDeploymentRevision waits for a deployment to reach the given target revision.
func WaitForDeploymentRevision(c clientset.Interface, d *apps.Deployment, targetRevision string) error {
err := wait.PollImmediate(Poll, PollLongTimeout, func() (bool, error) {
deployment, err := c.AppsV1().Deployments(d.Namespace).Get(d.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
revision := deployment.Annotations[deploymentutil.RevisionAnnotation]
return revision == targetRevision, nil
})
if err != nil {
return fmt.Errorf("error waiting for revision to become %q for deployment %q: %v", targetRevision, d.Name, err)
}
return nil
}
// CheckDeploymentRevisionAndImage checks if the input deployment's and its new replica set's revision and image are as expected.
func CheckDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName, revision, image string) error {
return testutils.CheckDeploymentRevisionAndImage(c, ns, deploymentName, revision, image)
}
// CreateDeployment creates a deployment.
func CreateDeployment(client clientset.Interface, replicas int32, podLabels map[string]string, nodeSelector map[string]string, namespace string, pvclaims []*v1.PersistentVolumeClaim, command string) (*apps.Deployment, error) {
deploymentSpec := MakeDeployment(replicas, podLabels, nodeSelector, namespace, pvclaims, false, command)
deploymentSpec := testDeployment(replicas, podLabels, nodeSelector, namespace, pvclaims, false, command)
deployment, err := client.AppsV1().Deployments(namespace).Create(deploymentSpec)
if err != nil {
return nil, fmt.Errorf("deployment %q Create API error: %v", deploymentSpec.Name, err)
}
Logf("Waiting deployment %q to complete", deploymentSpec.Name)
e2elog.Logf("Waiting deployment %q to complete", deploymentSpec.Name)
err = WaitForDeploymentComplete(client, deployment)
if err != nil {
return nil, fmt.Errorf("deployment %q failed to complete: %v", deploymentSpec.Name, err)
@@ -229,9 +138,37 @@ func CreateDeployment(client clientset.Interface, replicas int32, podLabels map[
return deployment, nil
}
// MakeDeployment creates a deployment definition based on the namespace. The deployment references the PVC's
// GetPodsForDeployment gets pods for the given deployment
func GetPodsForDeployment(client clientset.Interface, deployment *apps.Deployment) (*v1.PodList, error) {
replicaSet, err := deploymentutil.GetNewReplicaSet(deployment, client.AppsV1())
if err != nil {
return nil, fmt.Errorf("Failed to get new replica set for deployment %q: %v", deployment.Name, err)
}
if replicaSet == nil {
return nil, fmt.Errorf("expected a new replica set for deployment %q, found none", deployment.Name)
}
podListFunc := func(namespace string, options metav1.ListOptions) (*v1.PodList, error) {
return client.CoreV1().Pods(namespace).List(options)
}
rsList := []*apps.ReplicaSet{replicaSet}
podList, err := deploymentutil.ListPods(deployment, rsList, podListFunc)
if err != nil {
return nil, fmt.Errorf("Failed to list Pods of Deployment %q: %v", deployment.Name, err)
}
return podList, nil
}
// RunDeployment runs a deployment with the specified config.
func RunDeployment(config testutils.DeploymentConfig) error {
ginkgo.By(fmt.Sprintf("creating deployment %s in namespace %s", config.Name, config.Namespace))
config.NodeDumpFunc = framework.DumpNodeDebugInfo
config.ContainerDumpFunc = framework.LogFailedContainers
return testutils.RunDeployment(config)
}
// testDeployment creates a deployment definition based on the namespace. The deployment references the PVC's
// name. A slice of bash commands can be supplied as args to be run by the pod.
func MakeDeployment(replicas int32, podLabels map[string]string, nodeSelector map[string]string, namespace string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) *apps.Deployment {
func testDeployment(replicas int32, podLabels map[string]string, nodeSelector map[string]string, namespace string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) *apps.Deployment {
if len(command) == 0 {
command = "trap exit TERM; while true; do sleep 1; done"
}
@@ -283,23 +220,3 @@ func MakeDeployment(replicas int32, podLabels map[string]string, nodeSelector ma
}
return deploymentSpec
}
// GetPodsForDeployment gets pods for the given deployment
func GetPodsForDeployment(client clientset.Interface, deployment *apps.Deployment) (*v1.PodList, error) {
replicaSet, err := deploymentutil.GetNewReplicaSet(deployment, client.AppsV1())
if err != nil {
return nil, fmt.Errorf("Failed to get new replica set for deployment %q: %v", deployment.Name, err)
}
if replicaSet == nil {
return nil, fmt.Errorf("expected a new replica set for deployment %q, found none", deployment.Name)
}
podListFunc := func(namespace string, options metav1.ListOptions) (*v1.PodList, error) {
return client.CoreV1().Pods(namespace).List(options)
}
rsList := []*apps.ReplicaSet{replicaSet}
podList, err := deploymentutil.ListPods(deployment, rsList, podListFunc)
if err != nil {
return nil, fmt.Errorf("Failed to list Pods of Deployment %q: %v", deployment.Name, err)
}
return podList, nil
}

View File

@@ -19,11 +19,14 @@ package deployment
import (
apps "k8s.io/api/apps/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
testutils "k8s.io/kubernetes/test/utils"
)
// UpdateDeploymentWithRetries updates the specified deployment with retries.
func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string, applyUpdate testutils.UpdateDeploymentFunc) (*apps.Deployment, error) {
return testutils.UpdateDeploymentWithRetries(c, namespace, name, applyUpdate, framework.Logf, framework.Poll, framework.PollShortTimeout)
func logReplicaSetsOfDeployment(deployment *apps.Deployment, allOldRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet) {
testutils.LogReplicaSetsOfDeployment(deployment, allOldRSs, newRS, e2elog.Logf)
}
func logPodsOfDeployment(c clientset.Interface, deployment *apps.Deployment, rsList []*apps.ReplicaSet) {
testutils.LogPodsOfDeployment(c, deployment, rsList, e2elog.Logf)
}

View File

@@ -0,0 +1,119 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package deployment
import (
"fmt"
"time"
apps "k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
testutils "k8s.io/kubernetes/test/utils"
)
const (
// poll is how often to poll pods, nodes and claims.
poll = 2 * time.Second
pollShortTimeout = 1 * time.Minute
pollLongTimeout = 5 * time.Minute
)
// WaitForObservedDeployment waits for the specified deployment generation.
func WaitForObservedDeployment(c clientset.Interface, ns, deploymentName string, desiredGeneration int64) error {
return testutils.WaitForObservedDeployment(c, ns, deploymentName, desiredGeneration)
}
// WaitForDeploymentWithCondition waits for the specified deployment condition.
func WaitForDeploymentWithCondition(c clientset.Interface, ns, deploymentName, reason string, condType apps.DeploymentConditionType) error {
return testutils.WaitForDeploymentWithCondition(c, ns, deploymentName, reason, condType, e2elog.Logf, poll, pollLongTimeout)
}
// WaitForDeploymentRevisionAndImage waits for the deployment's and its new RS's revision and container image to match the given revision and image.
// Note that the deployment's revision and its new RS's revision should be updated shortly in most cases, but an overwhelmed RS controller
// may take longer to relabel the RS.
func WaitForDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName string, revision, image string) error {
return testutils.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, revision, image, e2elog.Logf, poll, pollLongTimeout)
}
// WaitForDeploymentComplete waits for the deployment to complete, and doesn't check whether the rolling update strategy is broken.
// The rolling update strategy is used only during a rolling update, and can be violated in other situations,
// such as shortly after a scaling event or right after the deployment is created.
func WaitForDeploymentComplete(c clientset.Interface, d *apps.Deployment) error {
return testutils.WaitForDeploymentComplete(c, d, e2elog.Logf, poll, pollLongTimeout)
}
// WaitForDeploymentCompleteAndCheckRolling waits for the deployment to complete, and checks that the rolling update strategy is not broken at any time.
// Rolling update strategy should not be broken during a rolling update.
func WaitForDeploymentCompleteAndCheckRolling(c clientset.Interface, d *apps.Deployment) error {
return testutils.WaitForDeploymentCompleteAndCheckRolling(c, d, e2elog.Logf, poll, pollLongTimeout)
}
// WaitForDeploymentUpdatedReplicasGTE waits for the given deployment to be observed by the controller and to have at least minUpdatedReplicas updatedReplicas.
func WaitForDeploymentUpdatedReplicasGTE(c clientset.Interface, ns, deploymentName string, minUpdatedReplicas int32, desiredGeneration int64) error {
return testutils.WaitForDeploymentUpdatedReplicasGTE(c, ns, deploymentName, minUpdatedReplicas, desiredGeneration, poll, pollLongTimeout)
}
// WaitForDeploymentRollbackCleared waits until the given deployment has either started rolling back or no longer needs to roll back.
// Note that rollback should be cleared shortly, so we only wait for 1 minute here to fail early.
func WaitForDeploymentRollbackCleared(c clientset.Interface, ns, deploymentName string) error {
return testutils.WaitForDeploymentRollbackCleared(c, ns, deploymentName, poll, pollShortTimeout)
}
// WaitForDeploymentOldRSsNum waits for the deployment to clean up old ReplicaSets.
func WaitForDeploymentOldRSsNum(c clientset.Interface, ns, deploymentName string, desiredRSNum int) error {
var oldRSs []*apps.ReplicaSet
var d *apps.Deployment
pollErr := wait.PollImmediate(poll, 5*time.Minute, func() (bool, error) {
deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
if err != nil {
return false, err
}
d = deployment
_, oldRSs, err = deploymentutil.GetOldReplicaSets(deployment, c.AppsV1())
if err != nil {
return false, err
}
return len(oldRSs) == desiredRSNum, nil
})
if pollErr == wait.ErrWaitTimeout {
pollErr = fmt.Errorf("%d old replica sets were not cleaned up for deployment %q", len(oldRSs)-desiredRSNum, deploymentName)
logReplicaSetsOfDeployment(d, oldRSs, nil)
}
return pollErr
}
// WaitForDeploymentRevision waits for a deployment to reach the given target revision.
func WaitForDeploymentRevision(c clientset.Interface, d *apps.Deployment, targetRevision string) error {
err := wait.PollImmediate(poll, pollLongTimeout, func() (bool, error) {
deployment, err := c.AppsV1().Deployments(d.Namespace).Get(d.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
revision := deployment.Annotations[deploymentutil.RevisionAnnotation]
return revision == targetRevision, nil
})
if err != nil {
return fmt.Errorf("error waiting for revision to become %q for deployment %q: %v", targetRevision, d.Name, err)
}
return nil
}
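
Most helpers above delegate to test/utils with the package-local poll and pollLongTimeout defaults; WaitForDeploymentRollbackCleared deliberately uses pollShortTimeout to fail fast. A short sketch of chaining them after triggering a second rollout, using the same imports as the earlier sketch (the waitForSecondRevision helper and the target revision "2" are assumptions for illustration, not part of this commit):

// waitForSecondRevision waits for the controller to observe a rollout and
// for the deployment to reach revision "2".
func waitForSecondRevision(c clientset.Interface, ns string, deployment *apps.Deployment) {
	if err := e2edeploy.WaitForObservedDeployment(c, ns, deployment.Name, deployment.Generation); err != nil {
		framework.Failf("deployment %q was not observed at generation %d: %v", deployment.Name, deployment.Generation, err)
	}
	// See WaitForDeploymentRevisionAndImage above for the variant that also checks the container image.
	if err := e2edeploy.WaitForDeploymentRevision(c, deployment, "2"); err != nil {
		framework.Failf("deployment %q never reached revision 2: %v", deployment.Name, err)
	}
}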

View File

@@ -65,6 +65,7 @@ go_library(
"//staging/src/k8s.io/cloud-provider/volume/helpers:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/auth:go_default_library",
"//test/e2e/framework/deployment:go_default_library",
"//test/e2e/framework/metrics:go_default_library",
"//test/e2e/framework/providers/gce:go_default_library",
"//test/e2e/framework/testfiles:go_default_library",

View File

@@ -29,6 +29,7 @@ import (
utilerrors "k8s.io/apimachinery/pkg/util/errors"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
"k8s.io/kubernetes/test/e2e/storage/testsuites"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
@@ -143,7 +144,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume expand[Slow]", func() {
Expect(len(pvs)).To(Equal(1))
By("Creating a deployment with the provisioned volume")
deployment, err := framework.CreateDeployment(c, int32(1), map[string]string{"test": "app"}, nodeKeyValueLabel, ns, pvcClaims, "")
deployment, err := e2edeploy.CreateDeployment(c, int32(1), map[string]string{"test": "app"}, nodeKeyValueLabel, ns, pvcClaims, "")
framework.ExpectNoError(err, "Failed creating deployment %v", err)
defer c.AppsV1().Deployments(ns).Delete(deployment.Name, &metav1.DeleteOptions{})
@ -163,7 +164,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume expand[Slow]", func() {
framework.ExpectNoError(err, "While waiting for pvc resize to finish")
By("Getting a pod from deployment")
podList, err := framework.GetPodsForDeployment(c, deployment)
podList, err := e2edeploy.GetPodsForDeployment(c, deployment)
Expect(podList.Items).NotTo(BeEmpty())
pod := podList.Items[0]

View File

@ -31,6 +31,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/client/conditions"
"k8s.io/kubernetes/test/e2e/framework"
e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
"k8s.io/kubernetes/test/e2e/storage/testsuites"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
@ -114,7 +115,7 @@ var _ = utils.SIGDescribe("Mounted volume expand", func() {
// Keeping the pod on the same node reproduces the scenario in which the volume might already be mounted when a resize is attempted.
// We should consider adding a unit test that exercises this better.
By("Creating a deployment with selected PVC")
deployment, err := framework.CreateDeployment(c, int32(1), map[string]string{"test": "app"}, nodeKeyValueLabel, ns, pvcClaims, "")
deployment, err := e2edeploy.CreateDeployment(c, int32(1), map[string]string{"test": "app"}, nodeKeyValueLabel, ns, pvcClaims, "")
framework.ExpectNoError(err, "Failed creating deployment %v", err)
defer c.AppsV1().Deployments(ns).Delete(deployment.Name, &metav1.DeleteOptions{})
@ -140,7 +141,7 @@ var _ = utils.SIGDescribe("Mounted volume expand", func() {
framework.ExpectNoError(err, "While waiting for pvc resize to finish")
By("Getting a pod from deployment")
podList, err := framework.GetPodsForDeployment(c, deployment)
podList, err := e2edeploy.GetPodsForDeployment(c, deployment)
Expect(podList.Items).NotTo(BeEmpty())
pod := podList.Items[0]
@ -164,7 +165,7 @@ var _ = utils.SIGDescribe("Mounted volume expand", func() {
func waitForDeploymentToRecreatePod(client clientset.Interface, deployment *apps.Deployment) (v1.Pod, error) {
var runningPod v1.Pod
waitErr := wait.PollImmediate(10*time.Second, 5*time.Minute, func() (bool, error) {
podList, err := framework.GetPodsForDeployment(client, deployment)
podList, err := e2edeploy.GetPodsForDeployment(client, deployment)
for _, pod := range podList.Items {
switch pod.Status.Phase {
case v1.PodRunning:

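The hunk above cuts off inside the switch statement. For orientation, a plausible shape for the remainder of waitForDeploymentToRecreatePod, sketched as an assumption rather than the file's actual contents (the oldPod variable and the completed-pod handling are guesses):

waitErr := wait.PollImmediate(10*time.Second, 5*time.Minute, func() (bool, error) {
	podList, err := e2edeploy.GetPodsForDeployment(client, deployment)
	if err != nil {
		return false, err
	}
	for _, pod := range podList.Items {
		switch pod.Status.Phase {
		case v1.PodRunning:
			// oldPod: hypothetical pod captured before deletion, so the
			// recreated pod can be told apart from the original.
			if pod.UID != oldPod.UID {
				runningPod = pod
				return true, nil
			}
		case v1.PodFailed, v1.PodSucceeded:
			return false, conditions.ErrPodCompleted
		}
	}
	return false, nil
})
return runningPod, waitErr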
View File

@ -52,6 +52,7 @@ go_library(
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/deployment:go_default_library",
"//test/e2e/storage/utils:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",

View File

@ -32,6 +32,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
@ -93,12 +94,12 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]",
volumePath := pvs[0].Spec.VsphereVolume.VolumePath
By("Creating a Deployment")
deployment, err := framework.CreateDeployment(client, int32(1), map[string]string{"test": "app"}, nil, namespace, pvclaims, "")
deployment, err := e2edeploy.CreateDeployment(client, int32(1), map[string]string{"test": "app"}, nil, namespace, pvclaims, "")
framework.ExpectNoError(err, fmt.Sprintf("Failed to create Deployment with err: %v", err))
defer client.AppsV1().Deployments(namespace).Delete(deployment.Name, &metav1.DeleteOptions{})
By("Get pod from the deployement")
podList, err := framework.GetPodsForDeployment(client, deployment)
podList, err := e2edeploy.GetPodsForDeployment(client, deployment)
framework.ExpectNoError(err, fmt.Sprintf("Failed to get pod from the deployement with err: %v", err))
Expect(podList.Items).NotTo(BeEmpty())
pod := podList.Items[0]
@ -178,7 +179,7 @@ func waitForPodToFailover(client clientset.Interface, deployment *apps.Deploymen
// getNodeForDeployment returns the node name for the Deployment
func getNodeForDeployment(client clientset.Interface, deployment *apps.Deployment) (string, error) {
podList, err := framework.GetPodsForDeployment(client, deployment)
podList, err := e2edeploy.GetPodsForDeployment(client, deployment)
if err != nil {
return "", err
}
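
The hunk ends before getNodeForDeployment returns; presumably the function then reads the node name off the first pod. An assumed one-line completion, not shown in the diff:

// Assumed completion: take the node of the deployment's first pod.
return podList.Items[0].Spec.NodeName, nil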

View File

@ -24,7 +24,7 @@ import (
"k8s.io/apimachinery/pkg/types"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
"k8s.io/kubernetes/test/e2e/framework"
frameworkdeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
"k8s.io/kubernetes/test/e2e/upgrades"
"github.com/onsi/ginkgo"
@ -60,12 +60,12 @@ func (t *DeploymentUpgradeTest) Setup(f *framework.Framework) {
rsClient := c.AppsV1().ReplicaSets(ns)
ginkgo.By(fmt.Sprintf("Creating a deployment %q with 1 replica in namespace %q", deploymentName, ns))
d := framework.NewDeployment(deploymentName, int32(1), map[string]string{"test": "upgrade"}, "nginx", nginxImage, apps.RollingUpdateDeploymentStrategyType)
d := e2edeploy.NewDeployment(deploymentName, int32(1), map[string]string{"test": "upgrade"}, "nginx", nginxImage, apps.RollingUpdateDeploymentStrategyType)
deployment, err := deploymentClient.Create(d)
framework.ExpectNoError(err)
ginkgo.By(fmt.Sprintf("Waiting deployment %q to complete", deploymentName))
framework.ExpectNoError(framework.WaitForDeploymentComplete(c, deployment))
framework.ExpectNoError(e2edeploy.WaitForDeploymentComplete(c, deployment))
ginkgo.By(fmt.Sprintf("Getting replicaset revision 1 of deployment %q", deploymentName))
rsSelector, err := metav1.LabelSelectorAsSelector(d.Spec.Selector)
@ -79,17 +79,17 @@ func (t *DeploymentUpgradeTest) Setup(f *framework.Framework) {
t.oldRSUID = rss[0].UID
ginkgo.By(fmt.Sprintf("Waiting for revision of the deployment %q to become 1", deploymentName))
framework.ExpectNoError(framework.WaitForDeploymentRevision(c, deployment, "1"))
framework.ExpectNoError(e2edeploy.WaitForDeploymentRevision(c, deployment, "1"))
// Trigger a new rollout so that we have some history.
ginkgo.By(fmt.Sprintf("Triggering a new rollout for deployment %q", deploymentName))
deployment, err = frameworkdeployment.UpdateDeploymentWithRetries(c, ns, deploymentName, func(update *apps.Deployment) {
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deploymentName, func(update *apps.Deployment) {
update.Spec.Template.Spec.Containers[0].Name = "updated-name"
})
framework.ExpectNoError(err)
ginkgo.By(fmt.Sprintf("Waiting deployment %q to complete", deploymentName))
framework.ExpectNoError(framework.WaitForDeploymentComplete(c, deployment))
framework.ExpectNoError(e2edeploy.WaitForDeploymentComplete(c, deployment))
ginkgo.By(fmt.Sprintf("Getting replicasets revision 1 and 2 of deployment %q", deploymentName))
rsList, err = rsClient.List(metav1.ListOptions{LabelSelector: rsSelector.String()})
@ -110,7 +110,7 @@ func (t *DeploymentUpgradeTest) Setup(f *framework.Framework) {
}
ginkgo.By(fmt.Sprintf("Waiting for revision of the deployment %q to become 2", deploymentName))
framework.ExpectNoError(framework.WaitForDeploymentRevision(c, deployment, "2"))
framework.ExpectNoError(e2edeploy.WaitForDeploymentRevision(c, deployment, "2"))
t.oldDeploymentUID = deployment.UID
}
@ -155,17 +155,17 @@ func (t *DeploymentUpgradeTest) Test(f *framework.Framework, done <-chan struct{
gomega.Expect(deployment.Annotations[deploymentutil.RevisionAnnotation]).To(gomega.Equal("2"))
ginkgo.By(fmt.Sprintf("Waiting for deployment %q to complete adoption", deploymentName))
framework.ExpectNoError(framework.WaitForDeploymentComplete(c, deployment))
framework.ExpectNoError(e2edeploy.WaitForDeploymentComplete(c, deployment))
// Verify the upgraded deployment is active by scaling up the deployment by 1
ginkgo.By(fmt.Sprintf("Scaling up replicaset of deployment %q by 1", deploymentName))
_, err = frameworkdeployment.UpdateDeploymentWithRetries(c, ns, deploymentName, func(deployment *apps.Deployment) {
_, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deploymentName, func(deployment *apps.Deployment) {
*deployment.Spec.Replicas = *deployment.Spec.Replicas + 1
})
framework.ExpectNoError(err)
ginkgo.By(fmt.Sprintf("Waiting for deployment %q to complete after scaling", deploymentName))
framework.ExpectNoError(framework.WaitForDeploymentComplete(c, deployment))
framework.ExpectNoError(e2edeploy.WaitForDeploymentComplete(c, deployment))
}
// Teardown cleans up any remaining resources.
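
For context, UpdateDeploymentWithRetries, used twice in the hunks above, follows the standard get-mutate-update loop that retries on write conflicts. A minimal sketch of that pattern under assumed names, reusing the package's poll and pollShortTimeout constants (this is an illustration, not the package's actual implementation):

import (
	apps "k8s.io/api/apps/v1"
	apierrs "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

// updateDeploymentWithRetries (sketch) re-reads the deployment and reapplies
// applyUpdate until the update is accepted or the poll times out.
func updateDeploymentWithRetries(c clientset.Interface, ns, name string, applyUpdate func(*apps.Deployment)) (*apps.Deployment, error) {
	var d *apps.Deployment
	pollErr := wait.PollImmediate(poll, pollShortTimeout, func() (bool, error) {
		var err error
		if d, err = c.AppsV1().Deployments(ns).Get(name, metav1.GetOptions{}); err != nil {
			return false, err
		}
		applyUpdate(d)
		if d, err = c.AppsV1().Deployments(ns).Update(d); err == nil {
			return true, nil
		}
		if apierrs.IsConflict(err) {
			return false, nil // conflict with a concurrent writer: retry
		}
		return false, err
	})
	return d, pollErr
}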