mirror of https://github.com/k3s-io/k3s

Refactored framework deployment utils

xref: #76206
Signed-off-by: Jorge Alarcon Ochoa <alarcj137@gmail.com>
Tag: k3s-v1.15.3

parent d308368b2d
commit 8200ae6279
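In short, this commit moves the UpdateDeploymentWithRetries helper out of the test/e2e/framework package and into the new test/e2e/framework/deployment package, so every call site switches from framework.UpdateDeploymentWithRetries to frameworkdeployment.UpdateDeploymentWithRetries. Below is a minimal sketch of that call-site migration; the bumpImage wrapper and its arguments are illustrative only and not part of the commit.

package example

import (
	apps "k8s.io/api/apps/v1"
	clientset "k8s.io/client-go/kubernetes"

	frameworkdeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
)

// bumpImage is a hypothetical caller showing the new package qualifier.
// Before this commit the same call read framework.UpdateDeploymentWithRetries.
func bumpImage(c clientset.Interface, ns, name, image string) (*apps.Deployment, error) {
	return frameworkdeployment.UpdateDeploymentWithRetries(c, ns, name, func(update *apps.Deployment) {
		// Mutate the desired spec; the helper retries the update for us.
		update.Spec.Template.Spec.Containers[0].Image = image
	})
}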
@@ -61,6 +61,7 @@ go_library(
 "//staging/src/k8s.io/client-go/tools/watch:go_default_library",
 "//test/e2e/common:go_default_library",
 "//test/e2e/framework:go_default_library",
+"//test/e2e/framework/deployment:go_default_library",
 "//test/e2e/framework/job:go_default_library",
 "//test/e2e/framework/replicaset:go_default_library",
 "//test/utils:go_default_library",
@@ -39,6 +39,7 @@ import (
 appsinternal "k8s.io/kubernetes/pkg/apis/apps"
 deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
 "k8s.io/kubernetes/test/e2e/framework"
+frameworkdeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
 "k8s.io/kubernetes/test/e2e/framework/replicaset"
 testutil "k8s.io/kubernetes/test/utils"
 utilpointer "k8s.io/utils/pointer"
@@ -320,7 +321,7 @@ func testRecreateDeployment(f *framework.Framework) {
 
 // Update deployment to delete redis pods and bring up nginx pods.
 framework.Logf("Triggering a new rollout for deployment %q", deploymentName)
-deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deploymentName, func(update *apps.Deployment) {
+deployment, err = frameworkdeployment.UpdateDeploymentWithRetries(c, ns, deploymentName, func(update *apps.Deployment) {
 update.Spec.Template.Spec.Containers[0].Name = NginxImageName
 update.Spec.Template.Spec.Containers[0].Image = NginxImage
 })
@@ -462,7 +463,7 @@ func testRolloverDeployment(f *framework.Framework) {
 // The deployment is stuck, update it to rollover the above 2 ReplicaSets and bring up redis pods.
 framework.Logf("Rollover old replica sets for deployment %q with new image update", deploymentName)
 updatedDeploymentImageName, updatedDeploymentImage := RedisImageName, RedisImage
-deployment, err = framework.UpdateDeploymentWithRetries(c, ns, newDeployment.Name, func(update *apps.Deployment) {
+deployment, err = frameworkdeployment.UpdateDeploymentWithRetries(c, ns, newDeployment.Name, func(update *apps.Deployment) {
 update.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImageName
 update.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage
 })
@@ -535,7 +536,7 @@ func testRollbackDeployment(f *framework.Framework) {
 updatedDeploymentImage := RedisImage
 updatedDeploymentImageName := RedisImageName
 updateAnnotation := map[string]string{"action": "update", "log": "I need to update it"}
-deployment, err := framework.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *apps.Deployment) {
+deployment, err := frameworkdeployment.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *apps.Deployment) {
 update.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImageName
 update.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage
 update.Annotations = updateAnnotation
@@ -681,7 +682,7 @@ func testIterativeDeployments(f *framework.Framework) {
 case n < 0.2:
 // trigger a new deployment
 framework.Logf("%02d: triggering a new rollout for deployment %q", i, deployment.Name)
-deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
+deployment, err = frameworkdeployment.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
 newEnv := v1.EnvVar{Name: "A", Value: fmt.Sprintf("%d", i)}
 update.Spec.Template.Spec.Containers[0].Env = append(update.Spec.Template.Spec.Containers[0].Env, newEnv)
 randomScale(update, i)
@@ -691,7 +692,7 @@ func testIterativeDeployments(f *framework.Framework) {
 case n < 0.4:
 // rollback to the previous version
 framework.Logf("%02d: rolling back a rollout for deployment %q", i, deployment.Name)
-deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
+deployment, err = frameworkdeployment.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
 if update.Annotations == nil {
 update.Annotations = make(map[string]string)
 }
@@ -702,7 +703,7 @@ func testIterativeDeployments(f *framework.Framework) {
 case n < 0.6:
 // just scaling
 framework.Logf("%02d: scaling deployment %q", i, deployment.Name)
-deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
+deployment, err = frameworkdeployment.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
 randomScale(update, i)
 })
 gomega.Expect(err).NotTo(gomega.HaveOccurred())
@@ -711,14 +712,14 @@ func testIterativeDeployments(f *framework.Framework) {
 // toggling the deployment
 if deployment.Spec.Paused {
 framework.Logf("%02d: pausing deployment %q", i, deployment.Name)
-deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
+deployment, err = frameworkdeployment.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
 update.Spec.Paused = true
 randomScale(update, i)
 })
 gomega.Expect(err).NotTo(gomega.HaveOccurred())
 } else {
 framework.Logf("%02d: resuming deployment %q", i, deployment.Name)
-deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
+deployment, err = frameworkdeployment.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
 update.Spec.Paused = false
 randomScale(update, i)
 })
@@ -755,7 +756,7 @@ func testIterativeDeployments(f *framework.Framework) {
 deployment, err = c.AppsV1().Deployments(ns).Get(deployment.Name, metav1.GetOptions{})
 gomega.Expect(err).NotTo(gomega.HaveOccurred())
 if deployment.Spec.Paused {
-deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
+deployment, err = frameworkdeployment.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
 update.Spec.Paused = false
 })
 }
@@ -861,7 +862,7 @@ func testProportionalScalingDeployment(f *framework.Framework) {
 // Update the deployment with a non-existent image so that the new replica set
 // will be blocked to simulate a partial rollout.
 framework.Logf("Updating deployment %q with a non-existent image", deploymentName)
-deployment, err = framework.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *apps.Deployment) {
+deployment, err = frameworkdeployment.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *apps.Deployment) {
 update.Spec.Template.Spec.Containers[0].Image = "nginx:404"
 })
 gomega.Expect(err).NotTo(gomega.HaveOccurred())
@@ -921,7 +922,7 @@ func testProportionalScalingDeployment(f *framework.Framework) {
 // Scale the deployment to 30 replicas.
 newReplicas = int32(30)
 framework.Logf("Scaling up the deployment %q from %d to %d", deploymentName, replicas, newReplicas)
-deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
+deployment, err = frameworkdeployment.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
 update.Spec.Replicas = &newReplicas
 })
 gomega.Expect(err).NotTo(gomega.HaveOccurred())
@@ -147,6 +147,7 @@ filegroup(
 ":package-srcs",
 "//test/e2e/framework/auth:all-srcs",
 "//test/e2e/framework/config:all-srcs",
+"//test/e2e/framework/deployment:all-srcs",
 "//test/e2e/framework/ginkgowrapper:all-srcs",
 "//test/e2e/framework/gpu:all-srcs",
 "//test/e2e/framework/ingress:all-srcs",
@@ -0,0 +1,29 @@
+package(default_visibility = ["//visibility:public"])
+
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "go_default_library",
+    srcs = ["utils.go"],
+    importpath = "k8s.io/kubernetes/test/e2e/framework/deployment",
+    deps = [
+        "//staging/src/k8s.io/api/apps/v1:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
+        "//test/e2e/framework:go_default_library",
+        "//test/utils:go_default_library",
+    ],
+)
+
+filegroup(
+    name = "package-srcs",
+    srcs = glob(["**"]),
+    tags = ["automanaged"],
+    visibility = ["//visibility:private"],
+)
+
+filegroup(
+    name = "all-srcs",
+    srcs = [":package-srcs"],
+    tags = ["automanaged"],
+    visibility = ["//visibility:public"],
+)
@@ -0,0 +1,29 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package deployment
+
+import (
+	apps "k8s.io/api/apps/v1"
+	clientset "k8s.io/client-go/kubernetes"
+	"k8s.io/kubernetes/test/e2e/framework"
+	testutils "k8s.io/kubernetes/test/utils"
+)
+
+// UpdateDeploymentWithRetries updates the specified deployment with retries.
+func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string, applyUpdate testutils.UpdateDeploymentFunc) (*apps.Deployment, error) {
+	return testutils.UpdateDeploymentWithRetries(c, namespace, name, applyUpdate, framework.Logf, framework.Poll, framework.PollShortTimeout)
+}
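For context, the relocated helper keeps the old behavior: it delegates to testutils.UpdateDeploymentWithRetries with the framework's Logf, Poll, and PollShortTimeout, exactly as the copy removed from the framework package in the next hunk did. A hedged usage sketch from a consumer's point of view follows; the scaleTo wrapper and its arguments are illustrative placeholders, while the gomega assertion mirrors the call sites updated above.

package example

import (
	"github.com/onsi/gomega"

	apps "k8s.io/api/apps/v1"
	clientset "k8s.io/client-go/kubernetes"
	frameworkdeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
)

// scaleTo is a hypothetical consumer of the relocated helper: mutate the
// desired spec inside the callback, then assert on the returned error.
func scaleTo(c clientset.Interface, ns, name string, replicas int32) *apps.Deployment {
	d, err := frameworkdeployment.UpdateDeploymentWithRetries(c, ns, name, func(update *apps.Deployment) {
		update.Spec.Replicas = &replicas
	})
	gomega.Expect(err).NotTo(gomega.HaveOccurred())
	return d
}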
@@ -36,11 +36,6 @@ import (
 imageutils "k8s.io/kubernetes/test/utils/image"
 )
 
-// UpdateDeploymentWithRetries updates the specified deployment with retries.
-func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string, applyUpdate testutils.UpdateDeploymentFunc) (*apps.Deployment, error) {
-return testutils.UpdateDeploymentWithRetries(c, namespace, name, applyUpdate, Logf, Poll, PollShortTimeout)
-}
-
 // WaitForDeploymentOldRSsNum waits for the deployment to clean up old rcs.
 func WaitForDeploymentOldRSsNum(c clientset.Interface, ns, deploymentName string, desiredRSNum int) error {
 var oldRSs []*apps.ReplicaSet
@@ -27,6 +27,7 @@ go_library(
 "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
 "//test/e2e/framework:go_default_library",
+"//test/e2e/framework/deployment:go_default_library",
 "//test/e2e/framework/job:go_default_library",
 "//test/e2e/framework/replicaset:go_default_library",
 "//test/e2e/upgrades:go_default_library",
@@ -24,6 +24,7 @@ import (
 "k8s.io/apimachinery/pkg/types"
 deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
 "k8s.io/kubernetes/test/e2e/framework"
+frameworkdeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
 "k8s.io/kubernetes/test/e2e/upgrades"
 
 "github.com/onsi/ginkgo"
@@ -82,7 +83,7 @@ func (t *DeploymentUpgradeTest) Setup(f *framework.Framework) {
 
 // Trigger a new rollout so that we have some history.
 ginkgo.By(fmt.Sprintf("Triggering a new rollout for deployment %q", deploymentName))
-deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deploymentName, func(update *apps.Deployment) {
+deployment, err = frameworkdeployment.UpdateDeploymentWithRetries(c, ns, deploymentName, func(update *apps.Deployment) {
 update.Spec.Template.Spec.Containers[0].Name = "updated-name"
 })
 framework.ExpectNoError(err)
@@ -158,7 +159,7 @@ func (t *DeploymentUpgradeTest) Test(f *framework.Framework, done <-chan struct{
 
 // Verify the upgraded deployment is active by scaling up the deployment by 1
 ginkgo.By(fmt.Sprintf("Scaling up replicaset of deployment %q by 1", deploymentName))
-_, err = framework.UpdateDeploymentWithRetries(c, ns, deploymentName, func(deployment *apps.Deployment) {
+_, err = frameworkdeployment.UpdateDeploymentWithRetries(c, ns, deploymentName, func(deployment *apps.Deployment) {
 *deployment.Spec.Replicas = *deployment.Spec.Replicas + 1
 })
 framework.ExpectNoError(err)