mirror of https://github.com/k3s-io/k3s
Merge pull request #76409 from draveness/feature/refactor-framework-rs-util
refactor: cleanup e2e replicaset utils (tag: k3s-v1.15.3)
commit ed2bdd53dc
@@ -61,6 +61,7 @@ go_library(
         "//staging/src/k8s.io/client-go/tools/watch:go_default_library",
         "//test/e2e/common:go_default_library",
         "//test/e2e/framework:go_default_library",
+        "//test/e2e/framework/replicaset:go_default_library",
         "//test/utils:go_default_library",
         "//test/utils/image:go_default_library",
         "//vendor/github.com/davecgh/go-spew/spew:go_default_library",
@@ -39,6 +39,7 @@ import (
 	appsinternal "k8s.io/kubernetes/pkg/apis/apps"
 	deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
 	"k8s.io/kubernetes/test/e2e/framework"
+	"k8s.io/kubernetes/test/e2e/framework/replicaset"
 	testutil "k8s.io/kubernetes/test/utils"
 	utilpointer "k8s.io/utils/pointer"
 )
@@ -421,7 +422,7 @@ func testRolloverDeployment(f *framework.Framework) {
 
 	// Wait for replica set to become ready before adopting it.
 	framework.Logf("Waiting for pods owned by replica set %q to become ready", rsName)
-	Expect(framework.WaitForReadyReplicaSet(c, ns, rsName)).NotTo(HaveOccurred())
+	Expect(replicaset.WaitForReadyReplicaSet(c, ns, rsName)).NotTo(HaveOccurred())
 
 	// Create a deployment to delete nginx pods and instead bring up redis-slave pods.
 	// We use a nonexistent image here, so that we make sure it won't finish
@@ -527,7 +528,7 @@ func testRollbackDeployment(f *framework.Framework) {
 	Expect(err).NotTo(HaveOccurred())
 
 	// Current newRS annotation should be "create"
-	err = framework.CheckNewRSAnnotations(c, ns, deploymentName, createAnnotation)
+	err = replicaset.CheckNewRSAnnotations(c, ns, deploymentName, createAnnotation)
 	Expect(err).NotTo(HaveOccurred())
 
 	// 2. Update the deployment to create redis pods.
@@ -553,7 +554,7 @@ func testRollbackDeployment(f *framework.Framework) {
 	Expect(err).NotTo(HaveOccurred())
 
 	// Current newRS annotation should be "update"
-	err = framework.CheckNewRSAnnotations(c, ns, deploymentName, updateAnnotation)
+	err = replicaset.CheckNewRSAnnotations(c, ns, deploymentName, updateAnnotation)
 	Expect(err).NotTo(HaveOccurred())
 
 	// 3. Update the deploymentRollback to rollback to revision 1
@@ -576,7 +577,7 @@ func testRollbackDeployment(f *framework.Framework) {
 	Expect(err).NotTo(HaveOccurred())
 
 	// Current newRS annotation should be "create", after the rollback
-	err = framework.CheckNewRSAnnotations(c, ns, deploymentName, createAnnotation)
+	err = replicaset.CheckNewRSAnnotations(c, ns, deploymentName, createAnnotation)
 	Expect(err).NotTo(HaveOccurred())
 
 	// 4. Update the deploymentRollback to rollback to last revision
@@ -597,7 +598,7 @@ func testRollbackDeployment(f *framework.Framework) {
 	Expect(err).NotTo(HaveOccurred())
 
 	// Current newRS annotation should be "update", after the rollback
-	err = framework.CheckNewRSAnnotations(c, ns, deploymentName, updateAnnotation)
+	err = replicaset.CheckNewRSAnnotations(c, ns, deploymentName, updateAnnotation)
 	Expect(err).NotTo(HaveOccurred())
 
 	// 5. Update the deploymentRollback to rollback to revision 10
@@ -875,17 +876,17 @@ func testProportionalScalingDeployment(f *framework.Framework) {
 	// First rollout's replicaset should have Deployment's (replicas - maxUnavailable) = 10 - 2 = 8 available replicas.
 	minAvailableReplicas := replicas - int32(maxUnavailable)
 	framework.Logf("Waiting for the first rollout's replicaset to have .status.availableReplicas = %d", minAvailableReplicas)
-	Expect(framework.WaitForReplicaSetTargetAvailableReplicas(c, firstRS, minAvailableReplicas)).NotTo(HaveOccurred())
+	Expect(replicaset.WaitForReplicaSetTargetAvailableReplicas(c, firstRS, minAvailableReplicas)).NotTo(HaveOccurred())
 
 	// First rollout's replicaset should have .spec.replicas = 8 too.
 	framework.Logf("Waiting for the first rollout's replicaset to have .spec.replicas = %d", minAvailableReplicas)
-	Expect(framework.WaitForReplicaSetTargetSpecReplicas(c, firstRS, minAvailableReplicas)).NotTo(HaveOccurred())
+	Expect(replicaset.WaitForReplicaSetTargetSpecReplicas(c, firstRS, minAvailableReplicas)).NotTo(HaveOccurred())
 
 	// The desired replicas wait makes sure that the RS controller has created expected number of pods.
 	framework.Logf("Waiting for the first rollout's replicaset of deployment %q to have desired number of replicas", deploymentName)
 	firstRS, err = c.AppsV1().ReplicaSets(ns).Get(firstRS.Name, metav1.GetOptions{})
 	Expect(err).NotTo(HaveOccurred())
-	err = framework.WaitForReplicaSetDesiredReplicas(c.AppsV1(), firstRS)
+	err = replicaset.WaitForReplicaSetDesiredReplicas(c.AppsV1(), firstRS)
 	Expect(err).NotTo(HaveOccurred())
 
 	// Checking state of second rollout's replicaset.
@@ -902,13 +903,13 @@ func testProportionalScalingDeployment(f *framework.Framework) {
 	// Second rollout's replicaset should have Deployment's (replicas + maxSurge - first RS's replicas) = 10 + 3 - 8 = 5 for .spec.replicas.
 	newReplicas := replicas + int32(maxSurge) - minAvailableReplicas
 	framework.Logf("Waiting for the second rollout's replicaset to have .spec.replicas = %d", newReplicas)
-	Expect(framework.WaitForReplicaSetTargetSpecReplicas(c, secondRS, newReplicas)).NotTo(HaveOccurred())
+	Expect(replicaset.WaitForReplicaSetTargetSpecReplicas(c, secondRS, newReplicas)).NotTo(HaveOccurred())
 
 	// The desired replicas wait makes sure that the RS controller has created expected number of pods.
 	framework.Logf("Waiting for the second rollout's replicaset of deployment %q to have desired number of replicas", deploymentName)
 	secondRS, err = c.AppsV1().ReplicaSets(ns).Get(secondRS.Name, metav1.GetOptions{})
 	Expect(err).NotTo(HaveOccurred())
-	err = framework.WaitForReplicaSetDesiredReplicas(c.AppsV1(), secondRS)
+	err = replicaset.WaitForReplicaSetDesiredReplicas(c.AppsV1(), secondRS)
 	Expect(err).NotTo(HaveOccurred())
 
 	// Check the deployment's minimum availability.
@@ -934,12 +935,12 @@ func testProportionalScalingDeployment(f *framework.Framework) {
 	// First rollout's replicaset should have .spec.replicas = 8 + (30-10)*(8/13) = 8 + 12 = 20 replicas.
 	// Note that 12 comes from rounding (30-10)*(8/13) to nearest integer.
 	framework.Logf("Verifying that first rollout's replicaset has .spec.replicas = 20")
-	Expect(framework.WaitForReplicaSetTargetSpecReplicas(c, firstRS, 20)).NotTo(HaveOccurred())
+	Expect(replicaset.WaitForReplicaSetTargetSpecReplicas(c, firstRS, 20)).NotTo(HaveOccurred())
 
 	// Second rollout's replicaset should have .spec.replicas = 5 + (30-10)*(5/13) = 5 + 8 = 13 replicas.
 	// Note that 8 comes from rounding (30-10)*(5/13) to nearest integer.
 	framework.Logf("Verifying that second rollout's replicaset has .spec.replicas = 13")
-	Expect(framework.WaitForReplicaSetTargetSpecReplicas(c, secondRS, 13)).NotTo(HaveOccurred())
+	Expect(replicaset.WaitForReplicaSetTargetSpecReplicas(c, secondRS, 13)).NotTo(HaveOccurred())
 }
 
 func checkDeploymentReplicaSetsControllerRef(c clientset.Interface, ns string, uid types.UID, label map[string]string) error {
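The pattern above repeats throughout the commit: the ReplicaSet helpers move out of the monolithic framework package into the dedicated test/e2e/framework/replicaset package, so call sites change only their import and package qualifier. A minimal sketch of a migrated call site; the wrapper function and its name are illustrative, not part of this diff:

package example

import (
	clientset "k8s.io/client-go/kubernetes"

	"k8s.io/kubernetes/test/e2e/framework"
	"k8s.io/kubernetes/test/e2e/framework/replicaset"
)

// waitForAdoptableRS is a hypothetical wrapper showing the new call site.
// Before: framework.WaitForReadyReplicaSet(c, ns, rsName)
// After:  replicaset.WaitForReadyReplicaSet(c, ns, rsName)
func waitForAdoptableRS(c clientset.Interface, ns, rsName string) error {
	framework.Logf("Waiting for pods owned by replica set %q to become ready", rsName)
	return replicaset.WaitForReadyReplicaSet(c, ns, rsName)
}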
@@ -30,6 +30,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/kubernetes/pkg/controller/replicaset"
 	"k8s.io/kubernetes/test/e2e/framework"
+	replicasetutil "k8s.io/kubernetes/test/e2e/framework/replicaset"
 
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
@@ -228,7 +229,7 @@ func testReplicaSetConditionCheck(f *framework.Framework) {
 	Expect(err).NotTo(HaveOccurred())
 
 	By(fmt.Sprintf("Scaling down replica set %q to satisfy pod quota", name))
-	rs, err = framework.UpdateReplicaSetWithRetries(c, namespace, name, func(update *apps.ReplicaSet) {
+	rs, err = replicasetutil.UpdateReplicaSetWithRetries(c, namespace, name, func(update *apps.ReplicaSet) {
 		x := int32(2)
 		update.Spec.Replicas = &x
 	})
@@ -77,6 +77,7 @@ go_library(
         "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
         "//staging/src/k8s.io/client-go/tools/watch:go_default_library",
         "//test/e2e/framework:go_default_library",
+        "//test/e2e/framework/replicaset:go_default_library",
         "//test/utils:go_default_library",
         "//test/utils/image:go_default_library",
         "//vendor/github.com/onsi/ginkgo:go_default_library",
@@ -32,6 +32,7 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	api "k8s.io/kubernetes/pkg/apis/core"
 	"k8s.io/kubernetes/test/e2e/framework"
+	"k8s.io/kubernetes/test/e2e/framework/replicaset"
 	testutils "k8s.io/kubernetes/test/utils"
 
 	. "github.com/onsi/ginkgo"
@@ -482,7 +483,7 @@ func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, ns, name st
 			RCConfig: rcConfig,
 		}
 		By(fmt.Sprintf("creating replicaset %s in namespace %s", rsConfig.Name, rsConfig.Namespace))
-		framework.ExpectNoError(framework.RunReplicaSet(rsConfig))
+		framework.ExpectNoError(replicaset.RunReplicaSet(rsConfig))
 		break
 	default:
 		framework.Failf(invalidKind)
@@ -28,7 +28,6 @@ go_library(
         "pv_util.go",
         "rc_util.go",
         "resource_usage_gatherer.go",
-        "rs_util.go",
         "service_util.go",
         "size.go",
         "ssh.go",
@@ -106,7 +105,6 @@ go_library(
         "//staging/src/k8s.io/client-go/dynamic:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library",
-        "//staging/src/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1:go_default_library",
@@ -165,6 +163,7 @@ filegroup(
         "//test/e2e/framework/providers/kubemark:all-srcs",
         "//test/e2e/framework/providers/openstack:all-srcs",
         "//test/e2e/framework/providers/vsphere:all-srcs",
+        "//test/e2e/framework/replicaset:all-srcs",
         "//test/e2e/framework/testfiles:all-srcs",
         "//test/e2e/framework/timer:all-srcs",
         "//test/e2e/framework/viperconfig:all-srcs",
@@ -38,7 +38,7 @@ import (
 
 // UpdateDeploymentWithRetries updates the specified deployment with retries.
 func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string, applyUpdate testutils.UpdateDeploymentFunc) (*apps.Deployment, error) {
-	return testutils.UpdateDeploymentWithRetries(c, namespace, name, applyUpdate, Logf, Poll, pollShortTimeout)
+	return testutils.UpdateDeploymentWithRetries(c, namespace, name, applyUpdate, Logf, Poll, PollShortTimeout)
 }
 
 // WaitForDeploymentOldRSsNum waits for the deployment to clean up old rcs.
@@ -77,14 +77,14 @@ func WaitForObservedDeployment(c clientset.Interface, ns, deploymentName string,
 
 // WaitForDeploymentWithCondition waits for the specified deployment condition.
 func WaitForDeploymentWithCondition(c clientset.Interface, ns, deploymentName, reason string, condType apps.DeploymentConditionType) error {
-	return testutils.WaitForDeploymentWithCondition(c, ns, deploymentName, reason, condType, Logf, Poll, pollLongTimeout)
+	return testutils.WaitForDeploymentWithCondition(c, ns, deploymentName, reason, condType, Logf, Poll, PollLongTimeout)
 }
 
 // WaitForDeploymentRevisionAndImage waits for the deployment's and its new RS's revision and container image to match the given revision and image.
 // Note that deployment revision and its new RS revision should be updated shortly most of the time, but an overwhelmed RS controller
 // may result in taking longer to relabel a RS.
 func WaitForDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName string, revision, image string) error {
-	return testutils.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, revision, image, Logf, Poll, pollLongTimeout)
+	return testutils.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, revision, image, Logf, Poll, PollLongTimeout)
 }
 
 // NewDeployment returns a deployment spec with the specified argument.
@@ -123,24 +123,24 @@ func NewDeployment(deploymentName string, replicas int32, podLabels map[string]s
 // Rolling update strategy is used only during a rolling update, and can be violated in other situations,
 // such as shortly after a scaling event or the deployment is just created.
 func WaitForDeploymentComplete(c clientset.Interface, d *apps.Deployment) error {
-	return testutils.WaitForDeploymentComplete(c, d, Logf, Poll, pollLongTimeout)
+	return testutils.WaitForDeploymentComplete(c, d, Logf, Poll, PollLongTimeout)
 }
 
 // WaitForDeploymentCompleteAndCheckRolling waits for the deployment to complete, and check rolling update strategy isn't broken at any times.
 // Rolling update strategy should not be broken during a rolling update.
 func WaitForDeploymentCompleteAndCheckRolling(c clientset.Interface, d *apps.Deployment) error {
-	return testutils.WaitForDeploymentCompleteAndCheckRolling(c, d, Logf, Poll, pollLongTimeout)
+	return testutils.WaitForDeploymentCompleteAndCheckRolling(c, d, Logf, Poll, PollLongTimeout)
 }
 
 // WaitForDeploymentUpdatedReplicasGTE waits for given deployment to be observed by the controller and has at least a number of updatedReplicas
 func WaitForDeploymentUpdatedReplicasGTE(c clientset.Interface, ns, deploymentName string, minUpdatedReplicas int32, desiredGeneration int64) error {
-	return testutils.WaitForDeploymentUpdatedReplicasGTE(c, ns, deploymentName, minUpdatedReplicas, desiredGeneration, Poll, pollLongTimeout)
+	return testutils.WaitForDeploymentUpdatedReplicasGTE(c, ns, deploymentName, minUpdatedReplicas, desiredGeneration, Poll, PollLongTimeout)
 }
 
 // WaitForDeploymentRollbackCleared waits for given deployment either started rolling back or doesn't need to rollback.
 // Note that rollback should be cleared shortly, so we only wait for 1 minute here to fail early.
 func WaitForDeploymentRollbackCleared(c clientset.Interface, ns, deploymentName string) error {
-	return testutils.WaitForDeploymentRollbackCleared(c, ns, deploymentName, Poll, pollShortTimeout)
+	return testutils.WaitForDeploymentRollbackCleared(c, ns, deploymentName, Poll, PollShortTimeout)
 }
 
 // WatchRecreateDeployment watches Recreate deployments and ensures no new pods will run at the same time with
@@ -200,7 +200,7 @@ func logPodsOfDeployment(c clientset.Interface, deployment *apps.Deployment, rsL
 
 // WaitForDeploymentRevision waits for becoming the target revision of a delopyment.
 func WaitForDeploymentRevision(c clientset.Interface, d *apps.Deployment, targetRevision string) error {
-	err := wait.PollImmediate(Poll, pollLongTimeout, func() (bool, error) {
+	err := wait.PollImmediate(Poll, PollLongTimeout, func() (bool, error) {
 		deployment, err := c.AppsV1().Deployments(d.Namespace).Get(d.Name, metav1.GetOptions{})
 		if err != nil {
 			return false, err
@@ -0,0 +1,38 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "go_default_library",
+    srcs = [
+        "fixtures.go",
+        "rest.go",
+        "wait.go",
+    ],
+    importpath = "k8s.io/kubernetes/test/e2e/framework/replicaset",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//pkg/controller/deployment/util:go_default_library",
+        "//staging/src/k8s.io/api/apps/v1:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library",
+        "//test/e2e/framework:go_default_library",
+        "//test/utils:go_default_library",
+        "//vendor/github.com/onsi/ginkgo:go_default_library",
+    ],
+)
+
+filegroup(
+    name = "package-srcs",
+    srcs = glob(["**"]),
+    tags = ["automanaged"],
+    visibility = ["//visibility:private"],
+)
+
+filegroup(
+    name = "all-srcs",
+    srcs = [":package-srcs"],
+    tags = ["automanaged"],
+    visibility = ["//visibility:public"],
+)
@@ -0,0 +1,56 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package replicaset
+
+import (
+	apps "k8s.io/api/apps/v1"
+	"k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// NewReplicaSet returns a new ReplicaSet.
+func NewReplicaSet(name, namespace string, replicas int32, podLabels map[string]string, imageName, image string) *apps.ReplicaSet {
+	return &apps.ReplicaSet{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "ReplicaSet",
+			APIVersion: "apps/v1",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: namespace,
+			Name:      name,
+		},
+		Spec: apps.ReplicaSetSpec{
+			Selector: &metav1.LabelSelector{
+				MatchLabels: podLabels,
+			},
+			Replicas: &replicas,
+			Template: v1.PodTemplateSpec{
+				ObjectMeta: metav1.ObjectMeta{
+					Labels: podLabels,
+				},
+				Spec: v1.PodSpec{
+					Containers: []v1.Container{
+						{
+							Name:  imageName,
+							Image: image,
+						},
+					},
+				},
+			},
+		},
+	}
+}
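The new fixtures.go gives tests a self-contained ReplicaSet constructor. A short usage sketch; the namespace, name, labels, and image values are illustrative, and the context-free Create call matches the client-go version used elsewhere in this diff:

package example

import (
	clientset "k8s.io/client-go/kubernetes"

	"k8s.io/kubernetes/test/e2e/framework/replicaset"
)

// createTestRS builds a three-replica ReplicaSet from the new fixture and
// submits it to the API server.
func createTestRS(c clientset.Interface, ns string) error {
	rs := replicaset.NewReplicaSet("test-rs", ns, 3,
		map[string]string{"app": "test"}, "nginx", "nginx:1.15")
	_, err := c.AppsV1().ReplicaSets(ns).Create(rs)
	return err
}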
@@ -0,0 +1,52 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package replicaset
+
+import (
+	"fmt"
+
+	apps "k8s.io/api/apps/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	clientset "k8s.io/client-go/kubernetes"
+	deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
+	"k8s.io/kubernetes/test/e2e/framework"
+	testutils "k8s.io/kubernetes/test/utils"
+)
+
+// UpdateReplicaSetWithRetries updates replicaset template with retries.
+func UpdateReplicaSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate testutils.UpdateReplicaSetFunc) (*apps.ReplicaSet, error) {
+	return testutils.UpdateReplicaSetWithRetries(c, namespace, name, applyUpdate, framework.Logf, framework.Poll, framework.PollShortTimeout)
+}
+
+// CheckNewRSAnnotations check if the new RS's annotation is as expected
+func CheckNewRSAnnotations(c clientset.Interface, ns, deploymentName string, expectedAnnotations map[string]string) error {
+	deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
+	if err != nil {
+		return err
+	}
+	newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1())
+	if err != nil {
+		return err
+	}
+	for k, v := range expectedAnnotations {
+		// Skip checking revision annotations
+		if k != deploymentutil.RevisionAnnotation && v != newRS.Annotations[k] {
+			return fmt.Errorf("Expected new RS annotations = %+v, got %+v", expectedAnnotations, newRS.Annotations)
+		}
+	}
+	return nil
+}
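The new rest.go keeps the retrying update helper next to the annotation check it supports. A sketch of a scale-up through UpdateReplicaSetWithRetries; the wrapper and its argument values are illustrative:

package example

import (
	apps "k8s.io/api/apps/v1"
	clientset "k8s.io/client-go/kubernetes"

	"k8s.io/kubernetes/test/e2e/framework/replicaset"
)

// scaleRS mutates .spec.replicas inside the retrying helper, which repeats
// the get-mutate-update cycle rather than failing on a conflicting write.
func scaleRS(c clientset.Interface, ns, name string, replicas int32) error {
	_, err := replicaset.UpdateReplicaSetWithRetries(c, ns, name, func(rs *apps.ReplicaSet) {
		rs.Spec.Replicas = &replicas
	})
	return err
}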
@@ -14,52 +14,32 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package framework
+package replicaset
 
 import (
 	"fmt"
 
 	"github.com/onsi/ginkgo"
 
 	apps "k8s.io/api/apps/v1"
-	"k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
 	appsclient "k8s.io/client-go/kubernetes/typed/apps/v1"
-	deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
+	"k8s.io/kubernetes/test/e2e/framework"
 	testutils "k8s.io/kubernetes/test/utils"
 )
 
-type updateRsFunc func(d *apps.ReplicaSet)
-
-// UpdateReplicaSetWithRetries updates replicaset template with retries.
-func UpdateReplicaSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate testutils.UpdateReplicaSetFunc) (*apps.ReplicaSet, error) {
-	return testutils.UpdateReplicaSetWithRetries(c, namespace, name, applyUpdate, Logf, Poll, pollShortTimeout)
-}
-
-// CheckNewRSAnnotations check if the new RS's annotation is as expected
-func CheckNewRSAnnotations(c clientset.Interface, ns, deploymentName string, expectedAnnotations map[string]string) error {
-	deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
-	if err != nil {
-		return err
-	}
-	newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1())
-	if err != nil {
-		return err
-	}
-	for k, v := range expectedAnnotations {
-		// Skip checking revision annotations
-		if k != deploymentutil.RevisionAnnotation && v != newRS.Annotations[k] {
-			return fmt.Errorf("Expected new RS annotations = %+v, got %+v", expectedAnnotations, newRS.Annotations)
-		}
-	}
-	return nil
-}
-
+// RunReplicaSet launches (and verifies correctness) of a replicaset.
+func RunReplicaSet(config testutils.ReplicaSetConfig) error {
+	ginkgo.By(fmt.Sprintf("creating replicaset %s in namespace %s", config.Name, config.Namespace))
+	config.NodeDumpFunc = framework.DumpNodeDebugInfo
+	config.ContainerDumpFunc = framework.LogFailedContainers
+	return testutils.RunReplicaSet(config)
+}
+
 // WaitForReadyReplicaSet waits until the replicaset has all of its replicas ready.
 func WaitForReadyReplicaSet(c clientset.Interface, ns, name string) error {
-	err := wait.Poll(Poll, pollShortTimeout, func() (bool, error) {
+	err := wait.Poll(framework.Poll, framework.PollShortTimeout, func() (bool, error) {
 		rs, err := c.AppsV1().ReplicaSets(ns).Get(name, metav1.GetOptions{})
 		if err != nil {
 			return false, err
@@ -75,7 +55,7 @@ func WaitForReadyReplicaSet(c clientset.Interface, ns, name string) error {
 // WaitForReplicaSetDesiredReplicas waits until the replicaset has desired number of replicas.
 func WaitForReplicaSetDesiredReplicas(rsClient appsclient.ReplicaSetsGetter, replicaSet *apps.ReplicaSet) error {
 	desiredGeneration := replicaSet.Generation
-	err := wait.PollImmediate(Poll, pollShortTimeout, func() (bool, error) {
+	err := wait.PollImmediate(framework.Poll, framework.PollShortTimeout, func() (bool, error) {
 		rs, err := rsClient.ReplicaSets(replicaSet.Namespace).Get(replicaSet.Name, metav1.GetOptions{})
 		if err != nil {
 			return false, err
@@ -91,7 +71,7 @@ func WaitForReplicaSetDesiredReplicas(rsClient appsclient.ReplicaSetsGetter, rep
 // WaitForReplicaSetTargetSpecReplicas waits for .spec.replicas of a RS to equal targetReplicaNum
 func WaitForReplicaSetTargetSpecReplicas(c clientset.Interface, replicaSet *apps.ReplicaSet, targetReplicaNum int32) error {
 	desiredGeneration := replicaSet.Generation
-	err := wait.PollImmediate(Poll, pollShortTimeout, func() (bool, error) {
+	err := wait.PollImmediate(framework.Poll, framework.PollShortTimeout, func() (bool, error) {
 		rs, err := c.AppsV1().ReplicaSets(replicaSet.Namespace).Get(replicaSet.Name, metav1.GetOptions{})
 		if err != nil {
 			return false, err
@@ -107,7 +87,7 @@ func WaitForReplicaSetTargetSpecReplicas(c clientset.Interface, replicaSet *apps
 // WaitForReplicaSetTargetAvailableReplicas waits for .status.availableReplicas of a RS to equal targetReplicaNum
 func WaitForReplicaSetTargetAvailableReplicas(c clientset.Interface, replicaSet *apps.ReplicaSet, targetReplicaNum int32) error {
 	desiredGeneration := replicaSet.Generation
-	err := wait.PollImmediate(Poll, pollShortTimeout, func() (bool, error) {
+	err := wait.PollImmediate(framework.Poll, framework.PollShortTimeout, func() (bool, error) {
 		rs, err := c.AppsV1().ReplicaSets(replicaSet.Namespace).Get(replicaSet.Name, metav1.GetOptions{})
 		if err != nil {
 			return false, err
@@ -119,44 +99,3 @@ func WaitForReplicaSetTargetAvailableReplicas(c clientset.Interface, replicaSet
 	}
 	return err
 }
-
-// RunReplicaSet launches (and verifies correctness) of a replicaset.
-func RunReplicaSet(config testutils.ReplicaSetConfig) error {
-	ginkgo.By(fmt.Sprintf("creating replicaset %s in namespace %s", config.Name, config.Namespace))
-	config.NodeDumpFunc = DumpNodeDebugInfo
-	config.ContainerDumpFunc = LogFailedContainers
-	return testutils.RunReplicaSet(config)
-}
-
-// NewReplicaSet returns a new ReplicaSet.
-func NewReplicaSet(name, namespace string, replicas int32, podLabels map[string]string, imageName, image string) *apps.ReplicaSet {
-	return &apps.ReplicaSet{
-		TypeMeta: metav1.TypeMeta{
-			Kind:       "ReplicaSet",
-			APIVersion: "apps/v1",
-		},
-		ObjectMeta: metav1.ObjectMeta{
-			Namespace: namespace,
-			Name:      name,
-		},
-		Spec: apps.ReplicaSetSpec{
-			Selector: &metav1.LabelSelector{
-				MatchLabels: podLabels,
-			},
-			Replicas: &replicas,
-			Template: v1.PodTemplateSpec{
-				ObjectMeta: metav1.ObjectMeta{
-					Labels: podLabels,
-				},
-				Spec: v1.PodSpec{
-					Containers: []v1.Container{
-						{
-							Name:  imageName,
-							Image: image,
-						},
-					},
-				},
-			},
-		},
-	}
-}
@@ -129,8 +129,8 @@ const (
 	// How often to Poll pods, nodes and claims.
 	Poll = 2 * time.Second
 
-	pollShortTimeout = 1 * time.Minute
-	pollLongTimeout  = 5 * time.Minute
+	PollShortTimeout = 1 * time.Minute
+	PollLongTimeout  = 5 * time.Minute
 
 	// service accounts are provisioned after namespace creation
 	// a service account is required to support pod creation in a namespace as part of admission control
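Exporting the two timeouts is what lets the new package, now outside framework, share the framework's polling cadence, as wait.go above does with framework.Poll and framework.PollShortTimeout. A tiny sketch; the wrapper name is illustrative:

package example

import (
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/kubernetes/test/e2e/framework"
)

// pollShort runs cond every framework.Poll (2s) until it succeeds or
// framework.PollShortTimeout (1m) elapses.
func pollShort(cond wait.ConditionFunc) error {
	return wait.Poll(framework.Poll, framework.PollShortTimeout, cond)
}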
@@ -46,6 +46,7 @@ go_library(
         "//test/e2e/framework:go_default_library",
         "//test/e2e/framework/gpu:go_default_library",
         "//test/e2e/framework/providers/gce:go_default_library",
+        "//test/e2e/framework/replicaset:go_default_library",
         "//test/utils:go_default_library",
         "//test/utils/image:go_default_library",
         "//vendor/github.com/onsi/ginkgo:go_default_library",
@@ -34,6 +34,7 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/pkg/apis/scheduling"
 	"k8s.io/kubernetes/test/e2e/framework"
+	"k8s.io/kubernetes/test/e2e/framework/replicaset"
 
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
@@ -620,6 +621,6 @@ func createPauseRS(f *framework.Framework, conf pauseRSConfig) *appsv1.ReplicaSe
 
 func runPauseRS(f *framework.Framework, conf pauseRSConfig) *appsv1.ReplicaSet {
 	rs := createPauseRS(f, conf)
-	framework.ExpectNoError(framework.WaitForReplicaSetTargetAvailableReplicas(f.ClientSet, rs, conf.Replicas))
+	framework.ExpectNoError(replicaset.WaitForReplicaSetTargetAvailableReplicas(f.ClientSet, rs, conf.Replicas))
 	return rs
 }
@@ -27,6 +27,7 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
         "//test/e2e/framework:go_default_library",
+        "//test/e2e/framework/replicaset:go_default_library",
         "//test/e2e/upgrades:go_default_library",
         "//test/utils/image:go_default_library",
         "//vendor/github.com/onsi/ginkgo:go_default_library",
@@ -24,6 +24,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/kubernetes/test/e2e/framework"
+	"k8s.io/kubernetes/test/e2e/framework/replicaset"
 	"k8s.io/kubernetes/test/e2e/upgrades"
 
 	"github.com/onsi/ginkgo"
@@ -55,12 +56,12 @@ func (r *ReplicaSetUpgradeTest) Setup(f *framework.Framework) {
 	nginxImage := imageutils.GetE2EImage(imageutils.Nginx)
 
 	ginkgo.By(fmt.Sprintf("Creating replicaset %s in namespace %s", rsName, ns))
-	replicaSet := framework.NewReplicaSet(rsName, ns, 1, map[string]string{"test": "upgrade"}, "nginx", nginxImage)
+	replicaSet := replicaset.NewReplicaSet(rsName, ns, 1, map[string]string{"test": "upgrade"}, "nginx", nginxImage)
 	rs, err := c.AppsV1().ReplicaSets(ns).Create(replicaSet)
 	framework.ExpectNoError(err)
 
 	ginkgo.By(fmt.Sprintf("Waiting for replicaset %s to have all of its replicas ready", rsName))
-	framework.ExpectNoError(framework.WaitForReadyReplicaSet(c, ns, rsName))
+	framework.ExpectNoError(replicaset.WaitForReadyReplicaSet(c, ns, rsName))
 
 	r.UID = rs.UID
 }
@@ -84,17 +85,17 @@ func (r *ReplicaSetUpgradeTest) Test(f *framework.Framework, done <-chan struct{
 	}
 
 	ginkgo.By(fmt.Sprintf("Waiting for replicaset %s to have all of its replicas ready after upgrade", rsName))
-	framework.ExpectNoError(framework.WaitForReadyReplicaSet(c, ns, rsName))
+	framework.ExpectNoError(replicaset.WaitForReadyReplicaSet(c, ns, rsName))
 
 	// Verify the upgraded RS is active by scaling up the RS to scaleNum and ensuring all pods are Ready
 	ginkgo.By(fmt.Sprintf("Scaling up replicaset %s to %d", rsName, scaleNum))
-	_, err = framework.UpdateReplicaSetWithRetries(c, ns, rsName, func(rs *apps.ReplicaSet) {
+	_, err = replicaset.UpdateReplicaSetWithRetries(c, ns, rsName, func(rs *apps.ReplicaSet) {
 		*rs.Spec.Replicas = scaleNum
 	})
 	framework.ExpectNoError(err)
 
 	ginkgo.By(fmt.Sprintf("Waiting for replicaset %s to have all of its replicas ready after scaling", rsName))
-	framework.ExpectNoError(framework.WaitForReadyReplicaSet(c, ns, rsName))
+	framework.ExpectNoError(replicaset.WaitForReadyReplicaSet(c, ns, rsName))
 }
 
 // Teardown cleans up any remaining resources.