mirror of https://github.com/k3s-io/k3s
Merge pull request #55439 from crimsonfaith91/revamp
Automatic merge from submit-queue (batch tested with PRs 55439, 58564, 59028, 59169, 59259). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

add basic functionality deployment integration tests

**What this PR does / why we need it**: This PR adds basic deployment integration tests.

**Which issue(s) this PR fixes**: xref #52113

**Release note**:
```release-note
NONE
```
commit
61f58ebfb2
```diff
@@ -423,7 +423,7 @@ func testRolloverDeployment(f *framework.Framework) {
 	Expect(err).NotTo(HaveOccurred())
 	framework.Logf("Make sure deployment %q performs scaling operations", deploymentName)
 	// Make sure the deployment starts to scale up and down replica sets by checking if its updated replicas >= 1
-	err = framework.WaitForDeploymentUpdatedReplicasLTE(c, ns, deploymentName, deploymentReplicas, deployment.Generation)
+	err = framework.WaitForDeploymentUpdatedReplicasGTE(c, ns, deploymentName, deploymentReplicas, deployment.Generation)
 	// Check if it's updated to revision 1 correctly
 	framework.Logf("Check revision of new replica set for deployment %q", deploymentName)
 	err = framework.CheckDeploymentRevisionAndImage(c, ns, deploymentName, "1", deploymentImage)
```
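The rename is the point of this hunk: the helper succeeds once the controller reports *at least* the requested number of updated replicas, so the old `LTE` suffix was a misnomer and `GTE` matches the comparison actually performed. A minimal sketch of the predicate being polled, assuming a freshly fetched deployment `d` and the parameters visible in the call above:

```go
// Sketch only: the condition behind WaitForDeploymentUpdatedReplicasGTE.
// d is an assumed *extensions.Deployment read back from the API server.
done := d.Status.ObservedGeneration >= desiredGeneration && // controller caught up
	d.Status.UpdatedReplicas >= minUpdatedReplicas // ">=", hence "GTE"
```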
```diff
@@ -127,9 +127,9 @@ func WaitForDeploymentCompleteAndCheckRolling(c clientset.Interface, d *extensio
 	return testutils.WaitForDeploymentCompleteAndCheckRolling(c, d, Logf, Poll, pollLongTimeout)
 }
 
-// WaitForDeploymentUpdatedReplicasLTE waits for given deployment to be observed by the controller and has at least a number of updatedReplicas
-func WaitForDeploymentUpdatedReplicasLTE(c clientset.Interface, ns, deploymentName string, minUpdatedReplicas int32, desiredGeneration int64) error {
-	return testutils.WaitForDeploymentUpdatedReplicasLTE(c, ns, deploymentName, minUpdatedReplicas, desiredGeneration, Poll, pollLongTimeout)
+// WaitForDeploymentUpdatedReplicasGTE waits for the given deployment to be observed by the controller and to have at least minUpdatedReplicas updated replicas
+func WaitForDeploymentUpdatedReplicasGTE(c clientset.Interface, ns, deploymentName string, minUpdatedReplicas int32, desiredGeneration int64) error {
+	return testutils.WaitForDeploymentUpdatedReplicasGTE(c, ns, deploymentName, minUpdatedReplicas, desiredGeneration, Poll, pollLongTimeout)
 }
 
 // WaitForDeploymentRollbackCleared waits for given deployment either started rolling back or doesn't need to rollback.
```
```diff
@@ -18,12 +18,15 @@ go_test(
     tags = ["integration"],
     deps = [
         "//pkg/controller/deployment/util:go_default_library",
+        "//pkg/util/pointer:go_default_library",
         "//test/integration/framework:go_default_library",
         "//vendor/k8s.io/api/core/v1:go_default_library",
         "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
+        "//vendor/k8s.io/client-go/util/retry:go_default_library",
     ],
 )
 
```
```diff
@@ -26,8 +26,11 @@ import (
 	"k8s.io/api/extensions/v1beta1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/intstr"
+	"k8s.io/apimachinery/pkg/util/uuid"
 	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/client-go/util/retry"
 	deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
+	"k8s.io/kubernetes/pkg/util/pointer"
 	"k8s.io/kubernetes/test/integration/framework"
 )
 
```
```diff
@@ -775,7 +778,7 @@ func TestFailedDeployment(t *testing.T) {
 	go rm.Run(5, stopCh)
 	go dc.Run(5, stopCh)
 
-	if err = tester.waitForDeploymentUpdatedReplicasLTE(replicas); err != nil {
+	if err = tester.waitForDeploymentUpdatedReplicasGTE(replicas); err != nil {
 		t.Fatal(err)
 	}
 
```
```diff
@@ -1068,3 +1071,311 @@ func TestScaledRolloutDeployment(t *testing.T) {
 		}
 	}
 }
+
+func TestSpecReplicasChange(t *testing.T) {
+	s, closeFn, rm, dc, informers, c := dcSetup(t)
+	defer closeFn()
+	name := "test-spec-replicas-change"
+	ns := framework.CreateTestingNamespace(name, s, t)
+	defer framework.DeleteTestingNamespace(ns, s, t)
+
+	deploymentName := "deployment"
+	replicas := int32(1)
+	tester := &deploymentTester{t: t, c: c, deployment: newDeployment(deploymentName, ns.Name, replicas)}
+	tester.deployment.Spec.Strategy.Type = v1beta1.RecreateDeploymentStrategyType
+	tester.deployment.Spec.Strategy.RollingUpdate = nil
+	var err error
+	tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment)
+	if err != nil {
+		t.Fatalf("failed to create deployment %q: %v", deploymentName, err)
+	}
+
+	// Start informer and controllers
+	stopCh := make(chan struct{})
+	defer close(stopCh)
+	informers.Start(stopCh)
+	go rm.Run(5, stopCh)
+	go dc.Run(5, stopCh)
+
+	// Scale up/down deployment and verify its replicaset has matching .spec.replicas
+	if err = tester.scaleDeployment(2); err != nil {
+		t.Fatal(err)
+	}
+	if err = tester.scaleDeployment(0); err != nil {
+		t.Fatal(err)
+	}
+	if err = tester.scaleDeployment(1); err != nil {
+		t.Fatal(err)
+	}
+
+	// Make a spec change other than .spec.replicas (here .spec.revisionHistoryLimit)
+	// to test that the deployment's status still updates without a replicas change
+	var oldGeneration int64
+	tester.deployment, err = tester.updateDeployment(func(update *v1beta1.Deployment) {
+		oldGeneration = update.Generation
+		update.Spec.RevisionHistoryLimit = pointer.Int32Ptr(4)
+	})
+	if err != nil {
+		t.Fatalf("failed updating deployment %q: %v", tester.deployment.Name, err)
+	}
+
+	savedGeneration := tester.deployment.Generation
+	if savedGeneration == oldGeneration {
+		t.Fatalf("Failed to verify .Generation has incremented for deployment %q", deploymentName)
+	}
+	if err = tester.waitForObservedDeployment(savedGeneration); err != nil {
+		t.Fatal(err)
+	}
+}
```
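TestSpecReplicasChange leans on a core API-machinery invariant: the API server bumps `.metadata.generation` on every spec mutation, and the deployment controller records the generation it has processed in `.status.observedGeneration`. A hedged sketch of that check as a standalone helper (the function name is hypothetical):

```go
package deploymenttest

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// controllerCaughtUp is a hypothetical helper: it reports whether the
// deployment controller has observed the deployment's latest spec generation.
func controllerCaughtUp(c clientset.Interface, ns, name string) (bool, error) {
	d, err := c.ExtensionsV1beta1().Deployments(ns).Get(name, metav1.GetOptions{})
	if err != nil {
		return false, err
	}
	return d.Status.ObservedGeneration >= d.Generation, nil
}
```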
```diff
+
+func TestDeploymentAvailableCondition(t *testing.T) {
+	s, closeFn, rm, dc, informers, c := dcSetup(t)
+	defer closeFn()
+	name := "test-deployment-available-condition"
+	ns := framework.CreateTestingNamespace(name, s, t)
+	defer framework.DeleteTestingNamespace(ns, s, t)
+
+	deploymentName := "deployment"
+	replicas := int32(10)
+	tester := &deploymentTester{t: t, c: c, deployment: newDeployment(deploymentName, ns.Name, replicas)}
+	// Assign a high value to the deployment's minReadySeconds
+	tester.deployment.Spec.MinReadySeconds = 3600
+	var err error
+	tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment)
+	if err != nil {
+		t.Fatalf("failed to create deployment %q: %v", deploymentName, err)
+	}
+
+	// Start informer and controllers
+	stopCh := make(chan struct{})
+	defer close(stopCh)
+	informers.Start(stopCh)
+	go rm.Run(5, stopCh)
+	go dc.Run(5, stopCh)
+
+	// Wait for the deployment to be observed by the controller and to have at least the specified number of updated replicas
+	if err = tester.waitForDeploymentUpdatedReplicasGTE(replicas); err != nil {
+		t.Fatal(err)
+	}
+
+	// Wait for the deployment to have MinimumReplicasUnavailable reason because the pods are not marked as ready
+	if err = tester.waitForDeploymentWithCondition(deploymentutil.MinimumReplicasUnavailable, v1beta1.DeploymentAvailable); err != nil {
+		t.Fatal(err)
+	}
+
+	// Verify all replicas fields of DeploymentStatus have desired counts
+	if err = tester.checkDeploymentStatusReplicasFields(10, 10, 0, 0, 10); err != nil {
+		t.Fatal(err)
+	}
+
+	// Mark the pods as ready without waiting for the deployment to complete
+	if err = tester.markUpdatedPodsReadyWithoutComplete(); err != nil {
+		t.Fatal(err)
+	}
+
+	// Wait for number of ready replicas to equal number of replicas.
+	if err = tester.waitForReadyReplicas(); err != nil {
+		t.Fatal(err)
+	}
+
+	// Wait for the deployment to still have MinimumReplicasUnavailable reason within the minReadySeconds period
+	if err = tester.waitForDeploymentWithCondition(deploymentutil.MinimumReplicasUnavailable, v1beta1.DeploymentAvailable); err != nil {
+		t.Fatal(err)
+	}
+
+	// Verify all replicas fields of DeploymentStatus have desired counts
+	if err = tester.checkDeploymentStatusReplicasFields(10, 10, 10, 0, 10); err != nil {
+		t.Fatal(err)
+	}
+
+	// Update the deployment's minReadySeconds to a small value
+	tester.deployment, err = tester.updateDeployment(func(update *v1beta1.Deployment) {
+		update.Spec.MinReadySeconds = 1
+	})
+	if err != nil {
+		t.Fatalf("failed updating deployment %q: %v", deploymentName, err)
+	}
+
+	// Wait for the deployment to notice minReadySeconds has changed
+	if err := tester.waitForObservedDeployment(tester.deployment.Generation); err != nil {
+		t.Fatal(err)
+	}
+
+	// Wait for the deployment to have MinimumReplicasAvailable reason after the minReadySeconds period
+	if err = tester.waitForDeploymentWithCondition(deploymentutil.MinimumReplicasAvailable, v1beta1.DeploymentAvailable); err != nil {
+		t.Fatal(err)
+	}
+
+	// Verify all replicas fields of DeploymentStatus have desired counts
+	if err = tester.checkDeploymentStatusReplicasFields(10, 10, 10, 10, 0); err != nil {
+		t.Fatal(err)
+	}
+}
```
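The availability assertions above hinge on minReadySeconds: a pod counts toward `.status.availableReplicas` only after it has been ready for at least that long, so with minReadySeconds set to 3600 the deployment keeps the MinimumReplicasUnavailable reason even once every pod is ready. A hedged sketch of inspecting the condition directly, assuming `deploymentutil.GetDeploymentCondition` exists with this shape in the package imported above:

```go
// Sketch: read the Available condition the test polls for. d is an assumed
// *v1beta1.Deployment; GetDeploymentCondition is assumed to take (status, type).
cond := deploymentutil.GetDeploymentCondition(d.Status, v1beta1.DeploymentAvailable)
available := cond != nil &&
	cond.Status == v1.ConditionTrue && // core/v1 ConditionStatus
	cond.Reason == deploymentutil.MinimumReplicasAvailable
```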
```diff
+
+// Wait for the deployment to automatically patch an incorrect ControllerRef of the RS
+func testRSControllerRefPatch(t *testing.T, tester *deploymentTester, rs *v1beta1.ReplicaSet, ownerReference *metav1.OwnerReference, expectedOwnerReferenceNum int) {
+	ns := rs.Namespace
+	rsClient := tester.c.ExtensionsV1beta1().ReplicaSets(ns)
+	rs, err := tester.updateReplicaSet(rs.Name, func(update *v1beta1.ReplicaSet) {
+		update.OwnerReferences = []metav1.OwnerReference{*ownerReference}
+	})
+	if err != nil {
+		t.Fatalf("failed to update replicaset %q: %v", rs.Name, err)
+	}
+
+	if err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
+		newRS, err := rsClient.Get(rs.Name, metav1.GetOptions{})
+		if err != nil {
+			return false, err
+		}
+		return metav1.GetControllerOf(newRS) != nil, nil
+	}); err != nil {
+		t.Fatalf("failed to wait for controllerRef of the replicaset %q to become non-nil: %v", rs.Name, err)
+	}
+
+	newRS, err := rsClient.Get(rs.Name, metav1.GetOptions{})
+	if err != nil {
+		t.Fatalf("failed to obtain replicaset %q: %v", rs.Name, err)
+	}
+	controllerRef := metav1.GetControllerOf(newRS)
+	if controllerRef.UID != tester.deployment.UID {
+		t.Fatalf("controllerRef of replicaset %q has a different UID: Expected %v, got %v", newRS.Name, tester.deployment.UID, controllerRef.UID)
+	}
+	ownerReferenceNum := len(newRS.GetOwnerReferences())
+	if ownerReferenceNum != expectedOwnerReferenceNum {
+		t.Fatalf("unexpected number of owner references for replicaset %q: Expected %d, got %d", newRS.Name, expectedOwnerReferenceNum, ownerReferenceNum)
+	}
+}
```
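The helper's final checks rely on the ownership convention that at most one OwnerReference may carry Controller=true; `metav1.GetControllerOf` returns that controlling reference, or nil when none exists. A minimal sketch of the same check in isolation (identifiers assumed):

```go
// Sketch: is rs controlled by this deployment? metav1.GetControllerOf
// returns the OwnerReference with Controller=true, or nil if there is none.
if ref := metav1.GetControllerOf(rs); ref == nil || ref.UID != deployment.UID {
	// not (yet) adopted — the deployment controller is expected to patch it
}
```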
```diff
+
+func TestGeneralReplicaSetAdoption(t *testing.T) {
+	s, closeFn, rm, dc, informers, c := dcSetup(t)
+	defer closeFn()
+	name := "test-general-replicaset-adoption"
+	ns := framework.CreateTestingNamespace(name, s, t)
+	defer framework.DeleteTestingNamespace(ns, s, t)
+
+	deploymentName := "deployment"
+	replicas := int32(1)
+	tester := &deploymentTester{t: t, c: c, deployment: newDeployment(deploymentName, ns.Name, replicas)}
+	var err error
+	tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment)
+	if err != nil {
+		t.Fatalf("failed to create deployment %q: %v", deploymentName, err)
+	}
+
+	// Start informer and controllers
+	stopCh := make(chan struct{})
+	defer close(stopCh)
+	informers.Start(stopCh)
+	go rm.Run(5, stopCh)
+	go dc.Run(5, stopCh)
+
+	// Wait for the Deployment to be updated to revision 1
+	if err := tester.waitForDeploymentRevisionAndImage("1", fakeImage); err != nil {
+		t.Fatal(err)
+	}
+
+	// Ensure the deployment completes while marking its pods as ready simultaneously
+	if err := tester.waitForDeploymentCompleteAndMarkPodsReady(); err != nil {
+		t.Fatal(err)
+	}
+
+	// Get replicaset of the deployment
+	rs, err := deploymentutil.GetNewReplicaSet(tester.deployment, c.ExtensionsV1beta1())
+	if err != nil {
+		t.Fatalf("failed to get replicaset of deployment %q: %v", deploymentName, err)
+	}
+	if rs == nil {
+		t.Fatalf("unable to find replicaset of deployment %q", deploymentName)
+	}
+
+	// When the only OwnerReference of the RS points to another type of API object such as statefulset
+	// with Controller=false, the deployment should add a second OwnerReference (ControllerRef) pointing to itself
+	// with Controller=true
+	var falseVar = false
+	ownerReference := metav1.OwnerReference{UID: uuid.NewUUID(), APIVersion: "apps/v1beta1", Kind: "StatefulSet", Name: deploymentName, Controller: &falseVar}
+	testRSControllerRefPatch(t, tester, rs, &ownerReference, 2)
+
+	// When the only OwnerReference of the RS points to the deployment with Controller=false,
+	// the deployment should set Controller=true for the only OwnerReference
+	ownerReference = metav1.OwnerReference{UID: tester.deployment.UID, APIVersion: "extensions/v1beta1", Kind: "Deployment", Name: deploymentName, Controller: &falseVar}
+	testRSControllerRefPatch(t, tester, rs, &ownerReference, 1)
+}
+
+func testScalingUsingScaleSubresource(t *testing.T, tester *deploymentTester, replicas int32) {
+	ns := tester.deployment.Namespace
+	deploymentName := tester.deployment.Name
+	deploymentClient := tester.c.ExtensionsV1beta1().Deployments(ns)
+	deployment, err := deploymentClient.Get(deploymentName, metav1.GetOptions{})
+	if err != nil {
+		t.Fatalf("Failed to obtain deployment %q: %v", deploymentName, err)
+	}
+	kind := "Deployment"
+	scaleClient := tester.c.ExtensionsV1beta1().Scales(ns)
+	scale, err := scaleClient.Get(kind, deploymentName)
+	if err != nil {
+		t.Fatalf("Failed to obtain scale subresource for deployment %q: %v", deploymentName, err)
+	}
+	if scale.Spec.Replicas != *deployment.Spec.Replicas {
+		t.Fatalf("Scale subresource for deployment %q does not match .Spec.Replicas: expected %d, got %d", deploymentName, *deployment.Spec.Replicas, scale.Spec.Replicas)
+	}
+
+	if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
+		scale, err := scaleClient.Get(kind, deploymentName)
+		if err != nil {
+			return err
+		}
+		scale.Spec.Replicas = replicas
+		_, err = scaleClient.Update(kind, scale)
+		return err
+	}); err != nil {
+		t.Fatalf("Failed to set .Spec.Replicas of scale subresource for deployment %q: %v", deploymentName, err)
+	}
+
+	deployment, err = deploymentClient.Get(deploymentName, metav1.GetOptions{})
+	if err != nil {
+		t.Fatalf("Failed to obtain deployment %q: %v", deploymentName, err)
+	}
+	if *deployment.Spec.Replicas != replicas {
+		t.Fatalf(".Spec.Replicas of deployment %q does not match its scale subresource: expected %d, got %d", deploymentName, replicas, *deployment.Spec.Replicas)
+	}
+}
+
+func TestDeploymentScaleSubresource(t *testing.T) {
+	s, closeFn, rm, dc, informers, c := dcSetup(t)
+	defer closeFn()
+	name := "test-deployment-scale-subresource"
+	ns := framework.CreateTestingNamespace(name, s, t)
+	defer framework.DeleteTestingNamespace(ns, s, t)
+
+	deploymentName := "deployment"
+	replicas := int32(2)
+	tester := &deploymentTester{t: t, c: c, deployment: newDeployment(deploymentName, ns.Name, replicas)}
+	var err error
+	tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment)
+	if err != nil {
+		t.Fatalf("failed to create deployment %q: %v", deploymentName, err)
+	}
+
+	// Start informer and controllers
+	stopCh := make(chan struct{})
+	defer close(stopCh)
+	informers.Start(stopCh)
+	go rm.Run(5, stopCh)
+	go dc.Run(5, stopCh)
+
+	// Wait for the Deployment to be updated to revision 1
+	if err := tester.waitForDeploymentRevisionAndImage("1", fakeImage); err != nil {
+		t.Fatal(err)
+	}
+
+	// Ensure the deployment completes while marking its pods as ready simultaneously
+	if err := tester.waitForDeploymentCompleteAndMarkPodsReady(); err != nil {
+		t.Fatal(err)
+	}
+
+	// Use scale subresource to scale the deployment up to 3
+	testScalingUsingScaleSubresource(t, tester, 3)
+	// Use the scale subresource to scale the deployment down to 0
+	testScalingUsingScaleSubresource(t, tester, 0)
+}
```
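For contrast with the scale-subresource path exercised above, here is a hedged sketch of the non-subresource equivalent: a read-modify-write of the deployment object itself wrapped in `retry.RetryOnConflict`. The scale subresource exists so that generic tooling can scale any workload kind without knowing its full spec; identifiers `c`, `ns`, and `name` are assumed to be in scope.

```go
// Sketch: scale by updating the deployment object directly, retrying on
// optimistic-concurrency conflicts. Assumes clientset c, namespace ns, name.
err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
	d, err := c.ExtensionsV1beta1().Deployments(ns).Get(name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	replicas := int32(3)
	d.Spec.Replicas = &replicas
	_, err = c.ExtensionsV1beta1().Deployments(ns).Update(d)
	return err
})
```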
```diff
@@ -368,6 +368,10 @@ func (d *deploymentTester) updateReplicaSet(name string, applyUpdate testutil.Up
 	return testutil.UpdateReplicaSetWithRetries(d.c, d.deployment.Namespace, name, applyUpdate, d.t.Logf, pollInterval, pollTimeout)
 }
 
+func (d *deploymentTester) updateReplicaSetStatus(name string, applyStatusUpdate testutil.UpdateReplicaSetFunc) (*v1beta1.ReplicaSet, error) {
+	return testutil.UpdateReplicaSetStatusWithRetries(d.c, d.deployment.Namespace, name, applyStatusUpdate, d.t.Logf, pollInterval, pollTimeout)
+}
+
 // waitForDeploymentRollbackCleared waits for deployment either started rolling back or doesn't need to rollback.
 func (d *deploymentTester) waitForDeploymentRollbackCleared() error {
 	return testutil.WaitForDeploymentRollbackCleared(d.c, d.deployment.Namespace, d.deployment.Name, pollInterval, pollTimeout)
```
```diff
@@ -378,8 +382,8 @@ func (d *deploymentTester) checkDeploymentRevisionAndImage(revision, image strin
 	return testutil.CheckDeploymentRevisionAndImage(d.c, d.deployment.Namespace, d.deployment.Name, revision, image)
 }
 
-func (d *deploymentTester) waitForDeploymentUpdatedReplicasLTE(minUpdatedReplicas int32) error {
-	return testutil.WaitForDeploymentUpdatedReplicasLTE(d.c, d.deployment.Namespace, d.deployment.Name, minUpdatedReplicas, d.deployment.Generation, pollInterval, pollTimeout)
+func (d *deploymentTester) waitForDeploymentUpdatedReplicasGTE(minUpdatedReplicas int32) error {
+	return testutil.WaitForDeploymentUpdatedReplicasGTE(d.c, d.deployment.Namespace, d.deployment.Name, minUpdatedReplicas, d.deployment.Generation, pollInterval, pollTimeout)
 }
 
 func (d *deploymentTester) waitForDeploymentWithCondition(reason string, condType v1beta1.DeploymentConditionType) error {
```
```diff
@@ -416,3 +420,89 @@ func (d *deploymentTester) listUpdatedPods() ([]v1.Pod, error) {
 func (d *deploymentTester) waitRSStable(replicaset *v1beta1.ReplicaSet) error {
 	return testutil.WaitRSStable(d.t, d.c, replicaset, pollInterval, pollTimeout)
 }
+
+func (d *deploymentTester) scaleDeployment(newReplicas int32) error {
+	var err error
+	d.deployment, err = d.updateDeployment(func(update *v1beta1.Deployment) {
+		update.Spec.Replicas = &newReplicas
+	})
+	if err != nil {
+		return fmt.Errorf("failed updating deployment %q: %v", d.deployment.Name, err)
+	}
+
+	if err := d.waitForDeploymentCompleteAndMarkPodsReady(); err != nil {
+		return err
+	}
+
+	rs, err := d.expectNewReplicaSet()
+	if err != nil {
+		return err
+	}
+	if *rs.Spec.Replicas != newReplicas {
+		return fmt.Errorf("expected new replicaset replicas = %d, got %d", newReplicas, *rs.Spec.Replicas)
+	}
+	return nil
+}
+
+// waitForReadyReplicas waits for the number of ready replicas to equal the number of replicas.
+func (d *deploymentTester) waitForReadyReplicas() error {
+	if err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
+		deployment, err := d.c.ExtensionsV1beta1().Deployments(d.deployment.Namespace).Get(d.deployment.Name, metav1.GetOptions{})
+		if err != nil {
+			return false, fmt.Errorf("failed to get deployment %q: %v", d.deployment.Name, err)
+		}
+		return deployment.Status.ReadyReplicas == *deployment.Spec.Replicas, nil
+	}); err != nil {
+		return fmt.Errorf("failed to wait for .readyReplicas to equal .replicas: %v", err)
+	}
+	return nil
+}
+
+// markUpdatedPodsReadyWithoutComplete marks updated Deployment pods as ready without waiting for the deployment to complete.
+func (d *deploymentTester) markUpdatedPodsReadyWithoutComplete() error {
+	if err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
+		pods, err := d.listUpdatedPods()
+		if err != nil {
+			return false, err
+		}
+		for i := range pods {
+			pod := pods[i]
+			if podutil.IsPodReady(&pod) {
+				continue
+			}
+			if err = markPodReady(d.c, d.deployment.Namespace, &pod); err != nil {
+				d.t.Logf("failed to update Deployment pod %q, will retry later: %v", pod.Name, err)
+				return false, nil
+			}
+		}
+		return true, nil
+	}); err != nil {
+		return fmt.Errorf("failed to mark all updated pods as ready: %v", err)
+	}
+	return nil
+}
+
+// checkDeploymentStatusReplicasFields verifies that all replicas fields of DeploymentStatus have the desired counts,
+// returning an error as soon as a non-matching field is found.
+func (d *deploymentTester) checkDeploymentStatusReplicasFields(replicas, updatedReplicas, readyReplicas, availableReplicas, unavailableReplicas int32) error {
+	deployment, err := d.c.ExtensionsV1beta1().Deployments(d.deployment.Namespace).Get(d.deployment.Name, metav1.GetOptions{})
+	if err != nil {
+		return fmt.Errorf("failed to get deployment %q: %v", d.deployment.Name, err)
+	}
+	if deployment.Status.Replicas != replicas {
+		return fmt.Errorf("unexpected .replicas: expect %d, got %d", replicas, deployment.Status.Replicas)
+	}
+	if deployment.Status.UpdatedReplicas != updatedReplicas {
+		return fmt.Errorf("unexpected .updatedReplicas: expect %d, got %d", updatedReplicas, deployment.Status.UpdatedReplicas)
+	}
+	if deployment.Status.ReadyReplicas != readyReplicas {
+		return fmt.Errorf("unexpected .readyReplicas: expect %d, got %d", readyReplicas, deployment.Status.ReadyReplicas)
+	}
+	if deployment.Status.AvailableReplicas != availableReplicas {
+		return fmt.Errorf("unexpected .availableReplicas: expect %d, got %d", availableReplicas, deployment.Status.AvailableReplicas)
+	}
+	if deployment.Status.UnavailableReplicas != unavailableReplicas {
+		return fmt.Errorf("unexpected .unavailableReplicas: expect %d, got %d", unavailableReplicas, deployment.Status.UnavailableReplicas)
+	}
+	return nil
+}
```
```diff
@@ -302,8 +302,8 @@ func WaitForDeploymentRollbackCleared(c clientset.Interface, ns, deploymentName
 	return nil
 }
 
-// WaitForDeploymentUpdatedReplicasLTE waits for given deployment to be observed by the controller and has at least a number of updatedReplicas
-func WaitForDeploymentUpdatedReplicasLTE(c clientset.Interface, ns, deploymentName string, minUpdatedReplicas int32, desiredGeneration int64, pollInterval, pollTimeout time.Duration) error {
+// WaitForDeploymentUpdatedReplicasGTE waits for the given deployment to be observed by the controller and to have at least minUpdatedReplicas updated replicas
+func WaitForDeploymentUpdatedReplicasGTE(c clientset.Interface, ns, deploymentName string, minUpdatedReplicas int32, desiredGeneration int64, pollInterval, pollTimeout time.Duration) error {
 	var deployment *extensions.Deployment
 	err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
 		d, err := c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
```
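The hunk above cuts off after the first Get inside the poll; for orientation, here is a hedged sketch of how such a loop plausibly completes. It is consistent with the visible lines but is an illustration, not necessarily the committed body:

```go
// Illustrative completion of the polling loop (assumptions as stated above).
var deployment *extensions.Deployment
err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
	d, err := c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
	if err != nil {
		return false, err
	}
	deployment = d
	// Done once the controller has seen desiredGeneration and reports
	// at least minUpdatedReplicas updated replicas.
	return d.Status.ObservedGeneration >= desiredGeneration &&
		d.Status.UpdatedReplicas >= minUpdatedReplicas, nil
})
if err != nil {
	return fmt.Errorf("deployment %q never reported >= %d updated replicas: %v (last seen: %+v)",
		deploymentName, minUpdatedReplicas, err, deployment)
}
return nil
```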
```diff
@@ -66,3 +66,26 @@ func WaitRSStable(t *testing.T, clientSet clientset.Interface, rs *extensions.Re
 	}
 	return nil
 }
+
+func UpdateReplicaSetStatusWithRetries(c clientset.Interface, namespace, name string, applyUpdate UpdateReplicaSetFunc, logf LogfFn, pollInterval, pollTimeout time.Duration) (*extensions.ReplicaSet, error) {
+	var rs *extensions.ReplicaSet
+	var updateErr error
+	pollErr := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
+		var err error
+		if rs, err = c.ExtensionsV1beta1().ReplicaSets(namespace).Get(name, metav1.GetOptions{}); err != nil {
+			return false, err
+		}
+		// Apply the update, then attempt to push it to the apiserver.
+		applyUpdate(rs)
+		if rs, err = c.ExtensionsV1beta1().ReplicaSets(namespace).UpdateStatus(rs); err == nil {
+			logf("Updating replica set %q", name)
+			return true, nil
+		}
+		updateErr = err
+		return false, nil
+	})
+	if pollErr == wait.ErrWaitTimeout {
+		pollErr = fmt.Errorf("couldn't apply the provided update to replicaset %q: %v", name, updateErr)
+	}
+	return rs, pollErr
+}
```
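A short, hypothetical call site for the new helper, mirroring how the deploymentTester wrapper invokes it; every identifier here is illustrative:

```go
// Hypothetical usage: mark all of a ReplicaSet's replicas ready in status,
// retrying transient update failures. Identifiers are illustrative only.
rs, err := UpdateReplicaSetStatusWithRetries(c, ns, rsName,
	func(rs *extensions.ReplicaSet) {
		rs.Status.ReadyReplicas = rs.Status.Replicas
	},
	t.Logf, pollInterval, pollTimeout)
if err != nil {
	t.Fatalf("failed to update status of replicaset %q: %v", rsName, err)
}
_ = rs
```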