Merge pull request #61419 from enisoc/apps-v1-deploy

Automatic merge from submit-queue (batch tested with PRs 62756, 63862, 61419, 64015, 64063). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Use apps/v1 Deployment/ReplicaSet in controller and kubectl

This updates the Deployment controller and integration/e2e tests to use apps/v1, as part of #55714.

This also requires updating any other components that use the `deployment/util` package, most notably `kubectl`. That means client versions 1.11 and above will only work with server versions 1.9 and above. This is well within our client-server version skew policy of +/-1 minor version.

However, this PR *only* updates the parts of `kubectl` that used `deployment/util`. So although kubectl now requires apps/v1, it still also depends on extensions/v1beta1. Migrating other parts of kubectl to apps/v1 is beyond the scope of this PR, which was just to change the Deployment controller and fix all the fallout.
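To illustrate the compatibility note above (this sketch is not part of the PR): a client built on client-go's typed `apps/v1` API only works against servers that serve `apps/v1`, i.e. v1.9.0 and newer. The kubeconfig path below is illustrative, and the example assumes the pre-context client-go method signatures used elsewhere in this diff.

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Illustrative kubeconfig path; replace with your own.
	config, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	client, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}
	// List Deployments through the apps/v1 typed client. Against a server
	// older than v1.9.0 this returns an error, since apps/v1 is not served.
	deployments, err := client.AppsV1().Deployments(metav1.NamespaceDefault).List(metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for i := range deployments.Items {
		d := &deployments.Items[i]
		fmt.Printf("%s/%s: %d replicas\n", d.Namespace, d.Name, d.Status.Replicas)
	}
}
```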

```release-note
kubectl: This client version requires the `apps/v1` APIs, so it will not work against a cluster version older than v1.9.0. Note that kubectl only guarantees compatibility with clusters that are +/-1 minor version away.
```
Kubernetes Submit Queue 2018-05-23 18:14:13 -07:00 committed by GitHub
commit 5fe35cdbf9
52 changed files with 1043 additions and 1088 deletions

View File

@ -11,7 +11,6 @@ go_library(
"cloudproviders.go",
"controllermanager.go",
"core.go",
"extensions.go",
"import_known_versions.go",
"plugins.go",
"policy.go",

View File

@ -25,6 +25,7 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/kubernetes/pkg/controller/daemon"
"k8s.io/kubernetes/pkg/controller/deployment"
"k8s.io/kubernetes/pkg/controller/replicaset"
"k8s.io/kubernetes/pkg/controller/statefulset"
)
@ -73,3 +74,20 @@ func startReplicaSetController(ctx ControllerContext) (bool, error) {
).Run(int(ctx.ComponentConfig.ReplicaSetController.ConcurrentRSSyncs), ctx.Stop)
return true, nil
}
func startDeploymentController(ctx ControllerContext) (bool, error) {
if !ctx.AvailableResources[schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}] {
return false, nil
}
dc, err := deployment.NewDeploymentController(
ctx.InformerFactory.Apps().V1().Deployments(),
ctx.InformerFactory.Apps().V1().ReplicaSets(),
ctx.InformerFactory.Core().V1().Pods(),
ctx.ClientBuilder.ClientOrDie("deployment-controller"),
)
if err != nil {
return true, fmt.Errorf("error creating Deployment controller: %v", err)
}
go dc.Run(int(ctx.ComponentConfig.DeploymentController.ConcurrentDeploymentSyncs), ctx.Stop)
return true, nil
}

View File

@ -1,45 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package app implements a server that runs a set of active
// components. This includes replication controllers, service endpoints and
// nodes.
//
package app
import (
"fmt"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/kubernetes/pkg/controller/deployment"
)
func startDeploymentController(ctx ControllerContext) (bool, error) {
if !ctx.AvailableResources[schema.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "deployments"}] {
return false, nil
}
dc, err := deployment.NewDeploymentController(
ctx.InformerFactory.Extensions().V1beta1().Deployments(),
ctx.InformerFactory.Extensions().V1beta1().ReplicaSets(),
ctx.InformerFactory.Core().V1().Pods(),
ctx.ClientBuilder.ClientOrDie("deployment-controller"),
)
if err != nil {
return true, fmt.Errorf("error creating Deployment controller: %v", err)
}
go dc.Run(int(ctx.ComponentConfig.DeploymentController.ConcurrentDeploymentSyncs), ctx.Stop)
return true, nil
}

View File

@ -19,8 +19,8 @@ go_test(
"//pkg/controller/testutil:go_default_library",
"//pkg/securitycontext:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
@ -64,7 +64,6 @@ go_library(
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/authentication/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",

View File

@ -23,7 +23,6 @@ import (
"github.com/golang/glog"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
@ -301,18 +300,18 @@ func NewReplicaSetControllerRefManager(
// If the error is nil, either the reconciliation succeeded, or no
// reconciliation was necessary. The list of ReplicaSets that you now own is
// returned.
func (m *ReplicaSetControllerRefManager) ClaimReplicaSets(sets []*extensions.ReplicaSet) ([]*extensions.ReplicaSet, error) {
var claimed []*extensions.ReplicaSet
func (m *ReplicaSetControllerRefManager) ClaimReplicaSets(sets []*apps.ReplicaSet) ([]*apps.ReplicaSet, error) {
var claimed []*apps.ReplicaSet
var errlist []error
match := func(obj metav1.Object) bool {
return m.Selector.Matches(labels.Set(obj.GetLabels()))
}
adopt := func(obj metav1.Object) error {
return m.AdoptReplicaSet(obj.(*extensions.ReplicaSet))
return m.AdoptReplicaSet(obj.(*apps.ReplicaSet))
}
release := func(obj metav1.Object) error {
return m.ReleaseReplicaSet(obj.(*extensions.ReplicaSet))
return m.ReleaseReplicaSet(obj.(*apps.ReplicaSet))
}
for _, rs := range sets {
@ -330,7 +329,7 @@ func (m *ReplicaSetControllerRefManager) ClaimReplicaSets(sets []*extensions.Rep
// AdoptReplicaSet sends a patch to take control of the ReplicaSet. It returns
// the error if the patching fails.
func (m *ReplicaSetControllerRefManager) AdoptReplicaSet(rs *extensions.ReplicaSet) error {
func (m *ReplicaSetControllerRefManager) AdoptReplicaSet(rs *apps.ReplicaSet) error {
if err := m.CanAdopt(); err != nil {
return fmt.Errorf("can't adopt ReplicaSet %v/%v (%v): %v", rs.Namespace, rs.Name, rs.UID, err)
}
@ -345,7 +344,7 @@ func (m *ReplicaSetControllerRefManager) AdoptReplicaSet(rs *extensions.ReplicaS
// ReleaseReplicaSet sends a patch to free the ReplicaSet from the control of the Deployment controller.
// It returns the error if the patching fails. 404 and 422 errors are ignored.
func (m *ReplicaSetControllerRefManager) ReleaseReplicaSet(replicaSet *extensions.ReplicaSet) error {
func (m *ReplicaSetControllerRefManager) ReleaseReplicaSet(replicaSet *apps.ReplicaSet) error {
glog.V(2).Infof("patching ReplicaSet %s_%s to remove its controllerRef to %s/%s:%s",
replicaSet.Namespace, replicaSet.Name, m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.Controller.GetName())
deleteOwnerRefPatch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, m.Controller.GetUID(), replicaSet.UID)

View File

@ -20,8 +20,8 @@ import (
"reflect"
"testing"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
"k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
@ -52,7 +52,7 @@ func newPod(podName string, label map[string]string, owner metav1.Object) *v1.Po
},
}
if owner != nil {
pod.OwnerReferences = []metav1.OwnerReference{*metav1.NewControllerRef(owner, v1beta1.SchemeGroupVersion.WithKind("Fake"))}
pod.OwnerReferences = []metav1.OwnerReference{*metav1.NewControllerRef(owner, apps.SchemeGroupVersion.WithKind("Fake"))}
}
return pod
}

View File

@ -25,8 +25,8 @@ import (
"sync/atomic"
"time"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
@ -818,18 +818,18 @@ func IsPodActive(p *v1.Pod) bool {
}
// FilterActiveReplicaSets returns replica sets that have (or at least ought to have) pods.
func FilterActiveReplicaSets(replicaSets []*extensions.ReplicaSet) []*extensions.ReplicaSet {
activeFilter := func(rs *extensions.ReplicaSet) bool {
func FilterActiveReplicaSets(replicaSets []*apps.ReplicaSet) []*apps.ReplicaSet {
activeFilter := func(rs *apps.ReplicaSet) bool {
return rs != nil && *(rs.Spec.Replicas) > 0
}
return FilterReplicaSets(replicaSets, activeFilter)
}
type filterRS func(rs *extensions.ReplicaSet) bool
type filterRS func(rs *apps.ReplicaSet) bool
// FilterReplicaSets returns replica sets that are filtered by filterFn (all returned ones should match filterFn).
func FilterReplicaSets(RSes []*extensions.ReplicaSet, filterFn filterRS) []*extensions.ReplicaSet {
var filtered []*extensions.ReplicaSet
func FilterReplicaSets(RSes []*apps.ReplicaSet, filterFn filterRS) []*apps.ReplicaSet {
var filtered []*apps.ReplicaSet
for i := range RSes {
if filterFn(RSes[i]) {
filtered = append(filtered, RSes[i])
@ -859,7 +859,7 @@ func (o ControllersByCreationTimestamp) Less(i, j int) bool {
}
// ReplicaSetsByCreationTimestamp sorts a list of ReplicaSet by creation timestamp, using their names as a tie breaker.
type ReplicaSetsByCreationTimestamp []*extensions.ReplicaSet
type ReplicaSetsByCreationTimestamp []*apps.ReplicaSet
func (o ReplicaSetsByCreationTimestamp) Len() int { return len(o) }
func (o ReplicaSetsByCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
@ -872,7 +872,7 @@ func (o ReplicaSetsByCreationTimestamp) Less(i, j int) bool {
// ReplicaSetsBySizeOlder sorts a list of ReplicaSet by size in descending order, using their creation timestamp or name as a tie breaker.
// By using the creation timestamp, this sorts from old to new replica sets.
type ReplicaSetsBySizeOlder []*extensions.ReplicaSet
type ReplicaSetsBySizeOlder []*apps.ReplicaSet
func (o ReplicaSetsBySizeOlder) Len() int { return len(o) }
func (o ReplicaSetsBySizeOlder) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
@ -885,7 +885,7 @@ func (o ReplicaSetsBySizeOlder) Less(i, j int) bool {
// ReplicaSetsBySizeNewer sorts a list of ReplicaSet by size in descending order, using their creation timestamp or name as a tie breaker.
// By using the creation timestamp, this sorts from new to old replica sets.
type ReplicaSetsBySizeNewer []*extensions.ReplicaSet
type ReplicaSetsBySizeNewer []*apps.ReplicaSet
func (o ReplicaSetsBySizeNewer) Len() int { return len(o) }
func (o ReplicaSetsBySizeNewer) Swap(i, j int) { o[i], o[j] = o[j], o[i] }

View File

@ -27,8 +27,8 @@ import (
"testing"
"time"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
@ -122,8 +122,8 @@ func newPodList(store cache.Store, count int, status v1.PodPhase, rc *v1.Replica
}
}
func newReplicaSet(name string, replicas int) *extensions.ReplicaSet {
return &extensions.ReplicaSet{
func newReplicaSet(name string, replicas int) *apps.ReplicaSet {
return &apps.ReplicaSet{
TypeMeta: metav1.TypeMeta{APIVersion: "v1"},
ObjectMeta: metav1.ObjectMeta{
UID: uuid.NewUUID(),
@ -131,7 +131,7 @@ func newReplicaSet(name string, replicas int) *extensions.ReplicaSet {
Namespace: metav1.NamespaceDefault,
ResourceVersion: "18",
},
Spec: extensions.ReplicaSetSpec{
Spec: apps.ReplicaSetSpec{
Replicas: func() *int32 { i := int32(replicas); return &i }(),
Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
Template: v1.PodTemplateSpec{
@ -417,7 +417,7 @@ func TestSortingActivePods(t *testing.T) {
}
func TestActiveReplicaSetsFiltering(t *testing.T) {
var replicaSets []*extensions.ReplicaSet
var replicaSets []*apps.ReplicaSet
replicaSets = append(replicaSets, newReplicaSet("zero", 0))
replicaSets = append(replicaSets, nil)
replicaSets = append(replicaSets, newReplicaSet("foo", 1))

View File

@ -23,6 +23,7 @@ go_library(
"//pkg/util/labels:go_default_library",
"//pkg/util/metrics:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
@ -32,13 +33,13 @@ go_library(
"//vendor/k8s.io/apimachinery/pkg/util/rand:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/informers/apps/v1:go_default_library",
"//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/informers/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/listers/apps/v1:go_default_library",
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/listers/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//vendor/k8s.io/client-go/util/integer:go_default_library",
@ -64,13 +65,13 @@ go_test(
"//pkg/apis/batch/install:go_default_library",
"//pkg/apis/certificates/install:go_default_library",
"//pkg/apis/core/install:go_default_library",
"//pkg/apis/extensions/install:go_default_library",
"//pkg/apis/policy/install:go_default_library",
"//pkg/apis/rbac/install:go_default_library",
"//pkg/apis/settings/install:go_default_library",
"//pkg/apis/storage/install:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/deployment/util:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",

View File

@ -27,21 +27,21 @@ import (
"github.com/golang/glog"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
appsinformers "k8s.io/client-go/informers/apps/v1"
coreinformers "k8s.io/client-go/informers/core/v1"
extensionsinformers "k8s.io/client-go/informers/extensions/v1beta1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
appslisters "k8s.io/client-go/listers/apps/v1"
corelisters "k8s.io/client-go/listers/core/v1"
extensionslisters "k8s.io/client-go/listers/extensions/v1beta1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
@ -60,7 +60,7 @@ const (
)
// controllerKind contains the schema.GroupVersionKind for this controller type.
var controllerKind = extensions.SchemeGroupVersion.WithKind("Deployment")
var controllerKind = apps.SchemeGroupVersion.WithKind("Deployment")
// DeploymentController is responsible for synchronizing Deployment objects stored
// in the system with actual running replica sets and pods.
@ -73,12 +73,12 @@ type DeploymentController struct {
// To allow injection of syncDeployment for testing.
syncHandler func(dKey string) error
// used for unit testing
enqueueDeployment func(deployment *extensions.Deployment)
enqueueDeployment func(deployment *apps.Deployment)
// dLister can list/get deployments from the shared informer's store
dLister extensionslisters.DeploymentLister
dLister appslisters.DeploymentLister
// rsLister can list/get replica sets from the shared informer's store
rsLister extensionslisters.ReplicaSetLister
rsLister appslisters.ReplicaSetLister
// podLister can list/get pods from the shared informer's store
podLister corelisters.PodLister
@ -97,7 +97,7 @@ type DeploymentController struct {
}
// NewDeploymentController creates a new DeploymentController.
func NewDeploymentController(dInformer extensionsinformers.DeploymentInformer, rsInformer extensionsinformers.ReplicaSetInformer, podInformer coreinformers.PodInformer, client clientset.Interface) (*DeploymentController, error) {
func NewDeploymentController(dInformer appsinformers.DeploymentInformer, rsInformer appsinformers.ReplicaSetInformer, podInformer coreinformers.PodInformer, client clientset.Interface) (*DeploymentController, error) {
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: client.CoreV1().Events("")})
@ -164,27 +164,27 @@ func (dc *DeploymentController) Run(workers int, stopCh <-chan struct{}) {
}
func (dc *DeploymentController) addDeployment(obj interface{}) {
d := obj.(*extensions.Deployment)
d := obj.(*apps.Deployment)
glog.V(4).Infof("Adding deployment %s", d.Name)
dc.enqueueDeployment(d)
}
func (dc *DeploymentController) updateDeployment(old, cur interface{}) {
oldD := old.(*extensions.Deployment)
curD := cur.(*extensions.Deployment)
oldD := old.(*apps.Deployment)
curD := cur.(*apps.Deployment)
glog.V(4).Infof("Updating deployment %s", oldD.Name)
dc.enqueueDeployment(curD)
}
func (dc *DeploymentController) deleteDeployment(obj interface{}) {
d, ok := obj.(*extensions.Deployment)
d, ok := obj.(*apps.Deployment)
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
utilruntime.HandleError(fmt.Errorf("Couldn't get object from tombstone %#v", obj))
return
}
d, ok = tombstone.Obj.(*extensions.Deployment)
d, ok = tombstone.Obj.(*apps.Deployment)
if !ok {
utilruntime.HandleError(fmt.Errorf("Tombstone contained object that is not a Deployment %#v", obj))
return
@ -196,7 +196,7 @@ func (dc *DeploymentController) deleteDeployment(obj interface{}) {
// addReplicaSet enqueues the deployment that manages a ReplicaSet when the ReplicaSet is created.
func (dc *DeploymentController) addReplicaSet(obj interface{}) {
rs := obj.(*extensions.ReplicaSet)
rs := obj.(*apps.ReplicaSet)
if rs.DeletionTimestamp != nil {
// On a restart of the controller manager, it's possible for an object to
@ -230,7 +230,7 @@ func (dc *DeploymentController) addReplicaSet(obj interface{}) {
// getDeploymentsForReplicaSet returns a list of Deployments that potentially
// match a ReplicaSet.
func (dc *DeploymentController) getDeploymentsForReplicaSet(rs *extensions.ReplicaSet) []*extensions.Deployment {
func (dc *DeploymentController) getDeploymentsForReplicaSet(rs *apps.ReplicaSet) []*apps.Deployment {
deployments, err := dc.dLister.GetDeploymentsForReplicaSet(rs)
if err != nil || len(deployments) == 0 {
return nil
@ -250,11 +250,11 @@ func (dc *DeploymentController) getDeploymentsForReplicaSet(rs *extensions.Repli
// updateReplicaSet figures out what deployment(s) manage a ReplicaSet when the ReplicaSet
// is updated and wakes them up. If anything about the ReplicaSets has changed, we need to
// awaken both the old and new deployments. old and cur must be *extensions.ReplicaSet
// awaken both the old and new deployments. old and cur must be *apps.ReplicaSet
// types.
func (dc *DeploymentController) updateReplicaSet(old, cur interface{}) {
curRS := cur.(*extensions.ReplicaSet)
oldRS := old.(*extensions.ReplicaSet)
curRS := cur.(*apps.ReplicaSet)
oldRS := old.(*apps.ReplicaSet)
if curRS.ResourceVersion == oldRS.ResourceVersion {
// Periodic resync will send update events for all known replica sets.
// Two different versions of the same replica set will always have different RVs.
@ -298,10 +298,10 @@ func (dc *DeploymentController) updateReplicaSet(old, cur interface{}) {
}
// deleteReplicaSet enqueues the deployment that manages a ReplicaSet when
// the ReplicaSet is deleted. obj could be an *extensions.ReplicaSet, or
// the ReplicaSet is deleted. obj could be an *apps.ReplicaSet, or
// a DeletionFinalStateUnknown marker item.
func (dc *DeploymentController) deleteReplicaSet(obj interface{}) {
rs, ok := obj.(*extensions.ReplicaSet)
rs, ok := obj.(*apps.ReplicaSet)
// When a delete is dropped, the relist will notice a pod in the store not
// in the list, leading to the insertion of a tombstone object which contains
@ -313,7 +313,7 @@ func (dc *DeploymentController) deleteReplicaSet(obj interface{}) {
utilruntime.HandleError(fmt.Errorf("Couldn't get object from tombstone %#v", obj))
return
}
rs, ok = tombstone.Obj.(*extensions.ReplicaSet)
rs, ok = tombstone.Obj.(*apps.ReplicaSet)
if !ok {
utilruntime.HandleError(fmt.Errorf("Tombstone contained object that is not a ReplicaSet %#v", obj))
return
@ -354,9 +354,9 @@ func (dc *DeploymentController) deletePod(obj interface{}) {
}
}
glog.V(4).Infof("Pod %s deleted.", pod.Name)
if d := dc.getDeploymentForPod(pod); d != nil && d.Spec.Strategy.Type == extensions.RecreateDeploymentStrategyType {
if d := dc.getDeploymentForPod(pod); d != nil && d.Spec.Strategy.Type == apps.RecreateDeploymentStrategyType {
// Sync if this Deployment now has no more Pods.
rsList, err := util.ListReplicaSets(d, util.RsListFromClient(dc.client.ExtensionsV1beta1()))
rsList, err := util.ListReplicaSets(d, util.RsListFromClient(dc.client.AppsV1()))
if err != nil {
return
}
@ -374,7 +374,7 @@ func (dc *DeploymentController) deletePod(obj interface{}) {
}
}
func (dc *DeploymentController) enqueue(deployment *extensions.Deployment) {
func (dc *DeploymentController) enqueue(deployment *apps.Deployment) {
key, err := controller.KeyFunc(deployment)
if err != nil {
utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", deployment, err))
@ -384,7 +384,7 @@ func (dc *DeploymentController) enqueue(deployment *extensions.Deployment) {
dc.queue.Add(key)
}
func (dc *DeploymentController) enqueueRateLimited(deployment *extensions.Deployment) {
func (dc *DeploymentController) enqueueRateLimited(deployment *apps.Deployment) {
key, err := controller.KeyFunc(deployment)
if err != nil {
utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", deployment, err))
@ -395,7 +395,7 @@ func (dc *DeploymentController) enqueueRateLimited(deployment *extensions.Deploy
}
// enqueueAfter will enqueue a deployment after the provided amount of time.
func (dc *DeploymentController) enqueueAfter(deployment *extensions.Deployment, after time.Duration) {
func (dc *DeploymentController) enqueueAfter(deployment *apps.Deployment, after time.Duration) {
key, err := controller.KeyFunc(deployment)
if err != nil {
utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", deployment, err))
@ -406,16 +406,16 @@ func (dc *DeploymentController) enqueueAfter(deployment *extensions.Deployment,
}
// getDeploymentForPod returns the deployment managing the given Pod.
func (dc *DeploymentController) getDeploymentForPod(pod *v1.Pod) *extensions.Deployment {
func (dc *DeploymentController) getDeploymentForPod(pod *v1.Pod) *apps.Deployment {
// Find the owning replica set
var rs *extensions.ReplicaSet
var rs *apps.ReplicaSet
var err error
controllerRef := metav1.GetControllerOf(pod)
if controllerRef == nil {
// No controller owns this Pod.
return nil
}
if controllerRef.Kind != extensions.SchemeGroupVersion.WithKind("ReplicaSet").Kind {
if controllerRef.Kind != apps.SchemeGroupVersion.WithKind("ReplicaSet").Kind {
// Not a pod owned by a replica set.
return nil
}
@ -436,7 +436,7 @@ func (dc *DeploymentController) getDeploymentForPod(pod *v1.Pod) *extensions.Dep
// resolveControllerRef returns the controller referenced by a ControllerRef,
// or nil if the ControllerRef could not be resolved to a matching controller
// of the correct Kind.
func (dc *DeploymentController) resolveControllerRef(namespace string, controllerRef *metav1.OwnerReference) *extensions.Deployment {
func (dc *DeploymentController) resolveControllerRef(namespace string, controllerRef *metav1.OwnerReference) *apps.Deployment {
// We can't look up by UID, so look up by Name and then verify UID.
// Don't even try to look up by Name if it's the wrong Kind.
if controllerRef.Kind != controllerKind.Kind {
@ -494,7 +494,7 @@ func (dc *DeploymentController) handleErr(err error, key interface{}) {
// getReplicaSetsForDeployment uses ControllerRefManager to reconcile
// ControllerRef by adopting and orphaning.
// It returns the list of ReplicaSets that this Deployment should manage.
func (dc *DeploymentController) getReplicaSetsForDeployment(d *extensions.Deployment) ([]*extensions.ReplicaSet, error) {
func (dc *DeploymentController) getReplicaSetsForDeployment(d *apps.Deployment) ([]*apps.ReplicaSet, error) {
// List all ReplicaSets to find those we own but that no longer match our
// selector. They will be orphaned by ClaimReplicaSets().
rsList, err := dc.rsLister.ReplicaSets(d.Namespace).List(labels.Everything())
@ -508,7 +508,7 @@ func (dc *DeploymentController) getReplicaSetsForDeployment(d *extensions.Deploy
// If any adoptions are attempted, we should first recheck for deletion with
// an uncached quorum read sometime after listing ReplicaSets (see #42639).
canAdoptFunc := controller.RecheckDeletionTimestamp(func() (metav1.Object, error) {
fresh, err := dc.client.ExtensionsV1beta1().Deployments(d.Namespace).Get(d.Name, metav1.GetOptions{})
fresh, err := dc.client.AppsV1().Deployments(d.Namespace).Get(d.Name, metav1.GetOptions{})
if err != nil {
return nil, err
}
@ -525,7 +525,7 @@ func (dc *DeploymentController) getReplicaSetsForDeployment(d *extensions.Deploy
//
// It returns a map from ReplicaSet UID to a list of Pods controlled by that RS,
// according to the Pod's ControllerRef.
func (dc *DeploymentController) getPodMapForDeployment(d *extensions.Deployment, rsList []*extensions.ReplicaSet) (map[types.UID]*v1.PodList, error) {
func (dc *DeploymentController) getPodMapForDeployment(d *apps.Deployment, rsList []*apps.ReplicaSet) (map[types.UID]*v1.PodList, error) {
// Get all Pods that potentially belong to this Deployment.
selector, err := metav1.LabelSelectorAsSelector(d.Spec.Selector)
if err != nil {
@ -586,7 +586,7 @@ func (dc *DeploymentController) syncDeployment(key string) error {
dc.eventRecorder.Eventf(d, v1.EventTypeWarning, "SelectingAll", "This deployment is selecting all pods. A non-empty selector is required.")
if d.Status.ObservedGeneration < d.Generation {
d.Status.ObservedGeneration = d.Generation
dc.client.ExtensionsV1beta1().Deployments(d.Namespace).UpdateStatus(d)
dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(d)
}
return nil
}
@ -625,7 +625,7 @@ func (dc *DeploymentController) syncDeployment(key string) error {
// rollback is not re-entrant in case the underlying replica sets are updated with a new
// revision so we should ensure that we won't proceed to update replica sets until we
// make sure that the deployment has cleaned up its rollback spec in subsequent enqueues.
if d.Spec.RollbackTo != nil {
if getRollbackTo(d) != nil {
return dc.rollback(d, rsList, podMap)
}
@ -638,9 +638,9 @@ func (dc *DeploymentController) syncDeployment(key string) error {
}
switch d.Spec.Strategy.Type {
case extensions.RecreateDeploymentStrategyType:
case apps.RecreateDeploymentStrategyType:
return dc.rolloutRecreate(d, rsList, podMap)
case extensions.RollingUpdateDeploymentStrategyType:
case apps.RollingUpdateDeploymentStrategyType:
return dc.rolloutRolling(d, rsList, podMap)
}
return fmt.Errorf("unexpected deployment strategy type: %s", d.Spec.Strategy.Type)

View File

@ -20,6 +20,7 @@ import (
"strconv"
"testing"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -38,7 +39,6 @@ import (
_ "k8s.io/kubernetes/pkg/apis/batch/install"
_ "k8s.io/kubernetes/pkg/apis/certificates/install"
_ "k8s.io/kubernetes/pkg/apis/core/install"
_ "k8s.io/kubernetes/pkg/apis/extensions/install"
_ "k8s.io/kubernetes/pkg/apis/policy/install"
_ "k8s.io/kubernetes/pkg/apis/rbac/install"
_ "k8s.io/kubernetes/pkg/apis/settings/install"
@ -52,14 +52,14 @@ var (
noTimestamp = metav1.Time{}
)
func rs(name string, replicas int, selector map[string]string, timestamp metav1.Time) *extensions.ReplicaSet {
return &extensions.ReplicaSet{
func rs(name string, replicas int, selector map[string]string, timestamp metav1.Time) *apps.ReplicaSet {
return &apps.ReplicaSet{
ObjectMeta: metav1.ObjectMeta{
Name: name,
CreationTimestamp: timestamp,
Namespace: metav1.NamespaceDefault,
},
Spec: extensions.ReplicaSetSpec{
Spec: apps.ReplicaSetSpec{
Replicas: func() *int32 { i := int32(replicas); return &i }(),
Selector: &metav1.LabelSelector{MatchLabels: selector},
Template: v1.PodTemplateSpec{},
@ -67,27 +67,27 @@ func rs(name string, replicas int, selector map[string]string, timestamp metav1.
}
}
func newRSWithStatus(name string, specReplicas, statusReplicas int, selector map[string]string) *extensions.ReplicaSet {
func newRSWithStatus(name string, specReplicas, statusReplicas int, selector map[string]string) *apps.ReplicaSet {
rs := rs(name, specReplicas, selector, noTimestamp)
rs.Status = extensions.ReplicaSetStatus{
rs.Status = apps.ReplicaSetStatus{
Replicas: int32(statusReplicas),
}
return rs
}
func newDeployment(name string, replicas int, revisionHistoryLimit *int32, maxSurge, maxUnavailable *intstr.IntOrString, selector map[string]string) *extensions.Deployment {
d := extensions.Deployment{
TypeMeta: metav1.TypeMeta{APIVersion: "extensions/v1beta1"},
func newDeployment(name string, replicas int, revisionHistoryLimit *int32, maxSurge, maxUnavailable *intstr.IntOrString, selector map[string]string) *apps.Deployment {
d := apps.Deployment{
TypeMeta: metav1.TypeMeta{APIVersion: "apps/v1"},
ObjectMeta: metav1.ObjectMeta{
UID: uuid.NewUUID(),
Name: name,
Namespace: metav1.NamespaceDefault,
Annotations: make(map[string]string),
},
Spec: extensions.DeploymentSpec{
Strategy: extensions.DeploymentStrategy{
Type: extensions.RollingUpdateDeploymentStrategyType,
RollingUpdate: &extensions.RollingUpdateDeployment{
Spec: apps.DeploymentSpec{
Strategy: apps.DeploymentStrategy{
Type: apps.RollingUpdateDeploymentStrategyType,
RollingUpdate: &apps.RollingUpdateDeployment{
MaxUnavailable: func() *intstr.IntOrString { i := intstr.FromInt(0); return &i }(),
MaxSurge: func() *intstr.IntOrString { i := intstr.FromInt(0); return &i }(),
},
@ -118,8 +118,8 @@ func newDeployment(name string, replicas int, revisionHistoryLimit *int32, maxSu
return &d
}
func newReplicaSet(d *extensions.Deployment, name string, replicas int) *extensions.ReplicaSet {
return &extensions.ReplicaSet{
func newReplicaSet(d *apps.Deployment, name string, replicas int) *apps.ReplicaSet {
return &apps.ReplicaSet{
ObjectMeta: metav1.ObjectMeta{
Name: name,
UID: uuid.NewUUID(),
@ -127,7 +127,7 @@ func newReplicaSet(d *extensions.Deployment, name string, replicas int) *extensi
Labels: d.Spec.Selector.MatchLabels,
OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(d, controllerKind)},
},
Spec: extensions.ReplicaSetSpec{
Spec: apps.ReplicaSetSpec{
Selector: d.Spec.Selector,
Replicas: func() *int32 { i := int32(replicas); return &i }(),
Template: d.Spec.Template,
@ -135,7 +135,7 @@ func newReplicaSet(d *extensions.Deployment, name string, replicas int) *extensi
}
}
func getKey(d *extensions.Deployment, t *testing.T) string {
func getKey(d *apps.Deployment, t *testing.T) string {
if key, err := controller.KeyFunc(d); err != nil {
t.Errorf("Unexpected error getting key for deployment %v: %v", d.Name, err)
return ""
@ -149,8 +149,8 @@ type fixture struct {
client *fake.Clientset
// Objects to put in the store.
dLister []*extensions.Deployment
rsLister []*extensions.ReplicaSet
dLister []*apps.Deployment
rsLister []*apps.ReplicaSet
podLister []*v1.Pod
// Actions expected to happen on the client. Objects from here are also
@ -159,23 +159,23 @@ type fixture struct {
objects []runtime.Object
}
func (f *fixture) expectGetDeploymentAction(d *extensions.Deployment) {
func (f *fixture) expectGetDeploymentAction(d *apps.Deployment) {
action := core.NewGetAction(schema.GroupVersionResource{Resource: "deployments"}, d.Namespace, d.Name)
f.actions = append(f.actions, action)
}
func (f *fixture) expectUpdateDeploymentStatusAction(d *extensions.Deployment) {
func (f *fixture) expectUpdateDeploymentStatusAction(d *apps.Deployment) {
action := core.NewUpdateAction(schema.GroupVersionResource{Resource: "deployments"}, d.Namespace, d)
action.Subresource = "status"
f.actions = append(f.actions, action)
}
func (f *fixture) expectUpdateDeploymentAction(d *extensions.Deployment) {
func (f *fixture) expectUpdateDeploymentAction(d *apps.Deployment) {
action := core.NewUpdateAction(schema.GroupVersionResource{Resource: "deployments"}, d.Namespace, d)
f.actions = append(f.actions, action)
}
func (f *fixture) expectCreateRSAction(rs *extensions.ReplicaSet) {
func (f *fixture) expectCreateRSAction(rs *apps.ReplicaSet) {
f.actions = append(f.actions, core.NewCreateAction(schema.GroupVersionResource{Resource: "replicasets"}, rs.Namespace, rs))
}
@ -189,7 +189,7 @@ func newFixture(t *testing.T) *fixture {
func (f *fixture) newController() (*DeploymentController, informers.SharedInformerFactory, error) {
f.client = fake.NewSimpleClientset(f.objects...)
informers := informers.NewSharedInformerFactory(f.client, controller.NoResyncPeriodFunc())
c, err := NewDeploymentController(informers.Extensions().V1beta1().Deployments(), informers.Extensions().V1beta1().ReplicaSets(), informers.Core().V1().Pods(), f.client)
c, err := NewDeploymentController(informers.Apps().V1().Deployments(), informers.Apps().V1().ReplicaSets(), informers.Core().V1().Pods(), f.client)
if err != nil {
return nil, nil, err
}
@ -198,10 +198,10 @@ func (f *fixture) newController() (*DeploymentController, informers.SharedInform
c.rsListerSynced = alwaysReady
c.podListerSynced = alwaysReady
for _, d := range f.dLister {
informers.Extensions().V1beta1().Deployments().Informer().GetIndexer().Add(d)
informers.Apps().V1().Deployments().Informer().GetIndexer().Add(d)
}
for _, rs := range f.rsLister {
informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs)
informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rs)
}
for _, pod := range f.podLister {
informers.Core().V1().Pods().Informer().GetIndexer().Add(pod)
@ -344,20 +344,19 @@ func TestReentrantRollback(t *testing.T) {
f := newFixture(t)
d := newDeployment("foo", 1, nil, nil, nil, map[string]string{"foo": "bar"})
d.Spec.RollbackTo = &extensions.RollbackConfig{Revision: 0}
d.Annotations = map[string]string{util.RevisionAnnotation: "2"}
setRollbackTo(d, &extensions.RollbackConfig{Revision: 0})
f.dLister = append(f.dLister, d)
rs1 := newReplicaSet(d, "deploymentrs-old", 0)
rs1.Annotations = map[string]string{util.RevisionAnnotation: "1"}
one := int64(1)
rs1.Spec.Template.Spec.TerminationGracePeriodSeconds = &one
rs1.Spec.Selector.MatchLabels[extensions.DefaultDeploymentUniqueLabelKey] = "hash"
rs1.Spec.Selector.MatchLabels[apps.DefaultDeploymentUniqueLabelKey] = "hash"
rs2 := newReplicaSet(d, "deploymentrs-new", 1)
rs2.Annotations = map[string]string{util.RevisionAnnotation: "2"}
rs2.Spec.Selector.MatchLabels[extensions.DefaultDeploymentUniqueLabelKey] = "hash"
rs2.Spec.Selector.MatchLabels[apps.DefaultDeploymentUniqueLabelKey] = "hash"
f.rsLister = append(f.rsLister, rs1, rs2)
f.objects = append(f.objects, d, rs1, rs2)
@ -375,7 +374,7 @@ func TestPodDeletionEnqueuesRecreateDeployment(t *testing.T) {
f := newFixture(t)
foo := newDeployment("foo", 1, nil, nil, nil, map[string]string{"foo": "bar"})
foo.Spec.Strategy.Type = extensions.RecreateDeploymentStrategyType
foo.Spec.Strategy.Type = apps.RecreateDeploymentStrategyType
rs := newReplicaSet(foo, "foo-1", 1)
pod := generatePodFromRS(rs)
@ -388,7 +387,7 @@ func TestPodDeletionEnqueuesRecreateDeployment(t *testing.T) {
t.Fatalf("error creating Deployment controller: %v", err)
}
enqueued := false
c.enqueueDeployment = func(d *extensions.Deployment) {
c.enqueueDeployment = func(d *apps.Deployment) {
if d.Name == "foo" {
enqueued = true
}
@ -408,7 +407,7 @@ func TestPodDeletionDoesntEnqueueRecreateDeployment(t *testing.T) {
f := newFixture(t)
foo := newDeployment("foo", 1, nil, nil, nil, map[string]string{"foo": "bar"})
foo.Spec.Strategy.Type = extensions.RecreateDeploymentStrategyType
foo.Spec.Strategy.Type = apps.RecreateDeploymentStrategyType
rs1 := newReplicaSet(foo, "foo-1", 1)
rs2 := newReplicaSet(foo, "foo-1", 1)
pod1 := generatePodFromRS(rs1)
@ -424,7 +423,7 @@ func TestPodDeletionDoesntEnqueueRecreateDeployment(t *testing.T) {
t.Fatalf("error creating Deployment controller: %v", err)
}
enqueued := false
c.enqueueDeployment = func(d *extensions.Deployment) {
c.enqueueDeployment = func(d *apps.Deployment) {
if d.Name == "foo" {
enqueued = true
}
@ -445,7 +444,7 @@ func TestPodDeletionPartialReplicaSetOwnershipEnqueueRecreateDeployment(t *testi
f := newFixture(t)
foo := newDeployment("foo", 1, nil, nil, nil, map[string]string{"foo": "bar"})
foo.Spec.Strategy.Type = extensions.RecreateDeploymentStrategyType
foo.Spec.Strategy.Type = apps.RecreateDeploymentStrategyType
rs1 := newReplicaSet(foo, "foo-1", 1)
rs2 := newReplicaSet(foo, "foo-2", 2)
rs2.OwnerReferences = nil
@ -460,7 +459,7 @@ func TestPodDeletionPartialReplicaSetOwnershipEnqueueRecreateDeployment(t *testi
t.Fatalf("error creating Deployment controller: %v", err)
}
enqueued := false
c.enqueueDeployment = func(d *extensions.Deployment) {
c.enqueueDeployment = func(d *apps.Deployment) {
if d.Name == "foo" {
enqueued = true
}
@ -481,7 +480,7 @@ func TestPodDeletionPartialReplicaSetOwnershipDoesntEnqueueRecreateDeployment(t
f := newFixture(t)
foo := newDeployment("foo", 1, nil, nil, nil, map[string]string{"foo": "bar"})
foo.Spec.Strategy.Type = extensions.RecreateDeploymentStrategyType
foo.Spec.Strategy.Type = apps.RecreateDeploymentStrategyType
rs1 := newReplicaSet(foo, "foo-1", 1)
rs2 := newReplicaSet(foo, "foo-2", 2)
rs2.OwnerReferences = nil
@ -499,7 +498,7 @@ func TestPodDeletionPartialReplicaSetOwnershipDoesntEnqueueRecreateDeployment(t
t.Fatalf("error creating Deployment controller: %v", err)
}
enqueued := false
c.enqueueDeployment = func(d *extensions.Deployment) {
c.enqueueDeployment = func(d *apps.Deployment) {
if d.Name == "foo" {
enqueued = true
}
@ -972,7 +971,7 @@ func bumpResourceVersion(obj metav1.Object) {
}
// generatePodFromRS creates a pod, with the input ReplicaSet's selector and its template
func generatePodFromRS(rs *extensions.ReplicaSet) *v1.Pod {
func generatePodFromRS(rs *apps.ReplicaSet) *v1.Pod {
trueVar := true
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{

View File

@ -23,8 +23,8 @@ import (
"github.com/golang/glog"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
"k8s.io/kubernetes/pkg/controller/deployment/util"
)
@ -32,18 +32,18 @@ import (
// cases this helper will run that cannot be prevented from the scaling detection,
// for example a resync of the deployment after it was scaled up. In those cases,
// we shouldn't try to estimate any progress.
func (dc *DeploymentController) syncRolloutStatus(allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet, d *extensions.Deployment) error {
func (dc *DeploymentController) syncRolloutStatus(allRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet, d *apps.Deployment) error {
newStatus := calculateStatus(allRSs, newRS, d)
// If there is no progressDeadlineSeconds set, remove any Progressing condition.
if d.Spec.ProgressDeadlineSeconds == nil {
util.RemoveDeploymentCondition(&newStatus, extensions.DeploymentProgressing)
util.RemoveDeploymentCondition(&newStatus, apps.DeploymentProgressing)
}
// If there is only one replica set that is active then that means we are not running
// a new rollout and this is a resync where we don't need to estimate any progress.
// In such a case, we should simply not estimate any progress for this deployment.
currentCond := util.GetDeploymentCondition(d.Status, extensions.DeploymentProgressing)
currentCond := util.GetDeploymentCondition(d.Status, apps.DeploymentProgressing)
isCompleteDeployment := newStatus.Replicas == newStatus.UpdatedReplicas && currentCond != nil && currentCond.Reason == util.NewRSAvailableReason
// Check for progress only if there is a progress deadline set and the latest rollout
// hasn't completed yet.
@ -56,7 +56,7 @@ func (dc *DeploymentController) syncRolloutStatus(allRSs []*extensions.ReplicaSe
if newRS != nil {
msg = fmt.Sprintf("ReplicaSet %q has successfully progressed.", newRS.Name)
}
condition := util.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionTrue, util.NewRSAvailableReason, msg)
condition := util.NewDeploymentCondition(apps.DeploymentProgressing, v1.ConditionTrue, util.NewRSAvailableReason, msg)
util.SetDeploymentCondition(&newStatus, *condition)
case util.DeploymentProgressing(d, &newStatus):
@ -66,7 +66,7 @@ func (dc *DeploymentController) syncRolloutStatus(allRSs []*extensions.ReplicaSe
if newRS != nil {
msg = fmt.Sprintf("ReplicaSet %q is progressing.", newRS.Name)
}
condition := util.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionTrue, util.ReplicaSetUpdatedReason, msg)
condition := util.NewDeploymentCondition(apps.DeploymentProgressing, v1.ConditionTrue, util.ReplicaSetUpdatedReason, msg)
// Update the current Progressing condition or add a new one if it doesn't exist.
// If a Progressing condition with status=true already exists, we should update
// everything but lastTransitionTime. SetDeploymentCondition already does that but
@ -78,7 +78,7 @@ func (dc *DeploymentController) syncRolloutStatus(allRSs []*extensions.ReplicaSe
if currentCond.Status == v1.ConditionTrue {
condition.LastTransitionTime = currentCond.LastTransitionTime
}
util.RemoveDeploymentCondition(&newStatus, extensions.DeploymentProgressing)
util.RemoveDeploymentCondition(&newStatus, apps.DeploymentProgressing)
}
util.SetDeploymentCondition(&newStatus, *condition)
@ -89,7 +89,7 @@ func (dc *DeploymentController) syncRolloutStatus(allRSs []*extensions.ReplicaSe
if newRS != nil {
msg = fmt.Sprintf("ReplicaSet %q has timed out progressing.", newRS.Name)
}
condition := util.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionFalse, util.TimedOutReason, msg)
condition := util.NewDeploymentCondition(apps.DeploymentProgressing, v1.ConditionFalse, util.TimedOutReason, msg)
util.SetDeploymentCondition(&newStatus, *condition)
}
}
@ -100,7 +100,7 @@ func (dc *DeploymentController) syncRolloutStatus(allRSs []*extensions.ReplicaSe
// There will be only one ReplicaFailure condition on the replica set.
util.SetDeploymentCondition(&newStatus, replicaFailureCond[0])
} else {
util.RemoveDeploymentCondition(&newStatus, extensions.DeploymentReplicaFailure)
util.RemoveDeploymentCondition(&newStatus, apps.DeploymentReplicaFailure)
}
// Do not update if there is nothing new to add.
@ -112,17 +112,17 @@ func (dc *DeploymentController) syncRolloutStatus(allRSs []*extensions.ReplicaSe
newDeployment := d
newDeployment.Status = newStatus
_, err := dc.client.ExtensionsV1beta1().Deployments(newDeployment.Namespace).UpdateStatus(newDeployment)
_, err := dc.client.AppsV1().Deployments(newDeployment.Namespace).UpdateStatus(newDeployment)
return err
}
// getReplicaFailures will convert replica failure conditions from replica sets
// to deployment conditions.
func (dc *DeploymentController) getReplicaFailures(allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet) []extensions.DeploymentCondition {
var conditions []extensions.DeploymentCondition
func (dc *DeploymentController) getReplicaFailures(allRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet) []apps.DeploymentCondition {
var conditions []apps.DeploymentCondition
if newRS != nil {
for _, c := range newRS.Status.Conditions {
if c.Type != extensions.ReplicaSetReplicaFailure {
if c.Type != apps.ReplicaSetReplicaFailure {
continue
}
conditions = append(conditions, util.ReplicaSetToDeploymentCondition(c))
@ -141,7 +141,7 @@ func (dc *DeploymentController) getReplicaFailures(allRSs []*extensions.ReplicaS
}
for _, c := range rs.Status.Conditions {
if c.Type != extensions.ReplicaSetReplicaFailure {
if c.Type != apps.ReplicaSetReplicaFailure {
continue
}
conditions = append(conditions, util.ReplicaSetToDeploymentCondition(c))
@ -156,8 +156,8 @@ var nowFn = func() time.Time { return time.Now() }
// requeueStuckDeployment checks whether the provided deployment needs to be synced for a progress
// check. It returns the time after the deployment will be requeued for the progress check, 0 if it
// will be requeued now, or -1 if it does not need to be requeued.
func (dc *DeploymentController) requeueStuckDeployment(d *extensions.Deployment, newStatus extensions.DeploymentStatus) time.Duration {
currentCond := util.GetDeploymentCondition(d.Status, extensions.DeploymentProgressing)
func (dc *DeploymentController) requeueStuckDeployment(d *apps.Deployment, newStatus apps.DeploymentStatus) time.Duration {
currentCond := util.GetDeploymentCondition(d.Status, apps.DeploymentProgressing)
// Can't estimate progress if there is no deadline in the spec or progressing condition in the current status.
if d.Spec.ProgressDeadlineSeconds == nil || currentCond == nil {
return time.Duration(-1)

View File

@ -20,16 +20,16 @@ import (
"testing"
"time"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/util/workqueue"
"k8s.io/kubernetes/pkg/controller/deployment/util"
)
func newDeploymentStatus(replicas, updatedReplicas, availableReplicas int32) extensions.DeploymentStatus {
return extensions.DeploymentStatus{
func newDeploymentStatus(replicas, updatedReplicas, availableReplicas int32) apps.DeploymentStatus {
return apps.DeploymentStatus{
Replicas: replicas,
UpdatedReplicas: updatedReplicas,
AvailableReplicas: availableReplicas,
@ -37,16 +37,16 @@ func newDeploymentStatus(replicas, updatedReplicas, availableReplicas int32) ext
}
// assumes the returned deployment is always observed - not needed to be tested here.
func currentDeployment(pds *int32, replicas, statusReplicas, updatedReplicas, availableReplicas int32, conditions []extensions.DeploymentCondition) *extensions.Deployment {
d := &extensions.Deployment{
func currentDeployment(pds *int32, replicas, statusReplicas, updatedReplicas, availableReplicas int32, conditions []apps.DeploymentCondition) *apps.Deployment {
d := &apps.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: "progress-test",
},
Spec: extensions.DeploymentSpec{
Spec: apps.DeploymentSpec{
ProgressDeadlineSeconds: pds,
Replicas: &replicas,
Strategy: extensions.DeploymentStrategy{
Type: extensions.RecreateDeploymentStrategyType,
Strategy: apps.DeploymentStrategy{
Type: apps.RecreateDeploymentStrategyType,
},
},
Status: newDeploymentStatus(statusReplicas, updatedReplicas, availableReplicas),
@ -56,9 +56,9 @@ func currentDeployment(pds *int32, replicas, statusReplicas, updatedReplicas, av
}
// helper to create RS with given availableReplicas
func newRSWithAvailable(name string, specReplicas, statusReplicas, availableReplicas int) *extensions.ReplicaSet {
func newRSWithAvailable(name string, specReplicas, statusReplicas, availableReplicas int) *apps.ReplicaSet {
rs := rs(name, specReplicas, nil, metav1.Time{})
rs.Status = extensions.ReplicaSetStatus{
rs.Status = apps.ReplicaSetStatus{
Replicas: int32(statusReplicas),
AvailableReplicas: int32(availableReplicas),
}
@ -67,16 +67,16 @@ func newRSWithAvailable(name string, specReplicas, statusReplicas, availableRepl
func TestRequeueStuckDeployment(t *testing.T) {
pds := int32(60)
failed := []extensions.DeploymentCondition{
failed := []apps.DeploymentCondition{
{
Type: extensions.DeploymentProgressing,
Type: apps.DeploymentProgressing,
Status: v1.ConditionFalse,
Reason: util.TimedOutReason,
},
}
stuck := []extensions.DeploymentCondition{
stuck := []apps.DeploymentCondition{
{
Type: extensions.DeploymentProgressing,
Type: apps.DeploymentProgressing,
Status: v1.ConditionTrue,
LastUpdateTime: metav1.Date(2017, 2, 15, 18, 49, 00, 00, time.UTC),
},
@ -84,8 +84,8 @@ func TestRequeueStuckDeployment(t *testing.T) {
tests := []struct {
name string
d *extensions.Deployment
status extensions.DeploymentStatus
d *apps.Deployment
status apps.DeploymentStatus
nowFn func() time.Time
expected time.Duration
}{
@ -178,20 +178,20 @@ func TestRequeueStuckDeployment(t *testing.T) {
func TestSyncRolloutStatus(t *testing.T) {
pds := int32(60)
testTime := metav1.Date(2017, 2, 15, 18, 49, 00, 00, time.UTC)
failedTimedOut := extensions.DeploymentCondition{
Type: extensions.DeploymentProgressing,
failedTimedOut := apps.DeploymentCondition{
Type: apps.DeploymentProgressing,
Status: v1.ConditionFalse,
Reason: util.TimedOutReason,
}
newRSAvailable := extensions.DeploymentCondition{
Type: extensions.DeploymentProgressing,
newRSAvailable := apps.DeploymentCondition{
Type: apps.DeploymentProgressing,
Status: v1.ConditionTrue,
Reason: util.NewRSAvailableReason,
LastUpdateTime: testTime,
LastTransitionTime: testTime,
}
replicaSetUpdated := extensions.DeploymentCondition{
Type: extensions.DeploymentProgressing,
replicaSetUpdated := apps.DeploymentCondition{
Type: apps.DeploymentProgressing,
Status: v1.ConditionTrue,
Reason: util.ReplicaSetUpdatedReason,
LastUpdateTime: testTime,
@ -200,10 +200,10 @@ func TestSyncRolloutStatus(t *testing.T) {
tests := []struct {
name string
d *extensions.Deployment
allRSs []*extensions.ReplicaSet
newRS *extensions.ReplicaSet
conditionType extensions.DeploymentConditionType
d *apps.Deployment
allRSs []*apps.ReplicaSet
newRS *apps.ReplicaSet
conditionType apps.DeploymentConditionType
conditionStatus v1.ConditionStatus
conditionReason string
lastUpdate metav1.Time
@ -211,15 +211,15 @@ func TestSyncRolloutStatus(t *testing.T) {
}{
{
name: "General: remove Progressing condition and do not estimate progress if deployment has no Progress Deadline",
d: currentDeployment(nil, 3, 2, 2, 2, []extensions.DeploymentCondition{replicaSetUpdated}),
allRSs: []*extensions.ReplicaSet{newRSWithAvailable("bar", 0, 1, 1)},
d: currentDeployment(nil, 3, 2, 2, 2, []apps.DeploymentCondition{replicaSetUpdated}),
allRSs: []*apps.ReplicaSet{newRSWithAvailable("bar", 0, 1, 1)},
newRS: newRSWithAvailable("foo", 3, 2, 2),
},
{
name: "General: do not estimate progress of deployment with only one active ReplicaSet",
d: currentDeployment(&pds, 3, 3, 3, 3, []extensions.DeploymentCondition{newRSAvailable}),
allRSs: []*extensions.ReplicaSet{newRSWithAvailable("bar", 3, 3, 3)},
conditionType: extensions.DeploymentProgressing,
d: currentDeployment(&pds, 3, 3, 3, 3, []apps.DeploymentCondition{newRSAvailable}),
allRSs: []*apps.ReplicaSet{newRSWithAvailable("bar", 3, 3, 3)},
conditionType: apps.DeploymentProgressing,
conditionStatus: v1.ConditionTrue,
conditionReason: util.NewRSAvailableReason,
lastUpdate: testTime,
@ -227,83 +227,83 @@ func TestSyncRolloutStatus(t *testing.T) {
},
{
name: "DeploymentProgressing: dont update lastTransitionTime if deployment already has Progressing=True",
d: currentDeployment(&pds, 3, 2, 2, 2, []extensions.DeploymentCondition{replicaSetUpdated}),
allRSs: []*extensions.ReplicaSet{newRSWithAvailable("bar", 0, 1, 1)},
d: currentDeployment(&pds, 3, 2, 2, 2, []apps.DeploymentCondition{replicaSetUpdated}),
allRSs: []*apps.ReplicaSet{newRSWithAvailable("bar", 0, 1, 1)},
newRS: newRSWithAvailable("foo", 3, 2, 2),
conditionType: extensions.DeploymentProgressing,
conditionType: apps.DeploymentProgressing,
conditionStatus: v1.ConditionTrue,
conditionReason: util.ReplicaSetUpdatedReason,
lastTransition: testTime,
},
{
name: "DeploymentProgressing: update everything if deployment has Progressing=False",
d: currentDeployment(&pds, 3, 2, 2, 2, []extensions.DeploymentCondition{failedTimedOut}),
allRSs: []*extensions.ReplicaSet{newRSWithAvailable("bar", 0, 1, 1)},
d: currentDeployment(&pds, 3, 2, 2, 2, []apps.DeploymentCondition{failedTimedOut}),
allRSs: []*apps.ReplicaSet{newRSWithAvailable("bar", 0, 1, 1)},
newRS: newRSWithAvailable("foo", 3, 2, 2),
conditionType: extensions.DeploymentProgressing,
conditionType: apps.DeploymentProgressing,
conditionStatus: v1.ConditionTrue,
conditionReason: util.ReplicaSetUpdatedReason,
},
{
name: "DeploymentProgressing: create Progressing condition if it does not exist",
d: currentDeployment(&pds, 3, 2, 2, 2, []extensions.DeploymentCondition{}),
allRSs: []*extensions.ReplicaSet{newRSWithAvailable("bar", 0, 1, 1)},
d: currentDeployment(&pds, 3, 2, 2, 2, []apps.DeploymentCondition{}),
allRSs: []*apps.ReplicaSet{newRSWithAvailable("bar", 0, 1, 1)},
newRS: newRSWithAvailable("foo", 3, 2, 2),
conditionType: extensions.DeploymentProgressing,
conditionType: apps.DeploymentProgressing,
conditionStatus: v1.ConditionTrue,
conditionReason: util.ReplicaSetUpdatedReason,
},
{
name: "DeploymentComplete: dont update lastTransitionTime if deployment already has Progressing=True",
d: currentDeployment(&pds, 3, 3, 3, 3, []extensions.DeploymentCondition{replicaSetUpdated}),
allRSs: []*extensions.ReplicaSet{},
d: currentDeployment(&pds, 3, 3, 3, 3, []apps.DeploymentCondition{replicaSetUpdated}),
allRSs: []*apps.ReplicaSet{},
newRS: newRSWithAvailable("foo", 3, 3, 3),
conditionType: extensions.DeploymentProgressing,
conditionType: apps.DeploymentProgressing,
conditionStatus: v1.ConditionTrue,
conditionReason: util.NewRSAvailableReason,
lastTransition: testTime,
},
{
name: "DeploymentComplete: update everything if deployment has Progressing=False",
d: currentDeployment(&pds, 3, 3, 3, 3, []extensions.DeploymentCondition{failedTimedOut}),
allRSs: []*extensions.ReplicaSet{},
d: currentDeployment(&pds, 3, 3, 3, 3, []apps.DeploymentCondition{failedTimedOut}),
allRSs: []*apps.ReplicaSet{},
newRS: newRSWithAvailable("foo", 3, 3, 3),
conditionType: extensions.DeploymentProgressing,
conditionType: apps.DeploymentProgressing,
conditionStatus: v1.ConditionTrue,
conditionReason: util.NewRSAvailableReason,
},
{
name: "DeploymentComplete: create Progressing condition if it does not exist",
d: currentDeployment(&pds, 3, 3, 3, 3, []extensions.DeploymentCondition{}),
allRSs: []*extensions.ReplicaSet{},
d: currentDeployment(&pds, 3, 3, 3, 3, []apps.DeploymentCondition{}),
allRSs: []*apps.ReplicaSet{},
newRS: newRSWithAvailable("foo", 3, 3, 3),
conditionType: extensions.DeploymentProgressing,
conditionType: apps.DeploymentProgressing,
conditionStatus: v1.ConditionTrue,
conditionReason: util.NewRSAvailableReason,
},
{
name: "DeploymentComplete: defend against NPE when newRS=nil",
d: currentDeployment(&pds, 0, 3, 3, 3, []extensions.DeploymentCondition{replicaSetUpdated}),
allRSs: []*extensions.ReplicaSet{newRSWithAvailable("foo", 0, 0, 0)},
conditionType: extensions.DeploymentProgressing,
d: currentDeployment(&pds, 0, 3, 3, 3, []apps.DeploymentCondition{replicaSetUpdated}),
allRSs: []*apps.ReplicaSet{newRSWithAvailable("foo", 0, 0, 0)},
conditionType: apps.DeploymentProgressing,
conditionStatus: v1.ConditionTrue,
conditionReason: util.NewRSAvailableReason,
},
{
name: "DeploymentTimedOut: update status if rollout exceeds Progress Deadline",
d: currentDeployment(&pds, 3, 2, 2, 2, []extensions.DeploymentCondition{replicaSetUpdated}),
allRSs: []*extensions.ReplicaSet{},
d: currentDeployment(&pds, 3, 2, 2, 2, []apps.DeploymentCondition{replicaSetUpdated}),
allRSs: []*apps.ReplicaSet{},
newRS: newRSWithAvailable("foo", 3, 2, 2),
conditionType: extensions.DeploymentProgressing,
conditionType: apps.DeploymentProgressing,
conditionStatus: v1.ConditionFalse,
conditionReason: util.TimedOutReason,
},
{
name: "DeploymentTimedOut: do not update status if deployment has existing timedOut condition",
d: currentDeployment(&pds, 3, 2, 2, 2, []extensions.DeploymentCondition{failedTimedOut}),
allRSs: []*extensions.ReplicaSet{},
d: currentDeployment(&pds, 3, 2, 2, 2, []apps.DeploymentCondition{failedTimedOut}),
allRSs: []*apps.ReplicaSet{},
newRS: newRSWithAvailable("foo", 3, 2, 2),
conditionType: extensions.DeploymentProgressing,
conditionType: apps.DeploymentProgressing,
conditionStatus: v1.ConditionFalse,
conditionReason: util.TimedOutReason,
lastUpdate: testTime,

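For reference, this is roughly the condition shape the cases above assert against, now built from apps/v1 types. A minimal sketch; the reason string stands in for the constants in pkg/controller/deployment/util (util.ReplicaSetUpdatedReason, util.NewRSAvailableReason, util.TimedOutReason) referenced in the tests, and the ReplicaSet name is made up.

```go
package main

import (
	"fmt"

	apps "k8s.io/api/apps/v1"
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// While a rollout is in flight the controller keeps Progressing=True with a
	// ReplicaSetUpdated-style reason; on completion the reason flips to
	// NewReplicaSetAvailable, and on a deadline breach the status flips to False.
	progressing := apps.DeploymentCondition{
		Type:               apps.DeploymentProgressing,
		Status:             v1.ConditionTrue,
		Reason:             "ReplicaSetUpdated", // stand-in for util.ReplicaSetUpdatedReason
		Message:            `ReplicaSet "foo-12345" is progressing.`,
		LastUpdateTime:     metav1.Now(),
		LastTransitionTime: metav1.Now(),
	}
	fmt.Printf("%+v\n", progressing)
}
```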

@ -17,15 +17,15 @@ limitations under the License.
package deployment
import (
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/deployment/util"
)
// rolloutRecreate implements the logic for recreating a replica set.
func (dc *DeploymentController) rolloutRecreate(d *extensions.Deployment, rsList []*extensions.ReplicaSet, podMap map[types.UID]*v1.PodList) error {
func (dc *DeploymentController) rolloutRecreate(d *apps.Deployment, rsList []*apps.ReplicaSet, podMap map[types.UID]*v1.PodList) error {
// Don't create a new RS if one doesn't already exist, so that we avoid scaling up before scaling down.
newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, podMap, false)
if err != nil {
@ -74,7 +74,7 @@ func (dc *DeploymentController) rolloutRecreate(d *extensions.Deployment, rsList
}
// scaleDownOldReplicaSetsForRecreate scales down old replica sets when deployment strategy is "Recreate".
func (dc *DeploymentController) scaleDownOldReplicaSetsForRecreate(oldRSs []*extensions.ReplicaSet, deployment *extensions.Deployment) (bool, error) {
func (dc *DeploymentController) scaleDownOldReplicaSetsForRecreate(oldRSs []*apps.ReplicaSet, deployment *apps.Deployment) (bool, error) {
scaled := false
for i := range oldRSs {
rs := oldRSs[i]
@ -95,7 +95,7 @@ func (dc *DeploymentController) scaleDownOldReplicaSetsForRecreate(oldRSs []*ext
}
// oldPodsRunning returns whether there are old pods running, or whether any of the old ReplicaSets reports that it still runs pods.
func oldPodsRunning(newRS *extensions.ReplicaSet, oldRSs []*extensions.ReplicaSet, podMap map[types.UID]*v1.PodList) bool {
func oldPodsRunning(newRS *apps.ReplicaSet, oldRSs []*apps.ReplicaSet, podMap map[types.UID]*v1.PodList) bool {
if oldPods := util.GetActualReplicaCountForReplicaSets(oldRSs); oldPods > 0 {
return true
}
@ -123,7 +123,7 @@ func oldPodsRunning(newRS *extensions.ReplicaSet, oldRSs []*extensions.ReplicaSe
}
// scaleUpNewReplicaSetForRecreate scales up new replica set when deployment strategy is "Recreate".
func (dc *DeploymentController) scaleUpNewReplicaSetForRecreate(newRS *extensions.ReplicaSet, deployment *extensions.Deployment) (bool, error) {
func (dc *DeploymentController) scaleUpNewReplicaSetForRecreate(newRS *apps.ReplicaSet, deployment *apps.Deployment) (bool, error) {
scaled, _, err := dc.scaleReplicaSetAndRecordEvent(newRS, *(deployment.Spec.Replicas), deployment)
return scaled, err
}
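A minimal sketch of the pod-phase rule the recreate path relies on (and that the tests in the next file exercise): only Failed and Succeeded pods count as no longer running; Pending, Running, and Unknown all block the scale-up. This is an illustration, not the controller's actual oldPodsRunning implementation.

```go
package main

import (
	"fmt"

	"k8s.io/api/core/v1"
)

// podsStillRunning reports whether any pod in the list is in a phase that the
// Recreate strategy must wait out before scaling up the new ReplicaSet.
func podsStillRunning(pods []v1.Pod) bool {
	for _, pod := range pods {
		switch pod.Status.Phase {
		case v1.PodFailed, v1.PodSucceeded:
			// Terminal phases don't count as running.
		default:
			// Pending, Running, Unknown (and anything else) block the rollout.
			return true
		}
	}
	return false
}

func main() {
	pods := []v1.Pod{{Status: v1.PodStatus{Phase: v1.PodSucceeded}}}
	fmt.Println(podsStillRunning(pods)) // false
}
```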


@ -20,8 +20,8 @@ import (
"fmt"
"testing"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/informers"
@ -33,7 +33,7 @@ import (
func TestScaleDownOldReplicaSets(t *testing.T) {
tests := []struct {
oldRSSizes []int
d *extensions.Deployment
d *apps.Deployment
}{
{
oldRSSizes: []int{3},
@ -45,7 +45,7 @@ func TestScaleDownOldReplicaSets(t *testing.T) {
t.Logf("running scenario %d", i)
test := tests[i]
var oldRSs []*extensions.ReplicaSet
var oldRSs []*apps.ReplicaSet
var expected []runtime.Object
for n, size := range test.oldRSSizes {
@ -58,14 +58,14 @@ func TestScaleDownOldReplicaSets(t *testing.T) {
rsCopy.Spec.Replicas = &zero
expected = append(expected, rsCopy)
if *(oldRSs[n].Spec.Replicas) == *(expected[n].(*extensions.ReplicaSet).Spec.Replicas) {
if *(oldRSs[n].Spec.Replicas) == *(expected[n].(*apps.ReplicaSet).Spec.Replicas) {
t.Errorf("broken test - original and expected RS have the same size")
}
}
kc := fake.NewSimpleClientset(expected...)
informers := informers.NewSharedInformerFactory(kc, controller.NoResyncPeriodFunc())
c, err := NewDeploymentController(informers.Extensions().V1beta1().Deployments(), informers.Extensions().V1beta1().ReplicaSets(), informers.Core().V1().Pods(), kc)
c, err := NewDeploymentController(informers.Apps().V1().Deployments(), informers.Apps().V1().ReplicaSets(), informers.Core().V1().Pods(), kc)
if err != nil {
t.Fatalf("error creating Deployment controller: %v", err)
}
@ -86,8 +86,8 @@ func TestOldPodsRunning(t *testing.T) {
tests := []struct {
name string
newRS *extensions.ReplicaSet
oldRSs []*extensions.ReplicaSet
newRS *apps.ReplicaSet
oldRSs []*apps.ReplicaSet
podMap map[types.UID]*v1.PodList
hasOldPodsRunning bool
@ -98,23 +98,23 @@ func TestOldPodsRunning(t *testing.T) {
},
{
name: "old RSs with running pods",
oldRSs: []*extensions.ReplicaSet{rsWithUID("some-uid"), rsWithUID("other-uid")},
oldRSs: []*apps.ReplicaSet{rsWithUID("some-uid"), rsWithUID("other-uid")},
podMap: podMapWithUIDs([]string{"some-uid", "other-uid"}),
hasOldPodsRunning: true,
},
{
name: "old RSs without pods but with non-zero status replicas",
oldRSs: []*extensions.ReplicaSet{newRSWithStatus("rs-1", 0, 1, nil)},
oldRSs: []*apps.ReplicaSet{newRSWithStatus("rs-1", 0, 1, nil)},
hasOldPodsRunning: true,
},
{
name: "old RSs without pods or non-zero status replicas",
oldRSs: []*extensions.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)},
oldRSs: []*apps.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)},
hasOldPodsRunning: false,
},
{
name: "old RSs with zero status replicas but pods in terminal state are present",
oldRSs: []*extensions.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)},
oldRSs: []*apps.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)},
podMap: map[types.UID]*v1.PodList{
"uid-1": {
Items: []v1.Pod{
@ -135,7 +135,7 @@ func TestOldPodsRunning(t *testing.T) {
},
{
name: "old RSs with zero status replicas but pod in unknown phase present",
oldRSs: []*extensions.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)},
oldRSs: []*apps.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)},
podMap: map[types.UID]*v1.PodList{
"uid-1": {
Items: []v1.Pod{
@ -151,7 +151,7 @@ func TestOldPodsRunning(t *testing.T) {
},
{
name: "old RSs with zero status replicas with pending pod present",
oldRSs: []*extensions.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)},
oldRSs: []*apps.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)},
podMap: map[types.UID]*v1.PodList{
"uid-1": {
Items: []v1.Pod{
@ -167,7 +167,7 @@ func TestOldPodsRunning(t *testing.T) {
},
{
name: "old RSs with zero status replicas with running pod present",
oldRSs: []*extensions.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)},
oldRSs: []*apps.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)},
podMap: map[types.UID]*v1.PodList{
"uid-1": {
Items: []v1.Pod{
@ -183,7 +183,7 @@ func TestOldPodsRunning(t *testing.T) {
},
{
name: "old RSs with zero status replicas but pods in terminal state and pending are present",
oldRSs: []*extensions.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)},
oldRSs: []*apps.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)},
podMap: map[types.UID]*v1.PodList{
"uid-1": {
Items: []v1.Pod{
@ -225,7 +225,7 @@ func TestOldPodsRunning(t *testing.T) {
}
}
func rsWithUID(uid string) *extensions.ReplicaSet {
func rsWithUID(uid string) *apps.ReplicaSet {
d := newDeployment("foo", 1, nil, nil, nil, map[string]string{"foo": "bar"})
rs := newReplicaSet(d, fmt.Sprintf("foo-%s", uid), 0)
rs.UID = types.UID(uid)


@ -18,9 +18,11 @@ package deployment
import (
"fmt"
"strconv"
"github.com/golang/glog"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
"k8s.io/apimachinery/pkg/types"
@ -28,17 +30,17 @@ import (
)
// rollback rolls the deployment back to the specified revision. In any case, it cleans up the rollback spec.
func (dc *DeploymentController) rollback(d *extensions.Deployment, rsList []*extensions.ReplicaSet, podMap map[types.UID]*v1.PodList) error {
func (dc *DeploymentController) rollback(d *apps.Deployment, rsList []*apps.ReplicaSet, podMap map[types.UID]*v1.PodList) error {
newRS, allOldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, podMap, true)
if err != nil {
return err
}
allRSs := append(allOldRSs, newRS)
toRevision := &d.Spec.RollbackTo.Revision
rollbackTo := getRollbackTo(d)
// If rollback revision is 0, rollback to the last revision
if *toRevision == 0 {
if *toRevision = deploymentutil.LastRevision(allRSs); *toRevision == 0 {
if rollbackTo.Revision == 0 {
if rollbackTo.Revision = deploymentutil.LastRevision(allRSs); rollbackTo.Revision == 0 {
// If we still can't find the last revision, give up on the rollback
dc.emitRollbackWarningEvent(d, deploymentutil.RollbackRevisionNotFound, "Unable to find last revision.")
// Give up on the rollback
@ -51,14 +53,14 @@ func (dc *DeploymentController) rollback(d *extensions.Deployment, rsList []*ext
glog.V(4).Infof("Unable to extract revision from deployment's replica set %q: %v", rs.Name, err)
continue
}
if v == *toRevision {
if v == rollbackTo.Revision {
glog.V(4).Infof("Found replica set %q with desired revision %d", rs.Name, v)
// rollback by copying podTemplate.Spec from the replica set
// revision number will be incremented during the next getAllReplicaSetsAndSyncRevision call
// no-op if the spec matches current deployment's podTemplate.Spec
performedRollback, err := dc.rollbackToTemplate(d, rs)
if performedRollback && err == nil {
dc.emitRollbackNormalEvent(d, fmt.Sprintf("Rolled back deployment %q to revision %d", d.Name, *toRevision))
dc.emitRollbackNormalEvent(d, fmt.Sprintf("Rolled back deployment %q to revision %d", d.Name, rollbackTo.Revision))
}
return err
}
@ -71,7 +73,7 @@ func (dc *DeploymentController) rollback(d *extensions.Deployment, rsList []*ext
// rollbackToTemplate compares the templates of the provided deployment and replica set and
// updates the deployment with the replica set template in case they are different. It also
// cleans up the rollback spec so subsequent requeues of the deployment won't end up in here.
func (dc *DeploymentController) rollbackToTemplate(d *extensions.Deployment, rs *extensions.ReplicaSet) (bool, error) {
func (dc *DeploymentController) rollbackToTemplate(d *apps.Deployment, rs *apps.ReplicaSet) (bool, error) {
performedRollback := false
if !deploymentutil.EqualIgnoreHash(&d.Spec.Template, &rs.Spec.Template) {
glog.V(4).Infof("Rolling back deployment %q to template spec %+v", d.Name, rs.Spec.Template.Spec)
@ -98,20 +100,49 @@ func (dc *DeploymentController) rollbackToTemplate(d *extensions.Deployment, rs
return performedRollback, dc.updateDeploymentAndClearRollbackTo(d)
}
func (dc *DeploymentController) emitRollbackWarningEvent(d *extensions.Deployment, reason, message string) {
func (dc *DeploymentController) emitRollbackWarningEvent(d *apps.Deployment, reason, message string) {
dc.eventRecorder.Eventf(d, v1.EventTypeWarning, reason, message)
}
func (dc *DeploymentController) emitRollbackNormalEvent(d *extensions.Deployment, message string) {
func (dc *DeploymentController) emitRollbackNormalEvent(d *apps.Deployment, message string) {
dc.eventRecorder.Eventf(d, v1.EventTypeNormal, deploymentutil.RollbackDone, message)
}
// updateDeploymentAndClearRollbackTo clears the deployment's rollbackTo annotation and updates the deployment.
// It is assumed that the caller will have updated the deployment template appropriately (in case
// we want to roll back).
func (dc *DeploymentController) updateDeploymentAndClearRollbackTo(d *extensions.Deployment) error {
func (dc *DeploymentController) updateDeploymentAndClearRollbackTo(d *apps.Deployment) error {
glog.V(4).Infof("Cleans up rollbackTo of deployment %q", d.Name)
d.Spec.RollbackTo = nil
_, err := dc.client.ExtensionsV1beta1().Deployments(d.Namespace).Update(d)
setRollbackTo(d, nil)
_, err := dc.client.AppsV1().Deployments(d.Namespace).Update(d)
return err
}
// TODO: Remove this when extensions/v1beta1 and apps/v1beta1 Deployment are dropped.
func getRollbackTo(d *apps.Deployment) *extensions.RollbackConfig {
// Extract the annotation used for round-tripping the deprecated RollbackTo field.
revision := d.Annotations[apps.DeprecatedRollbackTo]
if revision == "" {
return nil
}
revision64, err := strconv.ParseInt(revision, 10, 64)
if err != nil {
// If it's invalid, ignore it.
return nil
}
return &extensions.RollbackConfig{
Revision: revision64,
}
}
// TODO: Remove this when extensions/v1beta1 and apps/v1beta1 Deployment are dropped.
func setRollbackTo(d *apps.Deployment, rollbackTo *extensions.RollbackConfig) {
if rollbackTo == nil {
delete(d.Annotations, apps.DeprecatedRollbackTo)
return
}
if d.Annotations == nil {
d.Annotations = make(map[string]string)
}
d.Annotations[apps.DeprecatedRollbackTo] = strconv.FormatInt(rollbackTo.Revision, 10)
}
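Since apps/v1 drops spec.rollbackTo, the requested revision now round-trips through the apps.DeprecatedRollbackTo annotation, which is exactly what getRollbackTo/setRollbackTo above read and write. A hedged client-side sketch of what requesting a rollback looks like under the new API (the deployment name and revision are made up):

```go
package main

import (
	"fmt"
	"strconv"

	apps "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	d := &apps.Deployment{ObjectMeta: metav1.ObjectMeta{Name: "nginx"}}

	// Request a rollback to revision 2 the way older API versions expressed
	// spec.rollbackTo: by stamping the deprecated annotation.
	if d.Annotations == nil {
		d.Annotations = map[string]string{}
	}
	d.Annotations[apps.DeprecatedRollbackTo] = strconv.FormatInt(2, 10)

	// The controller reads it back (see getRollbackTo above) and clears it once
	// the rollback has been processed (setRollbackTo(d, nil)).
	rev, _ := strconv.ParseInt(d.Annotations[apps.DeprecatedRollbackTo], 10, 64)
	fmt.Println("rollback requested to revision", rev)
}
```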


@ -21,8 +21,8 @@ import (
"sort"
"github.com/golang/glog"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/util/integer"
"k8s.io/kubernetes/pkg/controller"
@ -30,7 +30,7 @@ import (
)
// rolloutRolling implements the logic for rolling a new replica set.
func (dc *DeploymentController) rolloutRolling(d *extensions.Deployment, rsList []*extensions.ReplicaSet, podMap map[types.UID]*v1.PodList) error {
func (dc *DeploymentController) rolloutRolling(d *apps.Deployment, rsList []*apps.ReplicaSet, podMap map[types.UID]*v1.PodList) error {
newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, podMap, true)
if err != nil {
return err
@ -67,7 +67,7 @@ func (dc *DeploymentController) rolloutRolling(d *extensions.Deployment, rsList
return dc.syncRolloutStatus(allRSs, newRS, d)
}
func (dc *DeploymentController) reconcileNewReplicaSet(allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet, deployment *extensions.Deployment) (bool, error) {
func (dc *DeploymentController) reconcileNewReplicaSet(allRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet, deployment *apps.Deployment) (bool, error) {
if *(newRS.Spec.Replicas) == *(deployment.Spec.Replicas) {
// Scaling not required.
return false, nil
@ -85,7 +85,7 @@ func (dc *DeploymentController) reconcileNewReplicaSet(allRSs []*extensions.Repl
return scaled, err
}
func (dc *DeploymentController) reconcileOldReplicaSets(allRSs []*extensions.ReplicaSet, oldRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet, deployment *extensions.Deployment) (bool, error) {
func (dc *DeploymentController) reconcileOldReplicaSets(allRSs []*apps.ReplicaSet, oldRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet, deployment *apps.Deployment) (bool, error) {
oldPodsCount := deploymentutil.GetReplicaCountForReplicaSets(oldRSs)
if oldPodsCount == 0 {
// Can't scale down further
@ -154,7 +154,7 @@ func (dc *DeploymentController) reconcileOldReplicaSets(allRSs []*extensions.Rep
}
// cleanupUnhealthyReplicas will scale down old replica sets with unhealthy replicas, so that all unhealthy replicas will be deleted.
func (dc *DeploymentController) cleanupUnhealthyReplicas(oldRSs []*extensions.ReplicaSet, deployment *extensions.Deployment, maxCleanupCount int32) ([]*extensions.ReplicaSet, int32, error) {
func (dc *DeploymentController) cleanupUnhealthyReplicas(oldRSs []*apps.ReplicaSet, deployment *apps.Deployment, maxCleanupCount int32) ([]*apps.ReplicaSet, int32, error) {
sort.Sort(controller.ReplicaSetsByCreationTimestamp(oldRSs))
// Safely scale down all old replica sets with unhealthy replicas. Replica set will sort the pods in the order
// such that not-ready < ready, unscheduled < scheduled, and pending < running. This ensures that unhealthy replicas will
@ -191,7 +191,7 @@ func (dc *DeploymentController) cleanupUnhealthyReplicas(oldRSs []*extensions.Re
// scaleDownOldReplicaSetsForRollingUpdate scales down old replica sets when deployment strategy is "RollingUpdate".
// We need to check maxUnavailable to ensure availability.
func (dc *DeploymentController) scaleDownOldReplicaSetsForRollingUpdate(allRSs []*extensions.ReplicaSet, oldRSs []*extensions.ReplicaSet, deployment *extensions.Deployment) (int32, error) {
func (dc *DeploymentController) scaleDownOldReplicaSetsForRollingUpdate(allRSs []*apps.ReplicaSet, oldRSs []*apps.ReplicaSet, deployment *apps.Deployment) (int32, error) {
maxUnavailable := deploymentutil.MaxUnavailable(*deployment)
// Check if we can scale down.

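A worked example of the availability budget that gates the scale-down above, assuming a deployment of 10 replicas with maxUnavailable: 25% (percentages round down for maxUnavailable). This mirrors deploymentutil.MaxUnavailable only in spirit; the numbers are illustrative.

```go
package main

import "fmt"

func main() {
	replicas := int32(10)
	maxUnavailablePercent := int32(25) // maxUnavailable: "25%"

	// maxUnavailable percentages round down, so 25% of 10 -> 2.
	maxUnavailable := replicas * maxUnavailablePercent / 100
	minAvailable := replicas - maxUnavailable

	availableNow := int32(9) // e.g. reported by the ReplicaSets' status
	// The rolling-update path may scale old ReplicaSets down by at most this much.
	scaleDownBudget := availableNow - minAvailable

	fmt.Println(maxUnavailable, minAvailable, scaleDownBudget) // 2 8 1
}
```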

@ -19,7 +19,7 @@ package deployment
import (
"testing"
extensions "k8s.io/api/extensions/v1beta1"
apps "k8s.io/api/apps/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
@ -82,7 +82,7 @@ func TestDeploymentController_reconcileNewReplicaSet(t *testing.T) {
t.Logf("executing scenario %d", i)
newRS := rs("foo-v2", test.newReplicas, nil, noTimestamp)
oldRS := rs("foo-v2", test.oldReplicas, nil, noTimestamp)
allRSs := []*extensions.ReplicaSet{newRS, oldRS}
allRSs := []*apps.ReplicaSet{newRS, oldRS}
maxUnavailable := intstr.FromInt(0)
deployment := newDeployment("foo", test.deploymentReplicas, nil, &test.maxSurge, &maxUnavailable, map[string]string{"foo": "bar"})
fake := fake.Clientset{}
@ -109,7 +109,7 @@ func TestDeploymentController_reconcileNewReplicaSet(t *testing.T) {
t.Errorf("expected 1 action during scale, got: %v", fake.Actions())
continue
}
updated := fake.Actions()[0].(core.UpdateAction).GetObject().(*extensions.ReplicaSet)
updated := fake.Actions()[0].(core.UpdateAction).GetObject().(*apps.ReplicaSet)
if e, a := test.expectedNewReplicas, int(*(updated.Spec.Replicas)); e != a {
t.Errorf("expected update to %d replicas, got %d", e, a)
}
@ -187,8 +187,8 @@ func TestDeploymentController_reconcileOldReplicaSets(t *testing.T) {
newRS.Status.AvailableReplicas = int32(test.readyPodsFromNewRS)
oldRS := rs("foo-old", test.oldReplicas, oldSelector, noTimestamp)
oldRS.Status.AvailableReplicas = int32(test.readyPodsFromOldRS)
oldRSs := []*extensions.ReplicaSet{oldRS}
allRSs := []*extensions.ReplicaSet{oldRS, newRS}
oldRSs := []*apps.ReplicaSet{oldRS}
allRSs := []*apps.ReplicaSet{oldRS, newRS}
maxSurge := intstr.FromInt(0)
deployment := newDeployment("foo", test.deploymentReplicas, nil, &maxSurge, &test.maxUnavailable, newSelector)
fakeClientset := fake.Clientset{}
@ -255,7 +255,7 @@ func TestDeploymentController_cleanupUnhealthyReplicas(t *testing.T) {
t.Logf("executing scenario %d", i)
oldRS := rs("foo-v2", test.oldReplicas, nil, noTimestamp)
oldRS.Status.AvailableReplicas = int32(test.readyPods)
oldRSs := []*extensions.ReplicaSet{oldRS}
oldRSs := []*apps.ReplicaSet{oldRS}
maxSurge := intstr.FromInt(2)
maxUnavailable := intstr.FromInt(2)
deployment := newDeployment("foo", 10, nil, &maxSurge, &maxUnavailable, nil)
@ -330,8 +330,8 @@ func TestDeploymentController_scaleDownOldReplicaSetsForRollingUpdate(t *testing
t.Logf("executing scenario %d", i)
oldRS := rs("foo-v2", test.oldReplicas, nil, noTimestamp)
oldRS.Status.AvailableReplicas = int32(test.readyPods)
allRSs := []*extensions.ReplicaSet{oldRS}
oldRSs := []*extensions.ReplicaSet{oldRS}
allRSs := []*apps.ReplicaSet{oldRS}
oldRSs := []*apps.ReplicaSet{oldRS}
maxSurge := intstr.FromInt(0)
deployment := newDeployment("foo", test.deploymentReplicas, nil, &maxSurge, &test.maxUnavailable, map[string]string{"foo": "bar"})
fakeClientset := fake.Clientset{}
@ -371,7 +371,7 @@ func TestDeploymentController_scaleDownOldReplicaSetsForRollingUpdate(t *testing
t.Errorf("expected an update action")
continue
}
updated := updateAction.GetObject().(*extensions.ReplicaSet)
updated := updateAction.GetObject().(*apps.ReplicaSet)
if e, a := test.expectedOldReplicas, int(*(updated.Spec.Replicas)); e != a {
t.Errorf("expected update to %d replicas, got %d", e, a)
}


@ -23,8 +23,8 @@ import (
"strconv"
"github.com/golang/glog"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
@ -35,7 +35,7 @@ import (
)
// syncStatusOnly only updates Deployments Status and doesn't take any mutating actions.
func (dc *DeploymentController) syncStatusOnly(d *extensions.Deployment, rsList []*extensions.ReplicaSet, podMap map[types.UID]*v1.PodList) error {
func (dc *DeploymentController) syncStatusOnly(d *apps.Deployment, rsList []*apps.ReplicaSet, podMap map[types.UID]*v1.PodList) error {
newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, podMap, false)
if err != nil {
return err
@ -47,7 +47,7 @@ func (dc *DeploymentController) syncStatusOnly(d *extensions.Deployment, rsList
// sync is responsible for reconciling deployments on scaling events or when they
// are paused.
func (dc *DeploymentController) sync(d *extensions.Deployment, rsList []*extensions.ReplicaSet, podMap map[types.UID]*v1.PodList) error {
func (dc *DeploymentController) sync(d *apps.Deployment, rsList []*apps.ReplicaSet, podMap map[types.UID]*v1.PodList) error {
newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, podMap, false)
if err != nil {
return err
@ -59,7 +59,7 @@ func (dc *DeploymentController) sync(d *extensions.Deployment, rsList []*extensi
}
// Clean up the deployment when it's paused and no rollback is in flight.
if d.Spec.Paused && d.Spec.RollbackTo == nil {
if d.Spec.Paused && getRollbackTo(d) == nil {
if err := dc.cleanupDeployment(oldRSs, d); err != nil {
return err
}
@ -72,11 +72,11 @@ func (dc *DeploymentController) sync(d *extensions.Deployment, rsList []*extensi
// checkPausedConditions checks if the given deployment is paused or not and adds an appropriate condition.
// These conditions are needed so that we won't accidentally report lack of progress for resumed deployments
// that were paused for longer than progressDeadlineSeconds.
func (dc *DeploymentController) checkPausedConditions(d *extensions.Deployment) error {
func (dc *DeploymentController) checkPausedConditions(d *apps.Deployment) error {
if d.Spec.ProgressDeadlineSeconds == nil {
return nil
}
cond := deploymentutil.GetDeploymentCondition(d.Status, extensions.DeploymentProgressing)
cond := deploymentutil.GetDeploymentCondition(d.Status, apps.DeploymentProgressing)
if cond != nil && cond.Reason == deploymentutil.TimedOutReason {
// If we have reported lack of progress, do not overwrite it with a paused condition.
return nil
@ -85,11 +85,11 @@ func (dc *DeploymentController) checkPausedConditions(d *extensions.Deployment)
needsUpdate := false
if d.Spec.Paused && !pausedCondExists {
condition := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionUnknown, deploymentutil.PausedDeployReason, "Deployment is paused")
condition := deploymentutil.NewDeploymentCondition(apps.DeploymentProgressing, v1.ConditionUnknown, deploymentutil.PausedDeployReason, "Deployment is paused")
deploymentutil.SetDeploymentCondition(&d.Status, *condition)
needsUpdate = true
} else if !d.Spec.Paused && pausedCondExists {
condition := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionUnknown, deploymentutil.ResumedDeployReason, "Deployment is resumed")
condition := deploymentutil.NewDeploymentCondition(apps.DeploymentProgressing, v1.ConditionUnknown, deploymentutil.ResumedDeployReason, "Deployment is resumed")
deploymentutil.SetDeploymentCondition(&d.Status, *condition)
needsUpdate = true
}
@ -99,7 +99,7 @@ func (dc *DeploymentController) checkPausedConditions(d *extensions.Deployment)
}
var err error
d, err = dc.client.ExtensionsV1beta1().Deployments(d.Namespace).UpdateStatus(d)
d, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(d)
return err
}
@ -115,7 +115,7 @@ func (dc *DeploymentController) checkPausedConditions(d *extensions.Deployment)
//
// Note that currently the deployment controller is using caches to avoid querying the server for reads.
// This may lead to stale reads of replica sets, thus incorrect deployment status.
func (dc *DeploymentController) getAllReplicaSetsAndSyncRevision(d *extensions.Deployment, rsList []*extensions.ReplicaSet, podMap map[types.UID]*v1.PodList, createIfNotExisted bool) (*extensions.ReplicaSet, []*extensions.ReplicaSet, error) {
func (dc *DeploymentController) getAllReplicaSetsAndSyncRevision(d *apps.Deployment, rsList []*apps.ReplicaSet, podMap map[types.UID]*v1.PodList, createIfNotExisted bool) (*apps.ReplicaSet, []*apps.ReplicaSet, error) {
_, allOldRSs := deploymentutil.FindOldReplicaSets(d, rsList)
// Get new replica set with the updated revision number
@ -132,7 +132,7 @@ func (dc *DeploymentController) getAllReplicaSetsAndSyncRevision(d *extensions.D
// 2. If there's existing new RS, update its revision number if it's smaller than (maxOldRevision + 1), where maxOldRevision is the max revision number among all old RSes.
// 3. If there's no existing new RS and createIfNotExisted is true, create one with appropriate revision number (maxOldRevision + 1) and replicas.
// Note that the pod-template-hash will be added to adopted RSes and pods.
func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsList, oldRSs []*extensions.ReplicaSet, createIfNotExisted bool) (*extensions.ReplicaSet, error) {
func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, oldRSs []*apps.ReplicaSet, createIfNotExisted bool) (*apps.ReplicaSet, error) {
existingNewRS := deploymentutil.FindNewReplicaSet(d, rsList)
// Calculate the max revision number among all old RSes
@ -152,7 +152,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsLis
minReadySecondsNeedsUpdate := rsCopy.Spec.MinReadySeconds != d.Spec.MinReadySeconds
if annotationsUpdated || minReadySecondsNeedsUpdate {
rsCopy.Spec.MinReadySeconds = d.Spec.MinReadySeconds
return dc.client.ExtensionsV1beta1().ReplicaSets(rsCopy.ObjectMeta.Namespace).Update(rsCopy)
return dc.client.AppsV1().ReplicaSets(rsCopy.ObjectMeta.Namespace).Update(rsCopy)
}
// Should use the revision in existingNewRS's annotation, since it was set before
@ -160,17 +160,17 @@ func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsLis
// If no other Progressing condition has been recorded and we need to estimate the progress
// of this deployment then it is likely that old users started caring about progress. In that
// case we need to take into account the first time we noticed their new replica set.
cond := deploymentutil.GetDeploymentCondition(d.Status, extensions.DeploymentProgressing)
cond := deploymentutil.GetDeploymentCondition(d.Status, apps.DeploymentProgressing)
if d.Spec.ProgressDeadlineSeconds != nil && cond == nil {
msg := fmt.Sprintf("Found new replica set %q", rsCopy.Name)
condition := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionTrue, deploymentutil.FoundNewRSReason, msg)
condition := deploymentutil.NewDeploymentCondition(apps.DeploymentProgressing, v1.ConditionTrue, deploymentutil.FoundNewRSReason, msg)
deploymentutil.SetDeploymentCondition(&d.Status, *condition)
needsUpdate = true
}
if needsUpdate {
var err error
if d, err = dc.client.ExtensionsV1beta1().Deployments(d.Namespace).UpdateStatus(d); err != nil {
if d, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(d); err != nil {
return nil, err
}
}
@ -184,19 +184,20 @@ func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsLis
// new ReplicaSet does not exist, create one.
newRSTemplate := *d.Spec.Template.DeepCopy()
podTemplateSpecHash := fmt.Sprintf("%d", controller.ComputeHash(&newRSTemplate, d.Status.CollisionCount))
newRSTemplate.Labels = labelsutil.CloneAndAddLabel(d.Spec.Template.Labels, extensions.DefaultDeploymentUniqueLabelKey, podTemplateSpecHash)
newRSTemplate.Labels = labelsutil.CloneAndAddLabel(d.Spec.Template.Labels, apps.DefaultDeploymentUniqueLabelKey, podTemplateSpecHash)
// Add podTemplateHash label to selector.
newRSSelector := labelsutil.CloneSelectorAndAddLabel(d.Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey, podTemplateSpecHash)
newRSSelector := labelsutil.CloneSelectorAndAddLabel(d.Spec.Selector, apps.DefaultDeploymentUniqueLabelKey, podTemplateSpecHash)
// Create new ReplicaSet
newRS := extensions.ReplicaSet{
newRS := apps.ReplicaSet{
ObjectMeta: metav1.ObjectMeta{
// Make the name deterministic, to ensure idempotence
Name: d.Name + "-" + rand.SafeEncodeString(podTemplateSpecHash),
Namespace: d.Namespace,
OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(d, controllerKind)},
Labels: newRSTemplate.Labels,
},
Spec: extensions.ReplicaSetSpec{
Spec: apps.ReplicaSetSpec{
Replicas: new(int32),
MinReadySeconds: d.Spec.MinReadySeconds,
Selector: newRSSelector,
@ -216,7 +217,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsLis
// hash collisions. If there is any other error, we need to report it in the status of
// the Deployment.
alreadyExists := false
createdRS, err := dc.client.ExtensionsV1beta1().ReplicaSets(d.Namespace).Create(&newRS)
createdRS, err := dc.client.AppsV1().ReplicaSets(d.Namespace).Create(&newRS)
switch {
// We may end up hitting this due to a slow cache or a fast resync of the Deployment.
case errors.IsAlreadyExists(err):
@ -248,7 +249,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsLis
*d.Status.CollisionCount++
// Update the collisionCount for the Deployment and let it requeue by returning the original
// error.
_, dErr := dc.client.ExtensionsV1beta1().Deployments(d.Namespace).UpdateStatus(d)
_, dErr := dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(d)
if dErr == nil {
glog.V(2).Infof("Found a hash collision for deployment %q - bumping collisionCount (%d->%d) to resolve it", d.Name, preCollisionCount, *d.Status.CollisionCount)
}
@ -256,12 +257,12 @@ func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsLis
case err != nil:
msg := fmt.Sprintf("Failed to create new replica set %q: %v", newRS.Name, err)
if d.Spec.ProgressDeadlineSeconds != nil {
cond := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionFalse, deploymentutil.FailedRSCreateReason, msg)
cond := deploymentutil.NewDeploymentCondition(apps.DeploymentProgressing, v1.ConditionFalse, deploymentutil.FailedRSCreateReason, msg)
deploymentutil.SetDeploymentCondition(&d.Status, *cond)
// We don't really care about this error at this point, since we have a bigger issue to report.
// TODO: Identify which errors are permanent and switch DeploymentIsFailed to take into account
// these reasons as well. Related issue: https://github.com/kubernetes/kubernetes/issues/18568
_, _ = dc.client.ExtensionsV1beta1().Deployments(d.Namespace).UpdateStatus(d)
_, _ = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(d)
}
dc.eventRecorder.Eventf(d, v1.EventTypeWarning, deploymentutil.FailedRSCreateReason, msg)
return nil, err
@ -273,12 +274,12 @@ func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsLis
needsUpdate := deploymentutil.SetDeploymentRevision(d, newRevision)
if !alreadyExists && d.Spec.ProgressDeadlineSeconds != nil {
msg := fmt.Sprintf("Created new replica set %q", createdRS.Name)
condition := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionTrue, deploymentutil.NewReplicaSetReason, msg)
condition := deploymentutil.NewDeploymentCondition(apps.DeploymentProgressing, v1.ConditionTrue, deploymentutil.NewReplicaSetReason, msg)
deploymentutil.SetDeploymentCondition(&d.Status, *condition)
needsUpdate = true
}
if needsUpdate {
_, err = dc.client.ExtensionsV1beta1().Deployments(d.Namespace).UpdateStatus(d)
_, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(d)
}
return createdRS, err
}
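The collision handling above works because the pod-template hash is a function of both the template and Status.CollisionCount, so bumping the counter and requeueing yields a different ReplicaSet name on the next sync. A rough sketch of the idea with a stand-in hash (the real controller uses controller.ComputeHash, as shown above):

```go
package main

import (
	"fmt"
	"hash/fnv"
)

// templateHash is a stand-in for controller.ComputeHash: mixing the collision
// count into the hash guarantees a different suffix after a collision bump.
func templateHash(template string, collisionCount *int32) uint32 {
	h := fnv.New32a()
	h.Write([]byte(template))
	if collisionCount != nil {
		fmt.Fprintf(h, "%d", *collisionCount)
	}
	return h.Sum32()
}

func main() {
	var count int32
	fmt.Println(templateHash("nginx:1.14", &count)) // hash with collisionCount=0
	count++                                         // what the controller does on a hash collision
	fmt.Println(templateHash("nginx:1.14", &count)) // different hash, new RS name
}
```
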
@ -288,7 +289,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsLis
// have the effect of hastening the rollout progress, which could produce a higher proportion of unavailable
// replicas in the event of a problem with the rolled out template. Should run only on scaling events or
// when a deployment is paused and not during the normal rollout process.
func (dc *DeploymentController) scale(deployment *extensions.Deployment, newRS *extensions.ReplicaSet, oldRSs []*extensions.ReplicaSet) error {
func (dc *DeploymentController) scale(deployment *apps.Deployment, newRS *apps.ReplicaSet, oldRSs []*apps.ReplicaSet) error {
// If there is only one active replica set then we should scale that up to the full count of the
// deployment. If there is no active replica set, then we should scale up the newest replica set.
if activeOrLatest := deploymentutil.FindActiveOrLatest(newRS, oldRSs); activeOrLatest != nil {
@ -386,7 +387,7 @@ func (dc *DeploymentController) scale(deployment *extensions.Deployment, newRS *
return nil
}
func (dc *DeploymentController) scaleReplicaSetAndRecordEvent(rs *extensions.ReplicaSet, newScale int32, deployment *extensions.Deployment) (bool, *extensions.ReplicaSet, error) {
func (dc *DeploymentController) scaleReplicaSetAndRecordEvent(rs *apps.ReplicaSet, newScale int32, deployment *apps.Deployment) (bool, *apps.ReplicaSet, error) {
// No need to scale
if *(rs.Spec.Replicas) == newScale {
return false, rs, nil
@ -401,7 +402,7 @@ func (dc *DeploymentController) scaleReplicaSetAndRecordEvent(rs *extensions.Rep
return scaled, newRS, err
}
func (dc *DeploymentController) scaleReplicaSet(rs *extensions.ReplicaSet, newScale int32, deployment *extensions.Deployment, scalingOperation string) (bool, *extensions.ReplicaSet, error) {
func (dc *DeploymentController) scaleReplicaSet(rs *apps.ReplicaSet, newScale int32, deployment *apps.Deployment, scalingOperation string) (bool, *apps.ReplicaSet, error) {
sizeNeedsUpdate := *(rs.Spec.Replicas) != newScale
@ -413,7 +414,7 @@ func (dc *DeploymentController) scaleReplicaSet(rs *extensions.ReplicaSet, newSc
rsCopy := rs.DeepCopy()
*(rsCopy.Spec.Replicas) = newScale
deploymentutil.SetReplicasAnnotations(rsCopy, *(deployment.Spec.Replicas), *(deployment.Spec.Replicas)+deploymentutil.MaxSurge(*deployment))
rs, err = dc.client.ExtensionsV1beta1().ReplicaSets(rsCopy.Namespace).Update(rsCopy)
rs, err = dc.client.AppsV1().ReplicaSets(rsCopy.Namespace).Update(rsCopy)
if err == nil && sizeNeedsUpdate {
scaled = true
dc.eventRecorder.Eventf(deployment, v1.EventTypeNormal, "ScalingReplicaSet", "Scaled %s replica set %s to %d", scalingOperation, rs.Name, newScale)
@ -425,13 +426,13 @@ func (dc *DeploymentController) scaleReplicaSet(rs *extensions.ReplicaSet, newSc
// cleanupDeployment is responsible for cleaning up a deployment, i.e. it retains all but the latest N old replica sets,
// where N=d.Spec.RevisionHistoryLimit. Old replica sets are older versions of a deployment's pod template, kept
// around by default 1) for historical reasons and 2) for the ability to roll back a deployment.
func (dc *DeploymentController) cleanupDeployment(oldRSs []*extensions.ReplicaSet, deployment *extensions.Deployment) error {
func (dc *DeploymentController) cleanupDeployment(oldRSs []*apps.ReplicaSet, deployment *apps.Deployment) error {
if deployment.Spec.RevisionHistoryLimit == nil {
return nil
}
// Avoid deleting replica set with deletion timestamp set
aliveFilter := func(rs *extensions.ReplicaSet) bool {
aliveFilter := func(rs *apps.ReplicaSet) bool {
return rs != nil && rs.ObjectMeta.DeletionTimestamp == nil
}
cleanableRSes := controller.FilterReplicaSets(oldRSs, aliveFilter)
@ -451,7 +452,7 @@ func (dc *DeploymentController) cleanupDeployment(oldRSs []*extensions.ReplicaSe
continue
}
glog.V(4).Infof("Trying to cleanup replica set %q for deployment %q", rs.Name, deployment.Name)
if err := dc.client.ExtensionsV1beta1().ReplicaSets(rs.Namespace).Delete(rs.Name, nil); err != nil && !errors.IsNotFound(err) {
if err := dc.client.AppsV1().ReplicaSets(rs.Namespace).Delete(rs.Name, nil); err != nil && !errors.IsNotFound(err) {
// Return error instead of aggregating and continuing DELETEs on the theory
// that we may be overloading the api server.
return err
@ -462,7 +463,7 @@ func (dc *DeploymentController) cleanupDeployment(oldRSs []*extensions.ReplicaSe
}
// syncDeploymentStatus checks if the status is up-to-date and syncs it if necessary
func (dc *DeploymentController) syncDeploymentStatus(allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet, d *extensions.Deployment) error {
func (dc *DeploymentController) syncDeploymentStatus(allRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet, d *apps.Deployment) error {
newStatus := calculateStatus(allRSs, newRS, d)
if reflect.DeepEqual(d.Status, newStatus) {
@ -471,12 +472,12 @@ func (dc *DeploymentController) syncDeploymentStatus(allRSs []*extensions.Replic
newDeployment := d
newDeployment.Status = newStatus
_, err := dc.client.ExtensionsV1beta1().Deployments(newDeployment.Namespace).UpdateStatus(newDeployment)
_, err := dc.client.AppsV1().Deployments(newDeployment.Namespace).UpdateStatus(newDeployment)
return err
}
// calculateStatus calculates the latest status for the provided deployment by looking into the provided replica sets.
func calculateStatus(allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet, deployment *extensions.Deployment) extensions.DeploymentStatus {
func calculateStatus(allRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet, deployment *apps.Deployment) apps.DeploymentStatus {
availableReplicas := deploymentutil.GetAvailableReplicaCountForReplicaSets(allRSs)
totalReplicas := deploymentutil.GetReplicaCountForReplicaSets(allRSs)
unavailableReplicas := totalReplicas - availableReplicas
@ -486,11 +487,11 @@ func calculateStatus(allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaS
unavailableReplicas = 0
}
status := extensions.DeploymentStatus{
status := apps.DeploymentStatus{
// TODO: Ensure that if we start retrying status updates, we won't pick up a new Generation value.
ObservedGeneration: deployment.Generation,
Replicas: deploymentutil.GetActualReplicaCountForReplicaSets(allRSs),
UpdatedReplicas: deploymentutil.GetActualReplicaCountForReplicaSets([]*extensions.ReplicaSet{newRS}),
UpdatedReplicas: deploymentutil.GetActualReplicaCountForReplicaSets([]*apps.ReplicaSet{newRS}),
ReadyReplicas: deploymentutil.GetReadyReplicaCountForReplicaSets(allRSs),
AvailableReplicas: availableReplicas,
UnavailableReplicas: unavailableReplicas,
@ -504,10 +505,10 @@ func calculateStatus(allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaS
}
if availableReplicas >= *(deployment.Spec.Replicas)-deploymentutil.MaxUnavailable(*deployment) {
minAvailability := deploymentutil.NewDeploymentCondition(extensions.DeploymentAvailable, v1.ConditionTrue, deploymentutil.MinimumReplicasAvailable, "Deployment has minimum availability.")
minAvailability := deploymentutil.NewDeploymentCondition(apps.DeploymentAvailable, v1.ConditionTrue, deploymentutil.MinimumReplicasAvailable, "Deployment has minimum availability.")
deploymentutil.SetDeploymentCondition(&status, *minAvailability)
} else {
noMinAvailability := deploymentutil.NewDeploymentCondition(extensions.DeploymentAvailable, v1.ConditionFalse, deploymentutil.MinimumReplicasUnavailable, "Deployment does not have minimum availability.")
noMinAvailability := deploymentutil.NewDeploymentCondition(apps.DeploymentAvailable, v1.ConditionFalse, deploymentutil.MinimumReplicasUnavailable, "Deployment does not have minimum availability.")
deploymentutil.SetDeploymentCondition(&status, *noMinAvailability)
}
@ -519,7 +520,7 @@ func calculateStatus(allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaS
//
// rsList should come from getReplicaSetsForDeployment(d).
// podMap should come from getPodMapForDeployment(d, rsList).
func (dc *DeploymentController) isScalingEvent(d *extensions.Deployment, rsList []*extensions.ReplicaSet, podMap map[types.UID]*v1.PodList) (bool, error) {
func (dc *DeploymentController) isScalingEvent(d *apps.Deployment, rsList []*apps.ReplicaSet, podMap map[types.UID]*v1.PodList) (bool, error) {
newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, podMap, false)
if err != nil {
return false, err

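The TestScale cases in the next file exercise proportional scaling. For the even "5 -> 10" case the arithmetic is simply each ReplicaSet's share times the new total over the old total; leftovers from uneven cases are distributed by deploymentutil.GetProportion, which this sketch deliberately does not reproduce.

```go
package main

import "fmt"

func main() {
	oldTotal, newTotal := int32(5), int32(10)

	// "proportional scaling: 5 -> 10": foo-v2 has 2 replicas, foo-v1 has 3.
	rsSizes := map[string]int32{"foo-v2": 2, "foo-v1": 3}
	for name, size := range rsSizes {
		// Exact shares only; rounding leftovers are handled by the controller.
		fmt.Printf("%s: %d -> %d\n", name, size, size*newTotal/oldTotal)
	}
	// foo-v2: 2 -> 4, foo-v1: 3 -> 6, matching the expected RSs in the test.
}
```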

@ -20,7 +20,7 @@ import (
"testing"
"time"
extensions "k8s.io/api/extensions/v1beta1"
apps "k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/client-go/informers"
@ -41,7 +41,7 @@ func TestScale(t *testing.T) {
oldTimestamp := metav1.Date(2016, 5, 20, 1, 0, 0, 0, time.UTC)
olderTimestamp := metav1.Date(2016, 5, 20, 0, 0, 0, 0, time.UTC)
var updatedTemplate = func(replicas int) *extensions.Deployment {
var updatedTemplate = func(replicas int) *apps.Deployment {
d := newDeployment("foo", replicas, nil, nil, nil, map[string]string{"foo": "bar"})
d.Spec.Template.Labels["another"] = "label"
return d
@ -49,14 +49,14 @@ func TestScale(t *testing.T) {
tests := []struct {
name string
deployment *extensions.Deployment
oldDeployment *extensions.Deployment
deployment *apps.Deployment
oldDeployment *apps.Deployment
newRS *extensions.ReplicaSet
oldRSs []*extensions.ReplicaSet
newRS *apps.ReplicaSet
oldRSs []*apps.ReplicaSet
expectedNew *extensions.ReplicaSet
expectedOld []*extensions.ReplicaSet
expectedNew *apps.ReplicaSet
expectedOld []*apps.ReplicaSet
wasntUpdated map[string]bool
desiredReplicasAnnotations map[string]int32
@ -67,10 +67,10 @@ func TestScale(t *testing.T) {
oldDeployment: newDeployment("foo", 10, nil, nil, nil, nil),
newRS: rs("foo-v1", 10, nil, newTimestamp),
oldRSs: []*extensions.ReplicaSet{},
oldRSs: []*apps.ReplicaSet{},
expectedNew: rs("foo-v1", 12, nil, newTimestamp),
expectedOld: []*extensions.ReplicaSet{},
expectedOld: []*apps.ReplicaSet{},
},
{
name: "normal scaling event: 10 -> 5",
@ -78,10 +78,10 @@ func TestScale(t *testing.T) {
oldDeployment: newDeployment("foo", 10, nil, nil, nil, nil),
newRS: rs("foo-v1", 10, nil, newTimestamp),
oldRSs: []*extensions.ReplicaSet{},
oldRSs: []*apps.ReplicaSet{},
expectedNew: rs("foo-v1", 5, nil, newTimestamp),
expectedOld: []*extensions.ReplicaSet{},
expectedOld: []*apps.ReplicaSet{},
},
{
name: "proportional scaling: 5 -> 10",
@ -89,10 +89,10 @@ func TestScale(t *testing.T) {
oldDeployment: newDeployment("foo", 5, nil, nil, nil, nil),
newRS: rs("foo-v2", 2, nil, newTimestamp),
oldRSs: []*extensions.ReplicaSet{rs("foo-v1", 3, nil, oldTimestamp)},
oldRSs: []*apps.ReplicaSet{rs("foo-v1", 3, nil, oldTimestamp)},
expectedNew: rs("foo-v2", 4, nil, newTimestamp),
expectedOld: []*extensions.ReplicaSet{rs("foo-v1", 6, nil, oldTimestamp)},
expectedOld: []*apps.ReplicaSet{rs("foo-v1", 6, nil, oldTimestamp)},
},
{
name: "proportional scaling: 5 -> 3",
@ -100,10 +100,10 @@ func TestScale(t *testing.T) {
oldDeployment: newDeployment("foo", 5, nil, nil, nil, nil),
newRS: rs("foo-v2", 2, nil, newTimestamp),
oldRSs: []*extensions.ReplicaSet{rs("foo-v1", 3, nil, oldTimestamp)},
oldRSs: []*apps.ReplicaSet{rs("foo-v1", 3, nil, oldTimestamp)},
expectedNew: rs("foo-v2", 1, nil, newTimestamp),
expectedOld: []*extensions.ReplicaSet{rs("foo-v1", 2, nil, oldTimestamp)},
expectedOld: []*apps.ReplicaSet{rs("foo-v1", 2, nil, oldTimestamp)},
},
{
name: "proportional scaling: 9 -> 4",
@ -111,10 +111,10 @@ func TestScale(t *testing.T) {
oldDeployment: newDeployment("foo", 9, nil, nil, nil, nil),
newRS: rs("foo-v2", 8, nil, newTimestamp),
oldRSs: []*extensions.ReplicaSet{rs("foo-v1", 1, nil, oldTimestamp)},
oldRSs: []*apps.ReplicaSet{rs("foo-v1", 1, nil, oldTimestamp)},
expectedNew: rs("foo-v2", 4, nil, newTimestamp),
expectedOld: []*extensions.ReplicaSet{rs("foo-v1", 0, nil, oldTimestamp)},
expectedOld: []*apps.ReplicaSet{rs("foo-v1", 0, nil, oldTimestamp)},
},
{
name: "proportional scaling: 7 -> 10",
@ -122,10 +122,10 @@ func TestScale(t *testing.T) {
oldDeployment: newDeployment("foo", 7, nil, nil, nil, nil),
newRS: rs("foo-v3", 2, nil, newTimestamp),
oldRSs: []*extensions.ReplicaSet{rs("foo-v2", 3, nil, oldTimestamp), rs("foo-v1", 2, nil, olderTimestamp)},
oldRSs: []*apps.ReplicaSet{rs("foo-v2", 3, nil, oldTimestamp), rs("foo-v1", 2, nil, olderTimestamp)},
expectedNew: rs("foo-v3", 3, nil, newTimestamp),
expectedOld: []*extensions.ReplicaSet{rs("foo-v2", 4, nil, oldTimestamp), rs("foo-v1", 3, nil, olderTimestamp)},
expectedOld: []*apps.ReplicaSet{rs("foo-v2", 4, nil, oldTimestamp), rs("foo-v1", 3, nil, olderTimestamp)},
},
{
name: "proportional scaling: 13 -> 8",
@ -133,10 +133,10 @@ func TestScale(t *testing.T) {
oldDeployment: newDeployment("foo", 13, nil, nil, nil, nil),
newRS: rs("foo-v3", 2, nil, newTimestamp),
oldRSs: []*extensions.ReplicaSet{rs("foo-v2", 8, nil, oldTimestamp), rs("foo-v1", 3, nil, olderTimestamp)},
oldRSs: []*apps.ReplicaSet{rs("foo-v2", 8, nil, oldTimestamp), rs("foo-v1", 3, nil, olderTimestamp)},
expectedNew: rs("foo-v3", 1, nil, newTimestamp),
expectedOld: []*extensions.ReplicaSet{rs("foo-v2", 5, nil, oldTimestamp), rs("foo-v1", 2, nil, olderTimestamp)},
expectedOld: []*apps.ReplicaSet{rs("foo-v2", 5, nil, oldTimestamp), rs("foo-v1", 2, nil, olderTimestamp)},
},
// Scales up the new replica set.
{
@ -145,10 +145,10 @@ func TestScale(t *testing.T) {
oldDeployment: newDeployment("foo", 3, nil, nil, nil, nil),
newRS: rs("foo-v3", 1, nil, newTimestamp),
oldRSs: []*extensions.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)},
oldRSs: []*apps.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)},
expectedNew: rs("foo-v3", 2, nil, newTimestamp),
expectedOld: []*extensions.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)},
expectedOld: []*apps.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)},
},
// Scales down the older replica set.
{
@ -157,10 +157,10 @@ func TestScale(t *testing.T) {
oldDeployment: newDeployment("foo", 3, nil, nil, nil, nil),
newRS: rs("foo-v3", 1, nil, newTimestamp),
oldRSs: []*extensions.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)},
oldRSs: []*apps.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)},
expectedNew: rs("foo-v3", 1, nil, newTimestamp),
expectedOld: []*extensions.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 0, nil, olderTimestamp)},
expectedOld: []*apps.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 0, nil, olderTimestamp)},
},
// Scales up the latest replica set first.
{
@ -169,10 +169,10 @@ func TestScale(t *testing.T) {
oldDeployment: newDeployment("foo", 4, nil, nil, nil, nil),
newRS: nil,
oldRSs: []*extensions.ReplicaSet{rs("foo-v2", 2, nil, oldTimestamp), rs("foo-v1", 2, nil, olderTimestamp)},
oldRSs: []*apps.ReplicaSet{rs("foo-v2", 2, nil, oldTimestamp), rs("foo-v1", 2, nil, olderTimestamp)},
expectedNew: nil,
expectedOld: []*extensions.ReplicaSet{rs("foo-v2", 3, nil, oldTimestamp), rs("foo-v1", 2, nil, olderTimestamp)},
expectedOld: []*apps.ReplicaSet{rs("foo-v2", 3, nil, oldTimestamp), rs("foo-v1", 2, nil, olderTimestamp)},
},
// Scales down to zero
{
@ -181,10 +181,10 @@ func TestScale(t *testing.T) {
oldDeployment: newDeployment("foo", 6, nil, nil, nil, nil),
newRS: rs("foo-v3", 3, nil, newTimestamp),
oldRSs: []*extensions.ReplicaSet{rs("foo-v2", 2, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)},
oldRSs: []*apps.ReplicaSet{rs("foo-v2", 2, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)},
expectedNew: rs("foo-v3", 0, nil, newTimestamp),
expectedOld: []*extensions.ReplicaSet{rs("foo-v2", 0, nil, oldTimestamp), rs("foo-v1", 0, nil, olderTimestamp)},
expectedOld: []*apps.ReplicaSet{rs("foo-v2", 0, nil, oldTimestamp), rs("foo-v1", 0, nil, olderTimestamp)},
},
// Scales up from zero
{
@ -193,10 +193,10 @@ func TestScale(t *testing.T) {
oldDeployment: newDeployment("foo", 6, nil, nil, nil, nil),
newRS: rs("foo-v3", 0, nil, newTimestamp),
oldRSs: []*extensions.ReplicaSet{rs("foo-v2", 0, nil, oldTimestamp), rs("foo-v1", 0, nil, olderTimestamp)},
oldRSs: []*apps.ReplicaSet{rs("foo-v2", 0, nil, oldTimestamp), rs("foo-v1", 0, nil, olderTimestamp)},
expectedNew: rs("foo-v3", 6, nil, newTimestamp),
expectedOld: []*extensions.ReplicaSet{rs("foo-v2", 0, nil, oldTimestamp), rs("foo-v1", 0, nil, olderTimestamp)},
expectedOld: []*apps.ReplicaSet{rs("foo-v2", 0, nil, oldTimestamp), rs("foo-v1", 0, nil, olderTimestamp)},
wasntUpdated: map[string]bool{"foo-v2": true, "foo-v1": true},
},
// Scenario: deployment.spec.replicas == 3 ( foo-v1.spec.replicas == foo-v2.spec.replicas == foo-v3.spec.replicas == 1 )
@ -208,10 +208,10 @@ func TestScale(t *testing.T) {
oldDeployment: newDeployment("foo", 5, nil, nil, nil, nil),
newRS: rs("foo-v3", 2, nil, newTimestamp),
oldRSs: []*extensions.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)},
oldRSs: []*apps.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)},
expectedNew: rs("foo-v3", 2, nil, newTimestamp),
expectedOld: []*extensions.ReplicaSet{rs("foo-v2", 2, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)},
expectedOld: []*apps.ReplicaSet{rs("foo-v2", 2, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)},
wasntUpdated: map[string]bool{"foo-v3": true, "foo-v1": true},
desiredReplicasAnnotations: map[string]int32{"foo-v2": int32(3)},
@ -222,10 +222,10 @@ func TestScale(t *testing.T) {
oldDeployment: newDeployment("foo", 10, nil, intOrStrP(2), nil, nil),
newRS: rs("foo-v2", 6, nil, newTimestamp),
oldRSs: []*extensions.ReplicaSet{rs("foo-v1", 6, nil, oldTimestamp)},
oldRSs: []*apps.ReplicaSet{rs("foo-v1", 6, nil, oldTimestamp)},
expectedNew: rs("foo-v2", 11, nil, newTimestamp),
expectedOld: []*extensions.ReplicaSet{rs("foo-v1", 11, nil, oldTimestamp)},
expectedOld: []*apps.ReplicaSet{rs("foo-v1", 11, nil, oldTimestamp)},
},
{
name: "change both surge and size",
@ -233,10 +233,10 @@ func TestScale(t *testing.T) {
oldDeployment: newDeployment("foo", 10, nil, intOrStrP(3), nil, nil),
newRS: rs("foo-v2", 5, nil, newTimestamp),
oldRSs: []*extensions.ReplicaSet{rs("foo-v1", 8, nil, oldTimestamp)},
oldRSs: []*apps.ReplicaSet{rs("foo-v1", 8, nil, oldTimestamp)},
expectedNew: rs("foo-v2", 22, nil, newTimestamp),
expectedOld: []*extensions.ReplicaSet{rs("foo-v1", 34, nil, oldTimestamp)},
expectedOld: []*apps.ReplicaSet{rs("foo-v1", 34, nil, oldTimestamp)},
},
{
name: "change both size and template",
@ -244,25 +244,25 @@ func TestScale(t *testing.T) {
oldDeployment: newDeployment("foo", 10, nil, nil, nil, map[string]string{"foo": "bar"}),
newRS: nil,
oldRSs: []*extensions.ReplicaSet{rs("foo-v2", 7, nil, newTimestamp), rs("foo-v1", 3, nil, oldTimestamp)},
oldRSs: []*apps.ReplicaSet{rs("foo-v2", 7, nil, newTimestamp), rs("foo-v1", 3, nil, oldTimestamp)},
expectedNew: nil,
expectedOld: []*extensions.ReplicaSet{rs("foo-v2", 10, nil, newTimestamp), rs("foo-v1", 4, nil, oldTimestamp)},
expectedOld: []*apps.ReplicaSet{rs("foo-v2", 10, nil, newTimestamp), rs("foo-v1", 4, nil, oldTimestamp)},
},
{
name: "saturated but broken new replica set does not affect old pods",
deployment: newDeployment("foo", 2, nil, intOrStrP(1), intOrStrP(1), nil),
oldDeployment: newDeployment("foo", 2, nil, intOrStrP(1), intOrStrP(1), nil),
newRS: func() *extensions.ReplicaSet {
newRS: func() *apps.ReplicaSet {
rs := rs("foo-v2", 2, nil, newTimestamp)
rs.Status.AvailableReplicas = 0
return rs
}(),
oldRSs: []*extensions.ReplicaSet{rs("foo-v1", 1, nil, oldTimestamp)},
oldRSs: []*apps.ReplicaSet{rs("foo-v1", 1, nil, oldTimestamp)},
expectedNew: rs("foo-v2", 2, nil, newTimestamp),
expectedOld: []*extensions.ReplicaSet{rs("foo-v1", 1, nil, oldTimestamp)},
expectedOld: []*apps.ReplicaSet{rs("foo-v1", 1, nil, oldTimestamp)},
},
}
@ -313,7 +313,7 @@ func TestScale(t *testing.T) {
}
// Get all the UPDATE actions and update nameToSize with all the updated sizes.
for _, action := range fake.Actions() {
rs := action.(testclient.UpdateAction).GetObject().(*extensions.ReplicaSet)
rs := action.(testclient.UpdateAction).GetObject().(*apps.ReplicaSet)
if !test.wasntUpdated[rs.Name] {
nameToSize[rs.Name] = *(rs.Spec.Replicas)
}
@ -345,12 +345,12 @@ func TestDeploymentController_cleanupDeployment(t *testing.T) {
alreadyDeleted.DeletionTimestamp = &now
tests := []struct {
oldRSs []*extensions.ReplicaSet
oldRSs []*apps.ReplicaSet
revisionHistoryLimit int32
expectedDeletions int
}{
{
oldRSs: []*extensions.ReplicaSet{
oldRSs: []*apps.ReplicaSet{
newRSWithStatus("foo-1", 0, 0, selector),
newRSWithStatus("foo-2", 0, 0, selector),
newRSWithStatus("foo-3", 0, 0, selector),
@ -360,7 +360,7 @@ func TestDeploymentController_cleanupDeployment(t *testing.T) {
},
{
// Only delete the replica set with Spec.Replicas = Status.Replicas = 0.
oldRSs: []*extensions.ReplicaSet{
oldRSs: []*apps.ReplicaSet{
newRSWithStatus("foo-1", 0, 0, selector),
newRSWithStatus("foo-2", 0, 1, selector),
newRSWithStatus("foo-3", 1, 0, selector),
@ -371,7 +371,7 @@ func TestDeploymentController_cleanupDeployment(t *testing.T) {
},
{
oldRSs: []*extensions.ReplicaSet{
oldRSs: []*apps.ReplicaSet{
newRSWithStatus("foo-1", 0, 0, selector),
newRSWithStatus("foo-2", 0, 0, selector),
},
@ -379,7 +379,7 @@ func TestDeploymentController_cleanupDeployment(t *testing.T) {
expectedDeletions: 2,
},
{
oldRSs: []*extensions.ReplicaSet{
oldRSs: []*apps.ReplicaSet{
newRSWithStatus("foo-1", 1, 1, selector),
newRSWithStatus("foo-2", 1, 1, selector),
},
@ -387,7 +387,7 @@ func TestDeploymentController_cleanupDeployment(t *testing.T) {
expectedDeletions: 0,
},
{
oldRSs: []*extensions.ReplicaSet{
oldRSs: []*apps.ReplicaSet{
alreadyDeleted,
},
revisionHistoryLimit: 0,
@ -401,7 +401,7 @@ func TestDeploymentController_cleanupDeployment(t *testing.T) {
fake := &fake.Clientset{}
informers := informers.NewSharedInformerFactory(fake, controller.NoResyncPeriodFunc())
controller, err := NewDeploymentController(informers.Extensions().V1beta1().Deployments(), informers.Extensions().V1beta1().ReplicaSets(), informers.Core().V1().Pods(), fake)
controller, err := NewDeploymentController(informers.Apps().V1().Deployments(), informers.Apps().V1().ReplicaSets(), informers.Core().V1().Pods(), fake)
if err != nil {
t.Fatalf("error creating Deployment controller: %v", err)
}
@ -411,7 +411,7 @@ func TestDeploymentController_cleanupDeployment(t *testing.T) {
controller.rsListerSynced = alwaysReady
controller.podListerSynced = alwaysReady
for _, rs := range test.oldRSs {
informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs)
informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rs)
}
stopCh := make(chan struct{})

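A condensed sketch of the eligibility rule these cleanup cases encode: only old ReplicaSets that are not being deleted and are fully scaled down (zero spec and status replicas) are candidates, and only those beyond revisionHistoryLimit are removed. The helper below is hypothetical and not the controller's cleanupDeployment itself.

```go
package main

import (
	"fmt"

	apps "k8s.io/api/apps/v1"
)

// deletableCount reports how many of the old ReplicaSets could be cleaned up
// once the newest `limit` of them are retained.
func deletableCount(oldRSs []*apps.ReplicaSet, limit int32) int {
	var cleanable []*apps.ReplicaSet
	for _, rs := range oldRSs {
		if rs == nil || rs.DeletionTimestamp != nil {
			continue // already being deleted
		}
		if (rs.Spec.Replicas != nil && *rs.Spec.Replicas != 0) || rs.Status.Replicas != 0 {
			continue // still has (or wants) pods; never delete these
		}
		cleanable = append(cleanable, rs)
	}
	if diff := len(cleanable) - int(limit); diff > 0 {
		return diff
	}
	return 0
}

func main() {
	zero := int32(0)
	rs := &apps.ReplicaSet{Spec: apps.ReplicaSetSpec{Replicas: &zero}}
	fmt.Println(deletableCount([]*apps.ReplicaSet{rs, rs.DeepCopy()}, 0)) // 2
}
```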

@ -19,8 +19,8 @@ go_library(
"//pkg/controller:go_default_library",
"//pkg/util/labels:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
@ -29,10 +29,10 @@ go_library(
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/listers/apps/v1:go_default_library",
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/listers/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/util/integer:go_default_library",
"//vendor/k8s.io/client-go/util/retry:go_default_library",
],
@ -48,8 +48,8 @@ go_test(
deps = [
"//pkg/controller:go_default_library",
"//pkg/util/hash:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",


@ -25,8 +25,8 @@ import (
"github.com/golang/glog"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -34,7 +34,7 @@ import (
"k8s.io/apimachinery/pkg/types"
intstrutil "k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/wait"
extensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1"
appsclient "k8s.io/client-go/kubernetes/typed/apps/v1"
"k8s.io/client-go/util/integer"
internalextensions "k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/controller"
@ -98,8 +98,8 @@ const (
)
// NewDeploymentCondition creates a new deployment condition.
func NewDeploymentCondition(condType extensions.DeploymentConditionType, status v1.ConditionStatus, reason, message string) *extensions.DeploymentCondition {
return &extensions.DeploymentCondition{
func NewDeploymentCondition(condType apps.DeploymentConditionType, status v1.ConditionStatus, reason, message string) *apps.DeploymentCondition {
return &apps.DeploymentCondition{
Type: condType,
Status: status,
LastUpdateTime: metav1.Now(),
@ -110,7 +110,7 @@ func NewDeploymentCondition(condType extensions.DeploymentConditionType, status
}
// GetDeploymentCondition returns the condition with the provided type.
func GetDeploymentCondition(status extensions.DeploymentStatus, condType extensions.DeploymentConditionType) *extensions.DeploymentCondition {
func GetDeploymentCondition(status apps.DeploymentStatus, condType apps.DeploymentConditionType) *apps.DeploymentCondition {
for i := range status.Conditions {
c := status.Conditions[i]
if c.Type == condType {
@ -122,7 +122,7 @@ func GetDeploymentCondition(status extensions.DeploymentStatus, condType extensi
// SetDeploymentCondition updates the deployment to include the provided condition. If the condition that
// we are about to add already exists and has the same status and reason then we are not going to update.
func SetDeploymentCondition(status *extensions.DeploymentStatus, condition extensions.DeploymentCondition) {
func SetDeploymentCondition(status *apps.DeploymentStatus, condition apps.DeploymentCondition) {
currentCond := GetDeploymentCondition(*status, condition.Type)
if currentCond != nil && currentCond.Status == condition.Status && currentCond.Reason == condition.Reason {
return
@ -136,13 +136,13 @@ func SetDeploymentCondition(status *extensions.DeploymentStatus, condition exten
}
// RemoveDeploymentCondition removes the deployment condition with the provided type.
func RemoveDeploymentCondition(status *extensions.DeploymentStatus, condType extensions.DeploymentConditionType) {
func RemoveDeploymentCondition(status *apps.DeploymentStatus, condType apps.DeploymentConditionType) {
status.Conditions = filterOutCondition(status.Conditions, condType)
}
// filterOutCondition returns a new slice of deployment conditions without conditions with the provided type.
func filterOutCondition(conditions []extensions.DeploymentCondition, condType extensions.DeploymentConditionType) []extensions.DeploymentCondition {
var newConditions []extensions.DeploymentCondition
func filterOutCondition(conditions []apps.DeploymentCondition, condType apps.DeploymentConditionType) []apps.DeploymentCondition {
var newConditions []apps.DeploymentCondition
for _, c := range conditions {
if c.Type == condType {
continue
@ -154,9 +154,9 @@ func filterOutCondition(conditions []extensions.DeploymentCondition, condType ex
// ReplicaSetToDeploymentCondition converts a replica set condition into a deployment condition.
// Useful for promoting replica set failure conditions into deployments.
func ReplicaSetToDeploymentCondition(cond extensions.ReplicaSetCondition) extensions.DeploymentCondition {
return extensions.DeploymentCondition{
Type: extensions.DeploymentConditionType(cond.Type),
func ReplicaSetToDeploymentCondition(cond apps.ReplicaSetCondition) apps.DeploymentCondition {
return apps.DeploymentCondition{
Type: apps.DeploymentConditionType(cond.Type),
Status: cond.Status,
LastTransitionTime: cond.LastTransitionTime,
LastUpdateTime: cond.LastTransitionTime,
@ -166,7 +166,7 @@ func ReplicaSetToDeploymentCondition(cond extensions.ReplicaSetCondition) extens
}
// SetDeploymentRevision updates the revision for a deployment.
func SetDeploymentRevision(deployment *extensions.Deployment, revision string) bool {
func SetDeploymentRevision(deployment *apps.Deployment, revision string) bool {
updated := false
if deployment.Annotations == nil {
@ -181,7 +181,7 @@ func SetDeploymentRevision(deployment *extensions.Deployment, revision string) b
}
// MaxRevision finds the highest revision in the replica sets
func MaxRevision(allRSs []*extensions.ReplicaSet) int64 {
func MaxRevision(allRSs []*apps.ReplicaSet) int64 {
max := int64(0)
for _, rs := range allRSs {
if v, err := Revision(rs); err != nil {
@ -195,7 +195,7 @@ func MaxRevision(allRSs []*extensions.ReplicaSet) int64 {
}
// LastRevision finds the second max revision number in all replica sets (the last revision)
func LastRevision(allRSs []*extensions.ReplicaSet) int64 {
func LastRevision(allRSs []*apps.ReplicaSet) int64 {
max, secMax := int64(0), int64(0)
for _, rs := range allRSs {
if v, err := Revision(rs); err != nil {
@ -226,7 +226,7 @@ func Revision(obj runtime.Object) (int64, error) {
// SetNewReplicaSetAnnotations sets new replica set's annotations appropriately by updating its revision and
// copying required deployment annotations to it; it returns true if replica set's annotation is changed.
func SetNewReplicaSetAnnotations(deployment *extensions.Deployment, newRS *extensions.ReplicaSet, newRevision string, exists bool) bool {
func SetNewReplicaSetAnnotations(deployment *apps.Deployment, newRS *apps.ReplicaSet, newRevision string, exists bool) bool {
// First, copy deployment's annotations (except for apply and revision annotations)
annotationChanged := copyDeploymentAnnotationsToReplicaSet(deployment, newRS)
// Then, update replica set's revision annotation
@ -283,6 +283,7 @@ var annotationsToSkip = map[string]bool{
RevisionHistoryAnnotation: true,
DesiredReplicasAnnotation: true,
MaxReplicasAnnotation: true,
apps.DeprecatedRollbackTo: true,
}
// skipCopyAnnotation returns true if we should skip copying the annotation with the given annotation key
@ -295,7 +296,7 @@ func skipCopyAnnotation(key string) bool {
// copyDeploymentAnnotationsToReplicaSet copies deployment's annotations to replica set's annotations,
// and returns true if replica set's annotation is changed.
// Note that apply and revision annotations are not copied.
func copyDeploymentAnnotationsToReplicaSet(deployment *extensions.Deployment, rs *extensions.ReplicaSet) bool {
func copyDeploymentAnnotationsToReplicaSet(deployment *apps.Deployment, rs *apps.ReplicaSet) bool {
rsAnnotationsChanged := false
if rs.Annotations == nil {
rs.Annotations = make(map[string]string)
@ -316,7 +317,7 @@ func copyDeploymentAnnotationsToReplicaSet(deployment *extensions.Deployment, rs
// SetDeploymentAnnotationsTo sets deployment's annotations as given RS's annotations.
// This action should be done if and only if the deployment is rolling back to this rs.
// Note that apply and revision annotations are not changed.
func SetDeploymentAnnotationsTo(deployment *extensions.Deployment, rollbackToRS *extensions.ReplicaSet) {
func SetDeploymentAnnotationsTo(deployment *apps.Deployment, rollbackToRS *apps.ReplicaSet) {
deployment.Annotations = getSkippedAnnotations(deployment.Annotations)
for k, v := range rollbackToRS.Annotations {
if !skipCopyAnnotation(k) {
@ -337,7 +338,7 @@ func getSkippedAnnotations(annotations map[string]string) map[string]string {
// FindActiveOrLatest returns the only active or the latest replica set in case there is at most one active
// replica set. If there are more active replica sets, then we should proportionally scale them.
func FindActiveOrLatest(newRS *extensions.ReplicaSet, oldRSs []*extensions.ReplicaSet) *extensions.ReplicaSet {
func FindActiveOrLatest(newRS *apps.ReplicaSet, oldRSs []*apps.ReplicaSet) *apps.ReplicaSet {
if newRS == nil && len(oldRSs) == 0 {
return nil
}
@ -360,15 +361,15 @@ func FindActiveOrLatest(newRS *extensions.ReplicaSet, oldRSs []*extensions.Repli
}
// GetDesiredReplicasAnnotation returns the number of desired replicas
func GetDesiredReplicasAnnotation(rs *extensions.ReplicaSet) (int32, bool) {
func GetDesiredReplicasAnnotation(rs *apps.ReplicaSet) (int32, bool) {
return getIntFromAnnotation(rs, DesiredReplicasAnnotation)
}
func getMaxReplicasAnnotation(rs *extensions.ReplicaSet) (int32, bool) {
func getMaxReplicasAnnotation(rs *apps.ReplicaSet) (int32, bool) {
return getIntFromAnnotation(rs, MaxReplicasAnnotation)
}
func getIntFromAnnotation(rs *extensions.ReplicaSet, annotationKey string) (int32, bool) {
func getIntFromAnnotation(rs *apps.ReplicaSet, annotationKey string) (int32, bool) {
annotationValue, ok := rs.Annotations[annotationKey]
if !ok {
return int32(0), false
@ -382,7 +383,7 @@ func getIntFromAnnotation(rs *extensions.ReplicaSet, annotationKey string) (int3
}
// SetReplicasAnnotations sets the desiredReplicas and maxReplicas into the annotations
func SetReplicasAnnotations(rs *extensions.ReplicaSet, desiredReplicas, maxReplicas int32) bool {
func SetReplicasAnnotations(rs *apps.ReplicaSet, desiredReplicas, maxReplicas int32) bool {
updated := false
if rs.Annotations == nil {
rs.Annotations = make(map[string]string)
@ -401,7 +402,7 @@ func SetReplicasAnnotations(rs *extensions.ReplicaSet, desiredReplicas, maxRepli
}
// ReplicasAnnotationsNeedUpdate returns true if the replicas annotations need to be updated
func ReplicasAnnotationsNeedUpdate(rs *extensions.ReplicaSet, desiredReplicas, maxReplicas int32) bool {
func ReplicasAnnotationsNeedUpdate(rs *apps.ReplicaSet, desiredReplicas, maxReplicas int32) bool {
if rs.Annotations == nil {
return true
}
@ -417,7 +418,7 @@ func ReplicasAnnotationsNeedUpdate(rs *extensions.ReplicaSet, desiredReplicas, m
}
// MaxUnavailable returns the maximum unavailable pods a rolling deployment can take.
func MaxUnavailable(deployment extensions.Deployment) int32 {
func MaxUnavailable(deployment apps.Deployment) int32 {
if !IsRollingUpdate(&deployment) || *(deployment.Spec.Replicas) == 0 {
return int32(0)
}
@ -430,7 +431,7 @@ func MaxUnavailable(deployment extensions.Deployment) int32 {
}
// MinAvailable returns the minimum available pods of a given deployment
func MinAvailable(deployment *extensions.Deployment) int32 {
func MinAvailable(deployment *apps.Deployment) int32 {
if !IsRollingUpdate(deployment) {
return int32(0)
}
@ -438,7 +439,7 @@ func MinAvailable(deployment *extensions.Deployment) int32 {
}
// MaxSurge returns the maximum surge pods a rolling deployment can take.
func MaxSurge(deployment extensions.Deployment) int32 {
func MaxSurge(deployment apps.Deployment) int32 {
if !IsRollingUpdate(&deployment) {
return int32(0)
}
@ -450,7 +451,7 @@ func MaxSurge(deployment extensions.Deployment) int32 {
// GetProportion will estimate the proportion for the provided replica set using 1. the current size
// of the parent deployment, 2. the replica count that needs to be added on the replica sets of the
// deployment, and 3. the total replicas added in the replica sets of the deployment so far.
func GetProportion(rs *extensions.ReplicaSet, d extensions.Deployment, deploymentReplicasToAdd, deploymentReplicasAdded int32) int32 {
func GetProportion(rs *apps.ReplicaSet, d apps.Deployment, deploymentReplicasToAdd, deploymentReplicasAdded int32) int32 {
if rs == nil || *(rs.Spec.Replicas) == 0 || deploymentReplicasToAdd == 0 || deploymentReplicasToAdd == deploymentReplicasAdded {
return int32(0)
}
@ -472,7 +473,7 @@ func GetProportion(rs *extensions.ReplicaSet, d extensions.Deployment, deploymen
// getReplicaSetFraction estimates the fraction of replicas a replica set can have in
// 1. a scaling event during a rollout or 2. when scaling a paused deployment.
func getReplicaSetFraction(rs extensions.ReplicaSet, d extensions.Deployment) int32 {
func getReplicaSetFraction(rs apps.ReplicaSet, d apps.Deployment) int32 {
// If we are scaling down to zero then the fraction of this replica set is its whole size (negative)
if *(d.Spec.Replicas) == int32(0) {
return -*(rs.Spec.Replicas)
@ -497,7 +498,7 @@ func getReplicaSetFraction(rs extensions.ReplicaSet, d extensions.Deployment) in
// GetAllReplicaSets returns the old and new replica sets targeted by the given Deployment. It gets PodList and ReplicaSetList from client interface.
// Note that the first set of old replica sets doesn't include the ones with no pods, and the second set of old replica sets includes all old replica sets.
// The third returned value is the new replica set, and it may be nil if it doesn't exist yet.
func GetAllReplicaSets(deployment *extensions.Deployment, c extensionsv1beta1.ExtensionsV1beta1Interface) ([]*extensions.ReplicaSet, []*extensions.ReplicaSet, *extensions.ReplicaSet, error) {
func GetAllReplicaSets(deployment *apps.Deployment, c appsclient.AppsV1Interface) ([]*apps.ReplicaSet, []*apps.ReplicaSet, *apps.ReplicaSet, error) {
rsList, err := ListReplicaSets(deployment, RsListFromClient(c))
if err != nil {
return nil, nil, nil, err
@ -509,7 +510,7 @@ func GetAllReplicaSets(deployment *extensions.Deployment, c extensionsv1beta1.Ex
// GetOldReplicaSets returns the old replica sets targeted by the given Deployment; get PodList and ReplicaSetList from client interface.
// Note that the first set of old replica sets doesn't include the ones with no pods, and the second set of old replica sets includes all old replica sets.
func GetOldReplicaSets(deployment *extensions.Deployment, c extensionsv1beta1.ExtensionsV1beta1Interface) ([]*extensions.ReplicaSet, []*extensions.ReplicaSet, error) {
func GetOldReplicaSets(deployment *apps.Deployment, c appsclient.AppsV1Interface) ([]*apps.ReplicaSet, []*apps.ReplicaSet, error) {
rsList, err := ListReplicaSets(deployment, RsListFromClient(c))
if err != nil {
return nil, nil, err
@ -520,7 +521,7 @@ func GetOldReplicaSets(deployment *extensions.Deployment, c extensionsv1beta1.Ex
// GetNewReplicaSet returns a replica set that matches the intent of the given deployment; get ReplicaSetList from client interface.
// Returns nil if the new replica set doesn't exist yet.
func GetNewReplicaSet(deployment *extensions.Deployment, c extensionsv1beta1.ExtensionsV1beta1Interface) (*extensions.ReplicaSet, error) {
func GetNewReplicaSet(deployment *apps.Deployment, c appsclient.AppsV1Interface) (*apps.ReplicaSet, error) {
rsList, err := ListReplicaSets(deployment, RsListFromClient(c))
if err != nil {
return nil, err
@ -529,13 +530,13 @@ func GetNewReplicaSet(deployment *extensions.Deployment, c extensionsv1beta1.Ext
}
// RsListFromClient returns an rsListFunc that wraps the given client.
func RsListFromClient(c extensionsv1beta1.ExtensionsV1beta1Interface) RsListFunc {
return func(namespace string, options metav1.ListOptions) ([]*extensions.ReplicaSet, error) {
func RsListFromClient(c appsclient.AppsV1Interface) RsListFunc {
return func(namespace string, options metav1.ListOptions) ([]*apps.ReplicaSet, error) {
rsList, err := c.ReplicaSets(namespace).List(options)
if err != nil {
return nil, err
}
var ret []*extensions.ReplicaSet
var ret []*apps.ReplicaSet
for i := range rsList.Items {
ret = append(ret, &rsList.Items[i])
}
@ -544,14 +545,14 @@ func RsListFromClient(c extensionsv1beta1.ExtensionsV1beta1Interface) RsListFunc
}
// TODO: switch this to full namespacers
type RsListFunc func(string, metav1.ListOptions) ([]*extensions.ReplicaSet, error)
type RsListFunc func(string, metav1.ListOptions) ([]*apps.ReplicaSet, error)
type podListFunc func(string, metav1.ListOptions) (*v1.PodList, error)
// ListReplicaSets returns a slice of RSes the given deployment targets.
// Note that this does NOT attempt to reconcile ControllerRef (adopt/orphan),
// because only the controller itself should do that.
// However, it does filter out anything whose ControllerRef doesn't match.
func ListReplicaSets(deployment *extensions.Deployment, getRSList RsListFunc) ([]*extensions.ReplicaSet, error) {
func ListReplicaSets(deployment *apps.Deployment, getRSList RsListFunc) ([]*apps.ReplicaSet, error) {
// TODO: Right now we list replica sets by their labels. We should list them by selector, i.e. the replica set's selector
// should be a superset of the deployment's selector, see https://github.com/kubernetes/kubernetes/issues/19830.
namespace := deployment.Namespace
@ -565,7 +566,7 @@ func ListReplicaSets(deployment *extensions.Deployment, getRSList RsListFunc) ([
return nil, err
}
// Only include those whose ControllerRef matches the Deployment.
owned := make([]*extensions.ReplicaSet, 0, len(all))
owned := make([]*apps.ReplicaSet, 0, len(all))
for _, rs := range all {
if metav1.IsControlledBy(rs, deployment) {
owned = append(owned, rs)
@ -603,7 +604,7 @@ func ListReplicaSetsInternal(deployment *internalextensions.Deployment, getRSLis
// Note that this does NOT attempt to reconcile ControllerRef (adopt/orphan),
// because only the controller itself should do that.
// However, it does filter out anything whose ControllerRef doesn't match.
func ListPods(deployment *extensions.Deployment, rsList []*extensions.ReplicaSet, getPodList podListFunc) (*v1.PodList, error) {
func ListPods(deployment *apps.Deployment, rsList []*apps.ReplicaSet, getPodList podListFunc) (*v1.PodList, error) {
namespace := deployment.Namespace
selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
if err != nil {
@ -640,13 +641,13 @@ func EqualIgnoreHash(template1, template2 *v1.PodTemplateSpec) bool {
t1Copy := template1.DeepCopy()
t2Copy := template2.DeepCopy()
// Remove hash labels from template.Labels before comparing
delete(t1Copy.Labels, extensions.DefaultDeploymentUniqueLabelKey)
delete(t2Copy.Labels, extensions.DefaultDeploymentUniqueLabelKey)
delete(t1Copy.Labels, apps.DefaultDeploymentUniqueLabelKey)
delete(t2Copy.Labels, apps.DefaultDeploymentUniqueLabelKey)
return apiequality.Semantic.DeepEqual(t1Copy, t2Copy)
}
// FindNewReplicaSet returns the new RS this given deployment targets (the one with the same pod template).
func FindNewReplicaSet(deployment *extensions.Deployment, rsList []*extensions.ReplicaSet) *extensions.ReplicaSet {
func FindNewReplicaSet(deployment *apps.Deployment, rsList []*apps.ReplicaSet) *apps.ReplicaSet {
sort.Sort(controller.ReplicaSetsByCreationTimestamp(rsList))
for i := range rsList {
if EqualIgnoreHash(&rsList[i].Spec.Template, &deployment.Spec.Template) {
@ -663,9 +664,9 @@ func FindNewReplicaSet(deployment *extensions.Deployment, rsList []*extensions.R
// FindOldReplicaSets returns the old replica sets targeted by the given Deployment, with the given slice of RSes.
// Note that the first set of old replica sets doesn't include the ones with no pods, and the second set of old replica sets includes all old replica sets.
func FindOldReplicaSets(deployment *extensions.Deployment, rsList []*extensions.ReplicaSet) ([]*extensions.ReplicaSet, []*extensions.ReplicaSet) {
var requiredRSs []*extensions.ReplicaSet
var allRSs []*extensions.ReplicaSet
func FindOldReplicaSets(deployment *apps.Deployment, rsList []*apps.ReplicaSet) ([]*apps.ReplicaSet, []*apps.ReplicaSet) {
var requiredRSs []*apps.ReplicaSet
var allRSs []*apps.ReplicaSet
newRS := FindNewReplicaSet(deployment, rsList)
for _, rs := range rsList {
// Filter out new replica set
@ -681,17 +682,17 @@ func FindOldReplicaSets(deployment *extensions.Deployment, rsList []*extensions.
}
// SetFromReplicaSetTemplate sets the desired PodTemplateSpec from a replica set template to the given deployment.
func SetFromReplicaSetTemplate(deployment *extensions.Deployment, template v1.PodTemplateSpec) *extensions.Deployment {
func SetFromReplicaSetTemplate(deployment *apps.Deployment, template v1.PodTemplateSpec) *apps.Deployment {
deployment.Spec.Template.ObjectMeta = template.ObjectMeta
deployment.Spec.Template.Spec = template.Spec
deployment.Spec.Template.ObjectMeta.Labels = labelsutil.CloneAndRemoveLabel(
deployment.Spec.Template.ObjectMeta.Labels,
extensions.DefaultDeploymentUniqueLabelKey)
apps.DefaultDeploymentUniqueLabelKey)
return deployment
}
// GetReplicaCountForReplicaSets returns the sum of Replicas of the given replica sets.
func GetReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet) int32 {
func GetReplicaCountForReplicaSets(replicaSets []*apps.ReplicaSet) int32 {
totalReplicas := int32(0)
for _, rs := range replicaSets {
if rs != nil {
@ -702,7 +703,7 @@ func GetReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet) int32 {
}
// GetActualReplicaCountForReplicaSets returns the sum of actual replicas of the given replica sets.
func GetActualReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet) int32 {
func GetActualReplicaCountForReplicaSets(replicaSets []*apps.ReplicaSet) int32 {
totalActualReplicas := int32(0)
for _, rs := range replicaSets {
if rs != nil {
@ -713,7 +714,7 @@ func GetActualReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet) i
}
// GetReadyReplicaCountForReplicaSets returns the number of ready pods corresponding to the given replica sets.
func GetReadyReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet) int32 {
func GetReadyReplicaCountForReplicaSets(replicaSets []*apps.ReplicaSet) int32 {
totalReadyReplicas := int32(0)
for _, rs := range replicaSets {
if rs != nil {
@ -724,7 +725,7 @@ func GetReadyReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet) in
}
// GetAvailableReplicaCountForReplicaSets returns the number of available pods corresponding to the given replica sets.
func GetAvailableReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet) int32 {
func GetAvailableReplicaCountForReplicaSets(replicaSets []*apps.ReplicaSet) int32 {
totalAvailableReplicas := int32(0)
for _, rs := range replicaSets {
if rs != nil {
@ -735,13 +736,13 @@ func GetAvailableReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet
}
// IsRollingUpdate returns true if the strategy type is a rolling update.
func IsRollingUpdate(deployment *extensions.Deployment) bool {
return deployment.Spec.Strategy.Type == extensions.RollingUpdateDeploymentStrategyType
func IsRollingUpdate(deployment *apps.Deployment) bool {
return deployment.Spec.Strategy.Type == apps.RollingUpdateDeploymentStrategyType
}
// DeploymentComplete considers a deployment to be complete once all of its desired replicas
// are updated and available, and no old pods are running.
func DeploymentComplete(deployment *extensions.Deployment, newStatus *extensions.DeploymentStatus) bool {
func DeploymentComplete(deployment *apps.Deployment, newStatus *apps.DeploymentStatus) bool {
return newStatus.UpdatedReplicas == *(deployment.Spec.Replicas) &&
newStatus.Replicas == *(deployment.Spec.Replicas) &&
newStatus.AvailableReplicas == *(deployment.Spec.Replicas) &&
@ -752,7 +753,7 @@ func DeploymentComplete(deployment *extensions.Deployment, newStatus *extensions
// current with the new status of the deployment that the controller is observing. More specifically,
// when new pods are scaled up or become ready or available, or old pods are scaled down, then we
// consider the deployment is progressing.
func DeploymentProgressing(deployment *extensions.Deployment, newStatus *extensions.DeploymentStatus) bool {
func DeploymentProgressing(deployment *apps.Deployment, newStatus *apps.DeploymentStatus) bool {
oldStatus := deployment.Status
// Old replicas that need to be scaled down
@ -771,7 +772,7 @@ var nowFn = func() time.Time { return time.Now() }
// DeploymentTimedOut considers a deployment to have timed out once its condition that reports progress
// is older than progressDeadlineSeconds or a Progressing condition with a TimedOutReason reason already
// exists.
func DeploymentTimedOut(deployment *extensions.Deployment, newStatus *extensions.DeploymentStatus) bool {
func DeploymentTimedOut(deployment *apps.Deployment, newStatus *apps.DeploymentStatus) bool {
if deployment.Spec.ProgressDeadlineSeconds == nil {
return false
}
@ -779,7 +780,7 @@ func DeploymentTimedOut(deployment *extensions.Deployment, newStatus *extensions
// Look for the Progressing condition. If it doesn't exist, we have no base to estimate progress.
// If it's already set with a TimedOutReason reason, we have already timed out, no need to check
// again.
condition := GetDeploymentCondition(*newStatus, extensions.DeploymentProgressing)
condition := GetDeploymentCondition(*newStatus, apps.DeploymentProgressing)
if condition == nil {
return false
}
@ -817,9 +818,9 @@ func DeploymentTimedOut(deployment *extensions.Deployment, newStatus *extensions
// When one of the following is true, we're rolling out the deployment; otherwise, we're scaling it.
// 1) The new RS is saturated: newRS's replicas == deployment's replicas
// 2) Max number of pods allowed is reached: deployment's replicas + maxSurge == all RSs' replicas
func NewRSNewReplicas(deployment *extensions.Deployment, allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet) (int32, error) {
func NewRSNewReplicas(deployment *apps.Deployment, allRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet) (int32, error) {
switch deployment.Spec.Strategy.Type {
case extensions.RollingUpdateDeploymentStrategyType:
case apps.RollingUpdateDeploymentStrategyType:
// Check if we can scale up.
maxSurge, err := intstrutil.GetValueFromIntOrPercent(deployment.Spec.Strategy.RollingUpdate.MaxSurge, int(*(deployment.Spec.Replicas)), true)
if err != nil {
@ -837,7 +838,7 @@ func NewRSNewReplicas(deployment *extensions.Deployment, allRSs []*extensions.Re
// Do not exceed the number of desired replicas.
scaleUpCount = int32(integer.IntMin(int(scaleUpCount), int(*(deployment.Spec.Replicas)-*(newRS.Spec.Replicas))))
return *(newRS.Spec.Replicas) + scaleUpCount, nil
case extensions.RecreateDeploymentStrategyType:
case apps.RecreateDeploymentStrategyType:
return *(deployment.Spec.Replicas), nil
default:
return 0, fmt.Errorf("deployment type %v isn't supported", deployment.Spec.Strategy.Type)
@ -848,7 +849,7 @@ func NewRSNewReplicas(deployment *extensions.Deployment, allRSs []*extensions.Re
// Both the deployment and the replica set have to believe this replica set can own all of the desired
// replicas in the deployment and the annotation helps in achieving that. All pods of the ReplicaSet
// need to be available.
func IsSaturated(deployment *extensions.Deployment, rs *extensions.ReplicaSet) bool {
func IsSaturated(deployment *apps.Deployment, rs *apps.ReplicaSet) bool {
if rs == nil {
return false
}
@ -864,7 +865,7 @@ func IsSaturated(deployment *extensions.Deployment, rs *extensions.ReplicaSet) b
// WaitForObservedDeployment polls for deployment to be updated so that deployment.Status.ObservedGeneration >= desiredGeneration.
// Returns an error if polling times out.
func WaitForObservedDeployment(getDeploymentFunc func() (*extensions.Deployment, error), desiredGeneration int64, interval, timeout time.Duration) error {
func WaitForObservedDeployment(getDeploymentFunc func() (*apps.Deployment, error), desiredGeneration int64, interval, timeout time.Duration) error {
// TODO: This should take clientset.Interface when all code is updated to use clientset. Keeping it this way allows the function to be used by callers who have client.Interface.
return wait.PollImmediate(interval, timeout, func() (bool, error) {
deployment, err := getDeploymentFunc()
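
The MaxSurge/MaxUnavailable helpers above keep their arithmetic and only change parameter types; both still resolve an IntOrString against spec.replicas. The following standalone sketch shows that resolution with the apps/v1 types; the Deployment literal and the 25%/2 values are illustrative assumptions, not part of this change:

```go
package main

import (
	"fmt"

	apps "k8s.io/api/apps/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	replicas := int32(10)
	maxSurge := intstr.FromString("25%")
	maxUnavailable := intstr.FromInt(2)

	d := apps.Deployment{
		Spec: apps.DeploymentSpec{
			Replicas: &replicas,
			Strategy: apps.DeploymentStrategy{
				Type: apps.RollingUpdateDeploymentStrategyType,
				RollingUpdate: &apps.RollingUpdateDeployment{
					MaxSurge:       &maxSurge,
					MaxUnavailable: &maxUnavailable,
				},
			},
		},
	}

	// Surge is resolved rounding up (mirroring the roundUp=true call visible in
	// NewRSNewReplicas above); unavailability is conventionally rounded down.
	surge, _ := intstr.GetValueFromIntOrPercent(d.Spec.Strategy.RollingUpdate.MaxSurge, int(*d.Spec.Replicas), true)
	unavailable, _ := intstr.GetValueFromIntOrPercent(d.Spec.Strategy.RollingUpdate.MaxUnavailable, int(*d.Spec.Replicas), false)

	fmt.Println("maxSurge:", surge, "maxUnavailable:", unavailable) // 3 2
}
```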

View File

@ -25,8 +25,8 @@ import (
"testing"
"time"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
@ -53,7 +53,7 @@ func addListPodsReactor(fakeClient *fake.Clientset, obj runtime.Object) *fake.Cl
}
func addGetRSReactor(fakeClient *fake.Clientset, obj runtime.Object) *fake.Clientset {
rsList, ok := obj.(*extensions.ReplicaSetList)
rsList, ok := obj.(*apps.ReplicaSetList)
fakeClient.AddReactor("get", "replicasets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
name := action.(core.GetAction).GetName()
if ok {
@ -71,7 +71,7 @@ func addGetRSReactor(fakeClient *fake.Clientset, obj runtime.Object) *fake.Clien
func addUpdateRSReactor(fakeClient *fake.Clientset) *fake.Clientset {
fakeClient.AddReactor("update", "replicasets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
obj := action.(core.UpdateAction).GetObject().(*extensions.ReplicaSet)
obj := action.(core.UpdateAction).GetObject().(*apps.ReplicaSet)
return true, obj, nil
})
return fakeClient
@ -85,13 +85,13 @@ func addUpdatePodsReactor(fakeClient *fake.Clientset) *fake.Clientset {
return fakeClient
}
func generateRSWithLabel(labels map[string]string, image string) extensions.ReplicaSet {
return extensions.ReplicaSet{
func generateRSWithLabel(labels map[string]string, image string) apps.ReplicaSet {
return apps.ReplicaSet{
ObjectMeta: metav1.ObjectMeta{
Name: names.SimpleNameGenerator.GenerateName("replicaset"),
Labels: labels,
},
Spec: extensions.ReplicaSetSpec{
Spec: apps.ReplicaSetSpec{
Replicas: func(i int32) *int32 { return &i }(1),
Selector: &metav1.LabelSelector{MatchLabels: labels},
Template: v1.PodTemplateSpec{
@ -113,10 +113,10 @@ func generateRSWithLabel(labels map[string]string, image string) extensions.Repl
}
}
func newDControllerRef(d *extensions.Deployment) *metav1.OwnerReference {
func newDControllerRef(d *apps.Deployment) *metav1.OwnerReference {
isController := true
return &metav1.OwnerReference{
APIVersion: "extensions/v1beta1",
APIVersion: "apps/v1",
Kind: "Deployment",
Name: d.GetName(),
UID: d.GetUID(),
@ -125,16 +125,16 @@ func newDControllerRef(d *extensions.Deployment) *metav1.OwnerReference {
}
// generateRS creates a replica set, with the input deployment's template as its template
func generateRS(deployment extensions.Deployment) extensions.ReplicaSet {
func generateRS(deployment apps.Deployment) apps.ReplicaSet {
template := deployment.Spec.Template.DeepCopy()
return extensions.ReplicaSet{
return apps.ReplicaSet{
ObjectMeta: metav1.ObjectMeta{
UID: randomUID(),
Name: names.SimpleNameGenerator.GenerateName("replicaset"),
Labels: template.Labels,
OwnerReferences: []metav1.OwnerReference{*newDControllerRef(&deployment)},
},
Spec: extensions.ReplicaSetSpec{
Spec: apps.ReplicaSetSpec{
Replicas: new(int32),
Template: *template,
Selector: &metav1.LabelSelector{MatchLabels: template.Labels},
@ -147,15 +147,15 @@ func randomUID() types.UID {
}
// generateDeployment creates a deployment, with the input image as its template
func generateDeployment(image string) extensions.Deployment {
func generateDeployment(image string) apps.Deployment {
podLabels := map[string]string{"name": image}
terminationSec := int64(30)
return extensions.Deployment{
return apps.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: image,
Annotations: make(map[string]string),
},
Spec: extensions.DeploymentSpec{
Spec: apps.DeploymentSpec{
Replicas: func(i int32) *int32 { return &i }(1),
Selector: &metav1.LabelSelector{MatchLabels: podLabels},
Template: v1.PodTemplateSpec{
@ -188,14 +188,14 @@ func TestGetNewRS(t *testing.T) {
tests := []struct {
Name string
objs []runtime.Object
expected *extensions.ReplicaSet
expected *apps.ReplicaSet
}{
{
"No new ReplicaSet",
[]runtime.Object{
&v1.PodList{},
&extensions.ReplicaSetList{
Items: []extensions.ReplicaSet{
&apps.ReplicaSetList{
Items: []apps.ReplicaSet{
generateRS(generateDeployment("foo")),
generateRS(generateDeployment("bar")),
},
@ -207,8 +207,8 @@ func TestGetNewRS(t *testing.T) {
"Has new ReplicaSet",
[]runtime.Object{
&v1.PodList{},
&extensions.ReplicaSetList{
Items: []extensions.ReplicaSet{
&apps.ReplicaSetList{
Items: []apps.ReplicaSet{
generateRS(generateDeployment("foo")),
generateRS(generateDeployment("bar")),
generateRS(generateDeployment("abc")),
@ -228,7 +228,7 @@ func TestGetNewRS(t *testing.T) {
fakeClient = addListRSReactor(fakeClient, test.objs[1])
fakeClient = addUpdatePodsReactor(fakeClient)
fakeClient = addUpdateRSReactor(fakeClient)
rs, err := GetNewReplicaSet(&newDeployment, fakeClient.ExtensionsV1beta1())
rs, err := GetNewReplicaSet(&newDeployment, fakeClient.AppsV1())
if err != nil {
t.Errorf("In test case %s, got unexpected error %v", test.Name, err)
}
@ -262,13 +262,13 @@ func TestGetOldRSs(t *testing.T) {
tests := []struct {
Name string
objs []runtime.Object
expected []*extensions.ReplicaSet
expected []*apps.ReplicaSet
}{
{
"No old ReplicaSets",
[]runtime.Object{
&extensions.ReplicaSetList{
Items: []extensions.ReplicaSet{
&apps.ReplicaSetList{
Items: []apps.ReplicaSet{
generateRS(generateDeployment("foo")),
newRS,
generateRS(generateDeployment("bar")),
@ -280,8 +280,8 @@ func TestGetOldRSs(t *testing.T) {
{
"Has old ReplicaSet",
[]runtime.Object{
&extensions.ReplicaSetList{
Items: []extensions.ReplicaSet{
&apps.ReplicaSetList{
Items: []apps.ReplicaSet{
oldRS2,
oldRS,
existedRS,
@ -291,7 +291,7 @@ func TestGetOldRSs(t *testing.T) {
},
},
},
[]*extensions.ReplicaSet{&oldRS, &oldRS2},
[]*apps.ReplicaSet{&oldRS, &oldRS2},
},
}
@ -301,7 +301,7 @@ func TestGetOldRSs(t *testing.T) {
fakeClient = addListRSReactor(fakeClient, test.objs[0])
fakeClient = addGetRSReactor(fakeClient, test.objs[0])
fakeClient = addUpdateRSReactor(fakeClient)
_, rss, err := GetOldReplicaSets(&newDeployment, fakeClient.ExtensionsV1beta1())
_, rss, err := GetOldReplicaSets(&newDeployment, fakeClient.AppsV1())
if err != nil {
t.Errorf("In test case %s, got unexpected error %v", test.Name, err)
}
@ -340,56 +340,56 @@ func TestEqualIgnoreHash(t *testing.T) {
}{
{
"Same spec, same labels",
generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{extensions.DefaultDeploymentUniqueLabelKey: "value-1", "something": "else"}),
generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{extensions.DefaultDeploymentUniqueLabelKey: "value-1", "something": "else"}),
generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{apps.DefaultDeploymentUniqueLabelKey: "value-1", "something": "else"}),
generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{apps.DefaultDeploymentUniqueLabelKey: "value-1", "something": "else"}),
true,
},
{
"Same spec, only pod-template-hash label value is different",
generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{extensions.DefaultDeploymentUniqueLabelKey: "value-1", "something": "else"}),
generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{extensions.DefaultDeploymentUniqueLabelKey: "value-2", "something": "else"}),
generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{apps.DefaultDeploymentUniqueLabelKey: "value-1", "something": "else"}),
generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{apps.DefaultDeploymentUniqueLabelKey: "value-2", "something": "else"}),
true,
},
{
"Same spec, the former doesn't have pod-template-hash label",
generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{"something": "else"}),
generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{extensions.DefaultDeploymentUniqueLabelKey: "value-2", "something": "else"}),
generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{apps.DefaultDeploymentUniqueLabelKey: "value-2", "something": "else"}),
true,
},
{
"Same spec, the label is different, the former doesn't have pod-template-hash label, same number of labels",
generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{"something": "else"}),
generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{extensions.DefaultDeploymentUniqueLabelKey: "value-2"}),
generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{apps.DefaultDeploymentUniqueLabelKey: "value-2"}),
false,
},
{
"Same spec, the label is different, the latter doesn't have pod-template-hash label, same number of labels",
generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{extensions.DefaultDeploymentUniqueLabelKey: "value-1"}),
generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{apps.DefaultDeploymentUniqueLabelKey: "value-1"}),
generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{"something": "else"}),
false,
},
{
"Same spec, the label is different, and the pod-template-hash label value is the same",
generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{extensions.DefaultDeploymentUniqueLabelKey: "value-1"}),
generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{extensions.DefaultDeploymentUniqueLabelKey: "value-1", "something": "else"}),
generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{apps.DefaultDeploymentUniqueLabelKey: "value-1"}),
generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{apps.DefaultDeploymentUniqueLabelKey: "value-1", "something": "else"}),
false,
},
{
"Different spec, same labels",
generatePodTemplateSpec("foo", "foo-node", map[string]string{"former": "value"}, map[string]string{extensions.DefaultDeploymentUniqueLabelKey: "value-1", "something": "else"}),
generatePodTemplateSpec("foo", "foo-node", map[string]string{"latter": "value"}, map[string]string{extensions.DefaultDeploymentUniqueLabelKey: "value-1", "something": "else"}),
generatePodTemplateSpec("foo", "foo-node", map[string]string{"former": "value"}, map[string]string{apps.DefaultDeploymentUniqueLabelKey: "value-1", "something": "else"}),
generatePodTemplateSpec("foo", "foo-node", map[string]string{"latter": "value"}, map[string]string{apps.DefaultDeploymentUniqueLabelKey: "value-1", "something": "else"}),
false,
},
{
"Different spec, different pod-template-hash label value",
generatePodTemplateSpec("foo-1", "foo-node", map[string]string{}, map[string]string{extensions.DefaultDeploymentUniqueLabelKey: "value-1", "something": "else"}),
generatePodTemplateSpec("foo-2", "foo-node", map[string]string{}, map[string]string{extensions.DefaultDeploymentUniqueLabelKey: "value-2", "something": "else"}),
generatePodTemplateSpec("foo-1", "foo-node", map[string]string{}, map[string]string{apps.DefaultDeploymentUniqueLabelKey: "value-1", "something": "else"}),
generatePodTemplateSpec("foo-2", "foo-node", map[string]string{}, map[string]string{apps.DefaultDeploymentUniqueLabelKey: "value-2", "something": "else"}),
false,
},
{
"Different spec, the former doesn't have pod-template-hash label",
generatePodTemplateSpec("foo-1", "foo-node-1", map[string]string{}, map[string]string{"something": "else"}),
generatePodTemplateSpec("foo-2", "foo-node-2", map[string]string{}, map[string]string{extensions.DefaultDeploymentUniqueLabelKey: "value-2", "something": "else"}),
generatePodTemplateSpec("foo-2", "foo-node-2", map[string]string{}, map[string]string{apps.DefaultDeploymentUniqueLabelKey: "value-2", "something": "else"}),
false,
},
{
@ -431,11 +431,11 @@ func TestFindNewReplicaSet(t *testing.T) {
deployment := generateDeployment("nginx")
newRS := generateRS(deployment)
newRS.Labels[extensions.DefaultDeploymentUniqueLabelKey] = "hash"
newRS.Labels[apps.DefaultDeploymentUniqueLabelKey] = "hash"
newRS.CreationTimestamp = later
newRSDup := generateRS(deployment)
newRSDup.Labels[extensions.DefaultDeploymentUniqueLabelKey] = "different-hash"
newRSDup.Labels[apps.DefaultDeploymentUniqueLabelKey] = "different-hash"
newRSDup.CreationTimestamp = now
oldDeployment := generateDeployment("nginx")
@ -445,26 +445,26 @@ func TestFindNewReplicaSet(t *testing.T) {
tests := []struct {
Name string
deployment extensions.Deployment
rsList []*extensions.ReplicaSet
expected *extensions.ReplicaSet
deployment apps.Deployment
rsList []*apps.ReplicaSet
expected *apps.ReplicaSet
}{
{
Name: "Get new ReplicaSet with the same template as Deployment spec but different pod-template-hash value",
deployment: deployment,
rsList: []*extensions.ReplicaSet{&newRS, &oldRS},
rsList: []*apps.ReplicaSet{&newRS, &oldRS},
expected: &newRS,
},
{
Name: "Get the oldest new ReplicaSet when there are more than one ReplicaSet with the same template",
deployment: deployment,
rsList: []*extensions.ReplicaSet{&newRS, &oldRS, &newRSDup},
rsList: []*apps.ReplicaSet{&newRS, &oldRS, &newRSDup},
expected: &newRSDup,
},
{
Name: "Get nil new ReplicaSet",
deployment: deployment,
rsList: []*extensions.ReplicaSet{&oldRS},
rsList: []*apps.ReplicaSet{&oldRS},
expected: nil,
},
}
@ -486,11 +486,11 @@ func TestFindOldReplicaSets(t *testing.T) {
deployment := generateDeployment("nginx")
newRS := generateRS(deployment)
*(newRS.Spec.Replicas) = 1
newRS.Labels[extensions.DefaultDeploymentUniqueLabelKey] = "hash"
newRS.Labels[apps.DefaultDeploymentUniqueLabelKey] = "hash"
newRS.CreationTimestamp = later
newRSDup := generateRS(deployment)
newRSDup.Labels[extensions.DefaultDeploymentUniqueLabelKey] = "different-hash"
newRSDup.Labels[apps.DefaultDeploymentUniqueLabelKey] = "different-hash"
newRSDup.CreationTimestamp = now
oldDeployment := generateDeployment("nginx")
@ -501,37 +501,37 @@ func TestFindOldReplicaSets(t *testing.T) {
tests := []struct {
Name string
deployment extensions.Deployment
rsList []*extensions.ReplicaSet
deployment apps.Deployment
rsList []*apps.ReplicaSet
podList *v1.PodList
expected []*extensions.ReplicaSet
expectedRequire []*extensions.ReplicaSet
expected []*apps.ReplicaSet
expectedRequire []*apps.ReplicaSet
}{
{
Name: "Get old ReplicaSets",
deployment: deployment,
rsList: []*extensions.ReplicaSet{&newRS, &oldRS},
expected: []*extensions.ReplicaSet{&oldRS},
rsList: []*apps.ReplicaSet{&newRS, &oldRS},
expected: []*apps.ReplicaSet{&oldRS},
expectedRequire: nil,
},
{
Name: "Get old ReplicaSets with no new ReplicaSet",
deployment: deployment,
rsList: []*extensions.ReplicaSet{&oldRS},
expected: []*extensions.ReplicaSet{&oldRS},
rsList: []*apps.ReplicaSet{&oldRS},
expected: []*apps.ReplicaSet{&oldRS},
expectedRequire: nil,
},
{
Name: "Get old ReplicaSets with two new ReplicaSets, only the oldest new ReplicaSet is seen as new ReplicaSet",
deployment: deployment,
rsList: []*extensions.ReplicaSet{&oldRS, &newRS, &newRSDup},
expected: []*extensions.ReplicaSet{&oldRS, &newRS},
expectedRequire: []*extensions.ReplicaSet{&newRS},
rsList: []*apps.ReplicaSet{&oldRS, &newRS, &newRSDup},
expected: []*apps.ReplicaSet{&oldRS, &newRS},
expectedRequire: []*apps.ReplicaSet{&newRS},
},
{
Name: "Get empty old ReplicaSets",
deployment: deployment,
rsList: []*extensions.ReplicaSet{&newRS},
rsList: []*apps.ReplicaSet{&newRS},
expected: nil,
expectedRequire: nil,
},
@ -554,7 +554,7 @@ func TestFindOldReplicaSets(t *testing.T) {
}
// equal compares the equality of two ReplicaSet slices regardless of their ordering
func equal(rss1, rss2 []*extensions.ReplicaSet) bool {
func equal(rss1, rss2 []*apps.ReplicaSet) bool {
if reflect.DeepEqual(rss1, rss2) {
return true
}
@ -583,19 +583,19 @@ func TestGetReplicaCountForReplicaSets(t *testing.T) {
tests := []struct {
Name string
sets []*extensions.ReplicaSet
sets []*apps.ReplicaSet
expectedCount int32
expectedActual int32
}{
{
"1:2 Replicas",
[]*extensions.ReplicaSet{&rs1},
[]*apps.ReplicaSet{&rs1},
1,
2,
},
{
"3:5 Replicas",
[]*extensions.ReplicaSet{&rs1, &rs2},
[]*apps.ReplicaSet{&rs1, &rs2},
3,
5,
},
@ -679,7 +679,7 @@ func TestResolveFenceposts(t *testing.T) {
func TestNewRSNewReplicas(t *testing.T) {
tests := []struct {
Name string
strategyType extensions.DeploymentStrategyType
strategyType apps.DeploymentStrategyType
depReplicas int32
newRSReplicas int32
maxSurge int
@ -687,17 +687,17 @@ func TestNewRSNewReplicas(t *testing.T) {
}{
{
"can not scale up - to newRSReplicas",
extensions.RollingUpdateDeploymentStrategyType,
apps.RollingUpdateDeploymentStrategyType,
1, 5, 1, 5,
},
{
"scale up - to depReplicas",
extensions.RollingUpdateDeploymentStrategyType,
apps.RollingUpdateDeploymentStrategyType,
6, 2, 10, 6,
},
{
"recreate - to depReplicas",
extensions.RecreateDeploymentStrategyType,
apps.RecreateDeploymentStrategyType,
3, 1, 1, 3,
},
}
@ -709,8 +709,8 @@ func TestNewRSNewReplicas(t *testing.T) {
for _, test := range tests {
t.Run(test.Name, func(t *testing.T) {
*(newDeployment.Spec.Replicas) = test.depReplicas
newDeployment.Spec.Strategy = extensions.DeploymentStrategy{Type: test.strategyType}
newDeployment.Spec.Strategy.RollingUpdate = &extensions.RollingUpdateDeployment{
newDeployment.Spec.Strategy = apps.DeploymentStrategy{Type: test.strategyType}
newDeployment.Spec.Strategy.RollingUpdate = &apps.RollingUpdateDeployment{
MaxUnavailable: func(i int) *intstr.IntOrString {
x := intstr.FromInt(i)
return &x
@ -721,7 +721,7 @@ func TestNewRSNewReplicas(t *testing.T) {
}(test.maxSurge),
}
*(newRC.Spec.Replicas) = test.newRSReplicas
rs, err := NewRSNewReplicas(&newDeployment, []*extensions.ReplicaSet{&rs5}, &newRC)
rs, err := NewRSNewReplicas(&newDeployment, []*apps.ReplicaSet{&rs5}, &newRC)
if err != nil {
t.Errorf("In test case %s, got unexpected error %v", test.Name, err)
}
@ -733,33 +733,33 @@ func TestNewRSNewReplicas(t *testing.T) {
}
var (
condProgressing = func() extensions.DeploymentCondition {
return extensions.DeploymentCondition{
Type: extensions.DeploymentProgressing,
condProgressing = func() apps.DeploymentCondition {
return apps.DeploymentCondition{
Type: apps.DeploymentProgressing,
Status: v1.ConditionFalse,
Reason: "ForSomeReason",
}
}
condProgressing2 = func() extensions.DeploymentCondition {
return extensions.DeploymentCondition{
Type: extensions.DeploymentProgressing,
condProgressing2 = func() apps.DeploymentCondition {
return apps.DeploymentCondition{
Type: apps.DeploymentProgressing,
Status: v1.ConditionTrue,
Reason: "BecauseItIs",
}
}
condAvailable = func() extensions.DeploymentCondition {
return extensions.DeploymentCondition{
Type: extensions.DeploymentAvailable,
condAvailable = func() apps.DeploymentCondition {
return apps.DeploymentCondition{
Type: apps.DeploymentAvailable,
Status: v1.ConditionTrue,
Reason: "AwesomeController",
}
}
status = func() *extensions.DeploymentStatus {
return &extensions.DeploymentStatus{
Conditions: []extensions.DeploymentCondition{condProgressing(), condAvailable()},
status = func() *apps.DeploymentStatus {
return &apps.DeploymentStatus{
Conditions: []apps.DeploymentCondition{condProgressing(), condAvailable()},
}
}
)
@ -770,8 +770,8 @@ func TestGetCondition(t *testing.T) {
tests := []struct {
name string
status extensions.DeploymentStatus
condType extensions.DeploymentConditionType
status apps.DeploymentStatus
condType apps.DeploymentConditionType
expected bool
}{
@ -779,7 +779,7 @@ func TestGetCondition(t *testing.T) {
name: "condition exists",
status: *exampleStatus,
condType: extensions.DeploymentAvailable,
condType: apps.DeploymentAvailable,
expected: true,
},
@ -787,7 +787,7 @@ func TestGetCondition(t *testing.T) {
name: "condition does not exist",
status: *exampleStatus,
condType: extensions.DeploymentReplicaFailure,
condType: apps.DeploymentReplicaFailure,
expected: false,
},
@ -808,23 +808,23 @@ func TestSetCondition(t *testing.T) {
tests := []struct {
name string
status *extensions.DeploymentStatus
cond extensions.DeploymentCondition
status *apps.DeploymentStatus
cond apps.DeploymentCondition
expectedStatus *extensions.DeploymentStatus
expectedStatus *apps.DeploymentStatus
}{
{
name: "set for the first time",
status: &extensions.DeploymentStatus{},
status: &apps.DeploymentStatus{},
cond: condAvailable(),
expectedStatus: &extensions.DeploymentStatus{Conditions: []extensions.DeploymentCondition{condAvailable()}},
expectedStatus: &apps.DeploymentStatus{Conditions: []apps.DeploymentCondition{condAvailable()}},
},
{
name: "simple set",
status: &extensions.DeploymentStatus{Conditions: []extensions.DeploymentCondition{condProgressing()}},
status: &apps.DeploymentStatus{Conditions: []apps.DeploymentCondition{condProgressing()}},
cond: condAvailable(),
expectedStatus: status(),
@ -832,10 +832,10 @@ func TestSetCondition(t *testing.T) {
{
name: "overwrite",
status: &extensions.DeploymentStatus{Conditions: []extensions.DeploymentCondition{condProgressing()}},
status: &apps.DeploymentStatus{Conditions: []apps.DeploymentCondition{condProgressing()}},
cond: condProgressing2(),
expectedStatus: &extensions.DeploymentStatus{Conditions: []extensions.DeploymentCondition{condProgressing2()}},
expectedStatus: &apps.DeploymentStatus{Conditions: []apps.DeploymentCondition{condProgressing2()}},
},
}
@ -853,32 +853,32 @@ func TestRemoveCondition(t *testing.T) {
tests := []struct {
name string
status *extensions.DeploymentStatus
condType extensions.DeploymentConditionType
status *apps.DeploymentStatus
condType apps.DeploymentConditionType
expectedStatus *extensions.DeploymentStatus
expectedStatus *apps.DeploymentStatus
}{
{
name: "remove from empty status",
status: &extensions.DeploymentStatus{},
condType: extensions.DeploymentProgressing,
status: &apps.DeploymentStatus{},
condType: apps.DeploymentProgressing,
expectedStatus: &extensions.DeploymentStatus{},
expectedStatus: &apps.DeploymentStatus{},
},
{
name: "simple remove",
status: &extensions.DeploymentStatus{Conditions: []extensions.DeploymentCondition{condProgressing()}},
condType: extensions.DeploymentProgressing,
status: &apps.DeploymentStatus{Conditions: []apps.DeploymentCondition{condProgressing()}},
condType: apps.DeploymentProgressing,
expectedStatus: &extensions.DeploymentStatus{},
expectedStatus: &apps.DeploymentStatus{},
},
{
name: "doesn't remove anything",
status: status(),
condType: extensions.DeploymentReplicaFailure,
condType: apps.DeploymentReplicaFailure,
expectedStatus: status(),
},
@ -895,19 +895,19 @@ func TestRemoveCondition(t *testing.T) {
}
func TestDeploymentComplete(t *testing.T) {
deployment := func(desired, current, updated, available, maxUnavailable, maxSurge int32) *extensions.Deployment {
return &extensions.Deployment{
Spec: extensions.DeploymentSpec{
deployment := func(desired, current, updated, available, maxUnavailable, maxSurge int32) *apps.Deployment {
return &apps.Deployment{
Spec: apps.DeploymentSpec{
Replicas: &desired,
Strategy: extensions.DeploymentStrategy{
RollingUpdate: &extensions.RollingUpdateDeployment{
Strategy: apps.DeploymentStrategy{
RollingUpdate: &apps.RollingUpdateDeployment{
MaxUnavailable: func(i int) *intstr.IntOrString { x := intstr.FromInt(i); return &x }(int(maxUnavailable)),
MaxSurge: func(i int) *intstr.IntOrString { x := intstr.FromInt(i); return &x }(int(maxSurge)),
},
Type: extensions.RollingUpdateDeploymentStrategyType,
Type: apps.RollingUpdateDeploymentStrategyType,
},
},
Status: extensions.DeploymentStatus{
Status: apps.DeploymentStatus{
Replicas: current,
UpdatedReplicas: updated,
AvailableReplicas: available,
@ -918,7 +918,7 @@ func TestDeploymentComplete(t *testing.T) {
tests := []struct {
name string
d *extensions.Deployment
d *apps.Deployment
expected bool
}{
@ -972,9 +972,9 @@ func TestDeploymentComplete(t *testing.T) {
}
func TestDeploymentProgressing(t *testing.T) {
deployment := func(current, updated, ready, available int32) *extensions.Deployment {
return &extensions.Deployment{
Status: extensions.DeploymentStatus{
deployment := func(current, updated, ready, available int32) *apps.Deployment {
return &apps.Deployment{
Status: apps.DeploymentStatus{
Replicas: current,
UpdatedReplicas: updated,
ReadyReplicas: ready,
@ -982,8 +982,8 @@ func TestDeploymentProgressing(t *testing.T) {
},
}
}
newStatus := func(current, updated, ready, available int32) extensions.DeploymentStatus {
return extensions.DeploymentStatus{
newStatus := func(current, updated, ready, available int32) apps.DeploymentStatus {
return apps.DeploymentStatus{
Replicas: current,
UpdatedReplicas: updated,
ReadyReplicas: ready,
@ -994,8 +994,8 @@ func TestDeploymentProgressing(t *testing.T) {
tests := []struct {
name string
d *extensions.Deployment
newStatus extensions.DeploymentStatus
d *apps.Deployment
newStatus apps.DeploymentStatus
expected bool
}{
@ -1075,13 +1075,13 @@ func TestDeploymentTimedOut(t *testing.T) {
timeFn := func(min, sec int) time.Time {
return time.Date(2016, 1, 1, 0, min, sec, 0, time.UTC)
}
deployment := func(condType extensions.DeploymentConditionType, status v1.ConditionStatus, reason string, pds *int32, from time.Time) extensions.Deployment {
return extensions.Deployment{
Spec: extensions.DeploymentSpec{
deployment := func(condType apps.DeploymentConditionType, status v1.ConditionStatus, reason string, pds *int32, from time.Time) apps.Deployment {
return apps.Deployment{
Spec: apps.DeploymentSpec{
ProgressDeadlineSeconds: pds,
},
Status: extensions.DeploymentStatus{
Conditions: []extensions.DeploymentCondition{
Status: apps.DeploymentStatus{
Conditions: []apps.DeploymentCondition{
{
Type: condType,
Status: status,
@ -1096,7 +1096,7 @@ func TestDeploymentTimedOut(t *testing.T) {
tests := []struct {
name string
d extensions.Deployment
d apps.Deployment
nowFn func() time.Time
expected bool
@ -1104,28 +1104,28 @@ func TestDeploymentTimedOut(t *testing.T) {
{
name: "no progressDeadlineSeconds specified - no timeout",
d: deployment(extensions.DeploymentProgressing, v1.ConditionTrue, "", null, timeFn(1, 9)),
d: deployment(apps.DeploymentProgressing, v1.ConditionTrue, "", null, timeFn(1, 9)),
nowFn: func() time.Time { return timeFn(1, 20) },
expected: false,
},
{
name: "progressDeadlineSeconds: 10s, now - started => 00:01:20 - 00:01:09 => 11s",
d: deployment(extensions.DeploymentProgressing, v1.ConditionTrue, "", &ten, timeFn(1, 9)),
d: deployment(apps.DeploymentProgressing, v1.ConditionTrue, "", &ten, timeFn(1, 9)),
nowFn: func() time.Time { return timeFn(1, 20) },
expected: true,
},
{
name: "progressDeadlineSeconds: 10s, now - started => 00:01:20 - 00:01:11 => 9s",
d: deployment(extensions.DeploymentProgressing, v1.ConditionTrue, "", &ten, timeFn(1, 11)),
d: deployment(apps.DeploymentProgressing, v1.ConditionTrue, "", &ten, timeFn(1, 11)),
nowFn: func() time.Time { return timeFn(1, 20) },
expected: false,
},
{
name: "previous status was a complete deployment",
d: deployment(extensions.DeploymentProgressing, v1.ConditionTrue, NewRSAvailableReason, nil, time.Time{}),
d: deployment(apps.DeploymentProgressing, v1.ConditionTrue, NewRSAvailableReason, nil, time.Time{}),
expected: false,
},
}
@ -1141,23 +1141,23 @@ func TestDeploymentTimedOut(t *testing.T) {
}
func TestMaxUnavailable(t *testing.T) {
deployment := func(replicas int32, maxUnavailable intstr.IntOrString) extensions.Deployment {
return extensions.Deployment{
Spec: extensions.DeploymentSpec{
deployment := func(replicas int32, maxUnavailable intstr.IntOrString) apps.Deployment {
return apps.Deployment{
Spec: apps.DeploymentSpec{
Replicas: func(i int32) *int32 { return &i }(replicas),
Strategy: extensions.DeploymentStrategy{
RollingUpdate: &extensions.RollingUpdateDeployment{
Strategy: apps.DeploymentStrategy{
RollingUpdate: &apps.RollingUpdateDeployment{
MaxSurge: func(i int) *intstr.IntOrString { x := intstr.FromInt(i); return &x }(int(1)),
MaxUnavailable: &maxUnavailable,
},
Type: extensions.RollingUpdateDeploymentStrategyType,
Type: apps.RollingUpdateDeploymentStrategyType,
},
},
}
}
tests := []struct {
name string
deployment extensions.Deployment
deployment apps.Deployment
expected int32
}{
{
@ -1182,10 +1182,10 @@ func TestMaxUnavailable(t *testing.T) {
},
{
name: "maxUnavailable with Recreate deployment strategy",
deployment: extensions.Deployment{
Spec: extensions.DeploymentSpec{
Strategy: extensions.DeploymentStrategy{
Type: extensions.RecreateDeploymentStrategyType,
deployment: apps.Deployment{
Spec: apps.DeploymentSpec{
Strategy: apps.DeploymentStrategy{
Type: apps.RecreateDeploymentStrategyType,
},
},
},
@ -1285,14 +1285,14 @@ func TestReplicasAnnotationsNeedUpdate(t *testing.T) {
tests := []struct {
name string
replicaSet *extensions.ReplicaSet
replicaSet *apps.ReplicaSet
expected bool
}{
{
name: "test Annotations nil",
replicaSet: &extensions.ReplicaSet{
replicaSet: &apps.ReplicaSet{
ObjectMeta: metav1.ObjectMeta{Name: "hello", Namespace: "test"},
Spec: extensions.ReplicaSetSpec{
Spec: apps.ReplicaSetSpec{
Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
},
},
@ -1300,13 +1300,13 @@ func TestReplicasAnnotationsNeedUpdate(t *testing.T) {
},
{
name: "test desiredReplicas update",
replicaSet: &extensions.ReplicaSet{
replicaSet: &apps.ReplicaSet{
ObjectMeta: metav1.ObjectMeta{
Name: "hello",
Namespace: "test",
Annotations: map[string]string{DesiredReplicasAnnotation: "8", MaxReplicasAnnotation: maxReplicas},
},
Spec: extensions.ReplicaSetSpec{
Spec: apps.ReplicaSetSpec{
Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
},
},
@ -1314,13 +1314,13 @@ func TestReplicasAnnotationsNeedUpdate(t *testing.T) {
},
{
name: "test maxReplicas update",
replicaSet: &extensions.ReplicaSet{
replicaSet: &apps.ReplicaSet{
ObjectMeta: metav1.ObjectMeta{
Name: "hello",
Namespace: "test",
Annotations: map[string]string{DesiredReplicasAnnotation: desiredReplicas, MaxReplicasAnnotation: "16"},
},
Spec: extensions.ReplicaSetSpec{
Spec: apps.ReplicaSetSpec{
Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
},
},
@ -1328,13 +1328,13 @@ func TestReplicasAnnotationsNeedUpdate(t *testing.T) {
},
{
name: "test needn't update",
replicaSet: &extensions.ReplicaSet{
replicaSet: &apps.ReplicaSet{
ObjectMeta: metav1.ObjectMeta{
Name: "hello",
Namespace: "test",
Annotations: map[string]string{DesiredReplicasAnnotation: desiredReplicas, MaxReplicasAnnotation: maxReplicas},
},
Spec: extensions.ReplicaSetSpec{
Spec: apps.ReplicaSetSpec{
Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
},
},

View File

@ -19,21 +19,21 @@ package util
import (
"github.com/golang/glog"
extensions "k8s.io/api/extensions/v1beta1"
apps "k8s.io/api/apps/v1"
errorsutil "k8s.io/apimachinery/pkg/util/errors"
unversionedextensions "k8s.io/client-go/kubernetes/typed/extensions/v1beta1"
extensionslisters "k8s.io/client-go/listers/extensions/v1beta1"
appsclient "k8s.io/client-go/kubernetes/typed/apps/v1"
appslisters "k8s.io/client-go/listers/apps/v1"
"k8s.io/client-go/util/retry"
)
// TODO: use client library instead when it starts to support update retries
// see https://github.com/kubernetes/kubernetes/issues/21479
type updateRSFunc func(rs *extensions.ReplicaSet) error
type updateRSFunc func(rs *apps.ReplicaSet) error
// UpdateRSWithRetries updates a RS with given applyUpdate function. Note that RS not found error is ignored.
// The returned bool value can be used to tell if the RS is actually updated.
func UpdateRSWithRetries(rsClient unversionedextensions.ReplicaSetInterface, rsLister extensionslisters.ReplicaSetLister, namespace, name string, applyUpdate updateRSFunc) (*extensions.ReplicaSet, error) {
var rs *extensions.ReplicaSet
func UpdateRSWithRetries(rsClient appsclient.ReplicaSetInterface, rsLister appslisters.ReplicaSetLister, namespace, name string, applyUpdate updateRSFunc) (*apps.ReplicaSet, error) {
var rs *apps.ReplicaSet
retryErr := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
var err error

View File

@ -177,8 +177,6 @@ go_library(
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/scale:go_default_library",
"//vendor/k8s.io/client-go/util/integer:go_default_library",

View File

@ -102,12 +102,12 @@ type DeploymentHistoryViewer struct {
// ViewHistory returns a revision-to-replicaset map as the revision history of a deployment
// TODO: this should be a describer
func (h *DeploymentHistoryViewer) ViewHistory(namespace, name string, revision int64) (string, error) {
versionedExtensionsClient := h.c.ExtensionsV1beta1()
deployment, err := versionedExtensionsClient.Deployments(namespace).Get(name, metav1.GetOptions{})
versionedAppsClient := h.c.AppsV1()
deployment, err := versionedAppsClient.Deployments(namespace).Get(name, metav1.GetOptions{})
if err != nil {
return "", fmt.Errorf("failed to retrieve deployment %s: %v", name, err)
}
_, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(deployment, versionedExtensionsClient)
_, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(deployment, versionedAppsClient)
if err != nil {
return "", fmt.Errorf("failed to retrieve replica sets from deployment %s: %v", name, err)
}
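
GetAllReplicaSets is an internal deployment/util helper, so as a rough sketch of the equivalent apps/v1-only read path for an external caller, the following lists ReplicaSets by the Deployment's selector instead; the function and names are illustrative assumptions.

```go
package example

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// replicaSetsForDeployment lists the ReplicaSets selected by a Deployment
// using only apps/v1 endpoints, so it requires a v1.9+ API server.
func replicaSetsForDeployment(c kubernetes.Interface, namespace, name string) error {
	d, err := c.AppsV1().Deployments(namespace).Get(name, metav1.GetOptions{})
	if err != nil {
		return fmt.Errorf("failed to retrieve deployment %s: %v", name, err)
	}
	selector, err := metav1.LabelSelectorAsSelector(d.Spec.Selector)
	if err != nil {
		return err
	}
	rsList, err := c.AppsV1().ReplicaSets(namespace).List(metav1.ListOptions{LabelSelector: selector.String()})
	if err != nil {
		return fmt.Errorf("failed to retrieve replica sets from deployment %s: %v", name, err)
	}
	for i := range rsList.Items {
		fmt.Printf("deployment %s/%s selects replica set %s\n", namespace, name, rsList.Items[i].Name)
	}
	return nil
}
```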

View File

@ -26,7 +26,7 @@ import (
appsv1 "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
extv1beta1 "k8s.io/api/extensions/v1beta1"
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
@ -115,10 +115,10 @@ func (r *DeploymentRollbacker) Rollback(obj runtime.Object, updatedAnnotations m
if d.Spec.Paused {
return "", fmt.Errorf("you cannot rollback a paused deployment; resume it first with 'kubectl rollout resume deployment/%s' and try again", d.Name)
}
deploymentRollback := &extv1beta1.DeploymentRollback{
deploymentRollback := &extensionsv1beta1.DeploymentRollback{
Name: d.Name,
UpdatedAnnotations: updatedAnnotations,
RollbackTo: extv1beta1.RollbackConfig{
RollbackTo: extensionsv1beta1.RollbackConfig{
Revision: toRevision,
},
}
@ -184,12 +184,12 @@ func isRollbackEvent(e *api.Event) (bool, string) {
}
func simpleDryRun(deployment *extensions.Deployment, c kubernetes.Interface, toRevision int64) (string, error) {
externalDeployment := &extv1beta1.Deployment{}
externalDeployment := &appsv1.Deployment{}
if err := legacyscheme.Scheme.Convert(deployment, externalDeployment, nil); err != nil {
return "", fmt.Errorf("failed to convert deployment, %v", err)
}
_, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(externalDeployment, c.ExtensionsV1beta1())
_, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(externalDeployment, c.AppsV1())
if err != nil {
return "", fmt.Errorf("failed to retrieve replica sets from deployment %s: %v", deployment.Name, err)
}

View File

@ -19,12 +19,12 @@ package kubectl
import (
"fmt"
appsv1 "k8s.io/api/apps/v1"
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/kubernetes"
clientappsv1beta1 "k8s.io/client-go/kubernetes/typed/apps/v1beta1"
clientextensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1"
clientappsv1 "k8s.io/client-go/kubernetes/typed/apps/v1"
"k8s.io/kubernetes/pkg/apis/apps"
"k8s.io/kubernetes/pkg/controller/deployment/util"
)
@ -38,28 +38,28 @@ type StatusViewer interface {
func StatusViewerFor(kind schema.GroupKind, c kubernetes.Interface) (StatusViewer, error) {
switch kind {
case extensionsv1beta1.SchemeGroupVersion.WithKind("Deployment").GroupKind(), apps.Kind("Deployment"):
return &DeploymentStatusViewer{c.ExtensionsV1beta1()}, nil
return &DeploymentStatusViewer{c.AppsV1()}, nil
case extensionsv1beta1.SchemeGroupVersion.WithKind("DaemonSet").GroupKind(), apps.Kind("DaemonSet"):
return &DaemonSetStatusViewer{c.ExtensionsV1beta1()}, nil
return &DaemonSetStatusViewer{c.AppsV1()}, nil
case apps.Kind("StatefulSet"):
return &StatefulSetStatusViewer{c.AppsV1beta1()}, nil
return &StatefulSetStatusViewer{c.AppsV1()}, nil
}
return nil, fmt.Errorf("no status viewer has been implemented for %v", kind)
}
// DeploymentStatusViewer implements the StatusViewer interface.
type DeploymentStatusViewer struct {
c clientextensionsv1beta1.DeploymentsGetter
c clientappsv1.DeploymentsGetter
}
// DaemonSetStatusViewer implements the StatusViewer interface.
type DaemonSetStatusViewer struct {
c clientextensionsv1beta1.DaemonSetsGetter
c clientappsv1.DaemonSetsGetter
}
// StatefulSetStatusViewer implements the StatusViewer interface.
type StatefulSetStatusViewer struct {
c clientappsv1beta1.StatefulSetsGetter
c clientappsv1.StatefulSetsGetter
}
// Status returns a message describing deployment status, and a bool value indicating if the status is considered done.
@ -78,7 +78,7 @@ func (s *DeploymentStatusViewer) Status(namespace, name string, revision int64)
}
}
if deployment.Generation <= deployment.Status.ObservedGeneration {
cond := util.GetDeploymentCondition(deployment.Status, extensionsv1beta1.DeploymentProgressing)
cond := util.GetDeploymentCondition(deployment.Status, appsv1.DeploymentProgressing)
if cond != nil && cond.Reason == util.TimedOutReason {
return "", false, fmt.Errorf("deployment %q exceeded its progress deadline", name)
}
@ -104,7 +104,7 @@ func (s *DaemonSetStatusViewer) Status(namespace, name string, revision int64) (
if err != nil {
return "", false, err
}
if daemon.Spec.UpdateStrategy.Type != extensionsv1beta1.RollingUpdateDaemonSetStrategyType {
if daemon.Spec.UpdateStrategy.Type != appsv1.RollingUpdateDaemonSetStrategyType {
return "", true, fmt.Errorf("Status is available only for RollingUpdate strategy type")
}
if daemon.Generation <= daemon.Status.ObservedGeneration {
@ -128,7 +128,7 @@ func (s *StatefulSetStatusViewer) Status(namespace, name string, revision int64)
if sts.Spec.UpdateStrategy.Type == apps.OnDeleteStatefulSetStrategyType {
return "", true, fmt.Errorf("%s updateStrategy does not have a Status`", apps.OnDeleteStatefulSetStrategyType)
}
if sts.Status.ObservedGeneration == nil || sts.Generation > *sts.Status.ObservedGeneration {
if sts.Status.ObservedGeneration == 0 || sts.Generation > sts.Status.ObservedGeneration {
return "Waiting for statefulset spec update to be observed...\n", false, nil
}
if sts.Spec.Replicas != nil && sts.Status.ReadyReplicas < *sts.Spec.Replicas {

View File

@ -20,9 +20,8 @@ import (
"fmt"
"testing"
apps "k8s.io/api/apps/v1beta1"
apps "k8s.io/api/apps/v1"
api "k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes/fake"
)
@ -31,14 +30,14 @@ func TestDeploymentStatusViewerStatus(t *testing.T) {
tests := []struct {
generation int64
specReplicas int32
status extensions.DeploymentStatus
status apps.DeploymentStatus
msg string
done bool
}{
{
generation: 0,
specReplicas: 1,
status: extensions.DeploymentStatus{
status: apps.DeploymentStatus{
ObservedGeneration: 1,
Replicas: 1,
UpdatedReplicas: 0,
@ -52,7 +51,7 @@ func TestDeploymentStatusViewerStatus(t *testing.T) {
{
generation: 1,
specReplicas: 1,
status: extensions.DeploymentStatus{
status: apps.DeploymentStatus{
ObservedGeneration: 1,
Replicas: 2,
UpdatedReplicas: 1,
@ -66,7 +65,7 @@ func TestDeploymentStatusViewerStatus(t *testing.T) {
{
generation: 1,
specReplicas: 2,
status: extensions.DeploymentStatus{
status: apps.DeploymentStatus{
ObservedGeneration: 1,
Replicas: 2,
UpdatedReplicas: 2,
@ -80,7 +79,7 @@ func TestDeploymentStatusViewerStatus(t *testing.T) {
{
generation: 1,
specReplicas: 2,
status: extensions.DeploymentStatus{
status: apps.DeploymentStatus{
ObservedGeneration: 1,
Replicas: 2,
UpdatedReplicas: 2,
@ -94,7 +93,7 @@ func TestDeploymentStatusViewerStatus(t *testing.T) {
{
generation: 2,
specReplicas: 2,
status: extensions.DeploymentStatus{
status: apps.DeploymentStatus{
ObservedGeneration: 1,
Replicas: 2,
UpdatedReplicas: 2,
@ -108,19 +107,19 @@ func TestDeploymentStatusViewerStatus(t *testing.T) {
}
for _, test := range tests {
d := &extensions.Deployment{
d := &apps.Deployment{
ObjectMeta: metav1.ObjectMeta{
Namespace: "bar",
Name: "foo",
UID: "8764ae47-9092-11e4-8393-42010af018ff",
Generation: test.generation,
},
Spec: extensions.DeploymentSpec{
Spec: apps.DeploymentSpec{
Replicas: &test.specReplicas,
},
Status: test.status,
}
client := fake.NewSimpleClientset(d).Extensions()
client := fake.NewSimpleClientset(d).Apps()
dsv := &DeploymentStatusViewer{c: client}
msg, done, err := dsv.Status("bar", "foo", 0)
if err != nil {
@ -143,13 +142,13 @@ func TestDeploymentStatusViewerStatus(t *testing.T) {
func TestDaemonSetStatusViewerStatus(t *testing.T) {
tests := []struct {
generation int64
status extensions.DaemonSetStatus
status apps.DaemonSetStatus
msg string
done bool
}{
{
generation: 0,
status: extensions.DaemonSetStatus{
status: apps.DaemonSetStatus{
ObservedGeneration: 1,
UpdatedNumberScheduled: 0,
DesiredNumberScheduled: 1,
@ -161,7 +160,7 @@ func TestDaemonSetStatusViewerStatus(t *testing.T) {
},
{
generation: 1,
status: extensions.DaemonSetStatus{
status: apps.DaemonSetStatus{
ObservedGeneration: 1,
UpdatedNumberScheduled: 2,
DesiredNumberScheduled: 2,
@ -173,7 +172,7 @@ func TestDaemonSetStatusViewerStatus(t *testing.T) {
},
{
generation: 1,
status: extensions.DaemonSetStatus{
status: apps.DaemonSetStatus{
ObservedGeneration: 1,
UpdatedNumberScheduled: 2,
DesiredNumberScheduled: 2,
@ -185,7 +184,7 @@ func TestDaemonSetStatusViewerStatus(t *testing.T) {
},
{
generation: 2,
status: extensions.DaemonSetStatus{
status: apps.DaemonSetStatus{
ObservedGeneration: 1,
UpdatedNumberScheduled: 2,
DesiredNumberScheduled: 2,
@ -200,21 +199,21 @@ func TestDaemonSetStatusViewerStatus(t *testing.T) {
for i := range tests {
test := tests[i]
t.Logf("testing scenario %d", i)
d := &extensions.DaemonSet{
d := &apps.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Namespace: "bar",
Name: "foo",
UID: "8764ae47-9092-11e4-8393-42010af018ff",
Generation: test.generation,
},
Spec: extensions.DaemonSetSpec{
UpdateStrategy: extensions.DaemonSetUpdateStrategy{
Type: extensions.RollingUpdateDaemonSetStrategyType,
Spec: apps.DaemonSetSpec{
UpdateStrategy: apps.DaemonSetUpdateStrategy{
Type: apps.RollingUpdateDaemonSetStrategyType,
},
},
Status: test.status,
}
client := fake.NewSimpleClientset(d).Extensions()
client := fake.NewSimpleClientset(d).Apps()
dsv := &DaemonSetStatusViewer{c: client}
msg, done, err := dsv.Status("bar", "foo", 0)
if err != nil {
@ -249,14 +248,11 @@ func TestStatefulSetStatusViewerStatus(t *testing.T) {
generation: 1,
strategy: apps.StatefulSetUpdateStrategy{Type: apps.OnDeleteStatefulSetStrategyType},
status: apps.StatefulSetStatus{
ObservedGeneration: func() *int64 {
generation := int64(1)
return &generation
}(),
Replicas: 0,
ReadyReplicas: 1,
CurrentReplicas: 0,
UpdatedReplicas: 0,
ObservedGeneration: 1,
Replicas: 0,
ReadyReplicas: 1,
CurrentReplicas: 0,
UpdatedReplicas: 0,
},
msg: "",
@ -268,14 +264,11 @@ func TestStatefulSetStatusViewerStatus(t *testing.T) {
generation: 2,
strategy: apps.StatefulSetUpdateStrategy{Type: apps.RollingUpdateStatefulSetStrategyType},
status: apps.StatefulSetStatus{
ObservedGeneration: func() *int64 {
generation := int64(1)
return &generation
}(),
Replicas: 3,
ReadyReplicas: 3,
CurrentReplicas: 3,
UpdatedReplicas: 0,
ObservedGeneration: 1,
Replicas: 3,
ReadyReplicas: 3,
CurrentReplicas: 3,
UpdatedReplicas: 0,
},
msg: "Waiting for statefulset spec update to be observed...\n",
@ -287,14 +280,11 @@ func TestStatefulSetStatusViewerStatus(t *testing.T) {
generation: 1,
strategy: apps.StatefulSetUpdateStrategy{Type: apps.RollingUpdateStatefulSetStrategyType},
status: apps.StatefulSetStatus{
ObservedGeneration: func() *int64 {
generation := int64(2)
return &generation
}(),
Replicas: 3,
ReadyReplicas: 2,
CurrentReplicas: 3,
UpdatedReplicas: 0,
ObservedGeneration: 2,
Replicas: 3,
ReadyReplicas: 2,
CurrentReplicas: 3,
UpdatedReplicas: 0,
},
msg: fmt.Sprintf("Waiting for %d pods to be ready...\n", 1),
@ -310,14 +300,11 @@ func TestStatefulSetStatusViewerStatus(t *testing.T) {
return &apps.RollingUpdateStatefulSetStrategy{Partition: &partition}
}()},
status: apps.StatefulSetStatus{
ObservedGeneration: func() *int64 {
generation := int64(2)
return &generation
}(),
Replicas: 3,
ReadyReplicas: 3,
CurrentReplicas: 2,
UpdatedReplicas: 1,
ObservedGeneration: 2,
Replicas: 3,
ReadyReplicas: 3,
CurrentReplicas: 2,
UpdatedReplicas: 1,
},
msg: fmt.Sprintf("partitioned roll out complete: %d new pods have been updated...\n", 1),
@ -333,14 +320,11 @@ func TestStatefulSetStatusViewerStatus(t *testing.T) {
return &apps.RollingUpdateStatefulSetStrategy{Partition: &partition}
}()},
status: apps.StatefulSetStatus{
ObservedGeneration: func() *int64 {
generation := int64(2)
return &generation
}(),
Replicas: 3,
ReadyReplicas: 3,
CurrentReplicas: 3,
UpdatedReplicas: 0,
ObservedGeneration: 2,
Replicas: 3,
ReadyReplicas: 3,
CurrentReplicas: 3,
UpdatedReplicas: 0,
},
msg: fmt.Sprintf("Waiting for partitioned roll out to finish: %d out of %d new pods have been updated...\n", 0, 1),
@ -352,16 +336,13 @@ func TestStatefulSetStatusViewerStatus(t *testing.T) {
generation: 1,
strategy: apps.StatefulSetUpdateStrategy{Type: apps.RollingUpdateStatefulSetStrategyType},
status: apps.StatefulSetStatus{
ObservedGeneration: func() *int64 {
generation := int64(2)
return &generation
}(),
Replicas: 3,
ReadyReplicas: 3,
CurrentReplicas: 3,
UpdatedReplicas: 3,
CurrentRevision: "foo",
UpdateRevision: "foo",
ObservedGeneration: 2,
Replicas: 3,
ReadyReplicas: 3,
CurrentReplicas: 3,
UpdatedReplicas: 3,
CurrentRevision: "foo",
UpdateRevision: "foo",
},
msg: fmt.Sprintf("statefulset rolling update complete %d pods at revision %s...\n", 3, "foo"),
@ -375,7 +356,7 @@ func TestStatefulSetStatusViewerStatus(t *testing.T) {
s.Status = test.status
s.Spec.UpdateStrategy = test.strategy
s.Generation = test.generation
client := fake.NewSimpleClientset(s).AppsV1beta1()
client := fake.NewSimpleClientset(s).AppsV1()
dsv := &StatefulSetStatusViewer{c: client}
msg, done, err := dsv.Status(s.Namespace, s.Name, 0)
if test.err && err == nil {
@ -394,19 +375,19 @@ func TestStatefulSetStatusViewerStatus(t *testing.T) {
}
func TestDaemonSetStatusViewerStatusWithWrongUpdateStrategyType(t *testing.T) {
d := &extensions.DaemonSet{
d := &apps.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Namespace: "bar",
Name: "foo",
UID: "8764ae47-9092-11e4-8393-42010af018ff",
},
Spec: extensions.DaemonSetSpec{
UpdateStrategy: extensions.DaemonSetUpdateStrategy{
Type: extensions.OnDeleteDaemonSetStrategyType,
Spec: apps.DaemonSetSpec{
UpdateStrategy: apps.DaemonSetUpdateStrategy{
Type: apps.OnDeleteDaemonSetStrategyType,
},
},
}
client := fake.NewSimpleClientset(d).Extensions()
client := fake.NewSimpleClientset(d).Apps()
dsv := &DaemonSetStatusViewer{c: client}
msg, done, err := dsv.Status("bar", "foo", 0)
errMsg := "Status is available only for RollingUpdate strategy type"
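
These tests drive the viewers through the fake clientset's AppsV1() typed client; a standalone, illustrative sketch of that pattern follows (the object and assertion are assumptions, not part of this diff).

```go
package example

import (
	"testing"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

// TestFakeAppsV1Get exercises the apps/v1 typed client of the fake clientset,
// the same client handed to the status viewers in the tests above.
func TestFakeAppsV1Get(t *testing.T) {
	d := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Namespace: "bar", Name: "foo"}}
	client := fake.NewSimpleClientset(d).AppsV1()
	got, err := client.Deployments("bar").Get("foo", metav1.GetOptions{})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if got.Name != "foo" {
		t.Errorf("got deployment %q, want %q", got.Name, "foo")
	}
}
```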

View File

@ -32,8 +32,8 @@ go_test(
"//pkg/printers:go_default_library",
"//pkg/util/pointer:go_default_library",
"//vendor/github.com/ghodss/yaml:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
@ -86,6 +86,7 @@ go_library(
"//pkg/util/slice:go_default_library",
"//vendor/github.com/fatih/camelcase:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/apps/v1beta1:go_default_library",
"//vendor/k8s.io/api/autoscaling/v2beta1:go_default_library",
"//vendor/k8s.io/api/batch/v1:go_default_library",

View File

@ -34,7 +34,7 @@ import (
"github.com/fatih/camelcase"
versionedextension "k8s.io/api/extensions/v1beta1"
appsv1 "k8s.io/api/apps/v1"
rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
@ -3084,7 +3084,7 @@ type DeploymentDescriber struct {
}
func (dd *DeploymentDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) {
d, err := dd.external.ExtensionsV1beta1().Deployments(namespace).Get(name, metav1.GetOptions{})
d, err := dd.external.AppsV1().Deployments(namespace).Get(name, metav1.GetOptions{})
if err != nil {
return "", err
}
@ -3105,7 +3105,7 @@ func (dd *DeploymentDescriber) Describe(namespace, name string, describerSetting
return describeDeployment(d, selector, internalDeployment, events, dd)
}
func describeDeployment(d *versionedextension.Deployment, selector labels.Selector, internalDeployment *extensions.Deployment, events *api.EventList, dd *DeploymentDescriber) (string, error) {
func describeDeployment(d *appsv1.Deployment, selector labels.Selector, internalDeployment *extensions.Deployment, events *api.EventList, dd *DeploymentDescriber) (string, error) {
return tabbedString(func(out io.Writer) error {
w := NewPrefixWriter(out)
w.Write(LEVEL_0, "Name:\t%s\n", d.ObjectMeta.Name)
@ -3129,10 +3129,10 @@ func describeDeployment(d *versionedextension.Deployment, selector labels.Select
w.Write(LEVEL_1, "%v \t%v\t%v\n", c.Type, c.Status, c.Reason)
}
}
oldRSs, _, newRS, err := deploymentutil.GetAllReplicaSets(d, dd.external.ExtensionsV1beta1())
oldRSs, _, newRS, err := deploymentutil.GetAllReplicaSets(d, dd.external.AppsV1())
if err == nil {
w.Write(LEVEL_0, "OldReplicaSets:\t%s\n", printReplicaSetsByLabels(oldRSs))
var newRSs []*versionedextension.ReplicaSet
var newRSs []*appsv1.ReplicaSet
if newRS != nil {
newRSs = append(newRSs, newRS)
}
@ -3146,7 +3146,7 @@ func describeDeployment(d *versionedextension.Deployment, selector labels.Select
})
}
func printReplicaSetsByLabels(matchingRSs []*versionedextension.ReplicaSet) string {
func printReplicaSetsByLabels(matchingRSs []*appsv1.ReplicaSet) string {
// Format the matching ReplicaSets into strings.
rsStrings := make([]string, 0, len(matchingRSs))
for _, rs := range matchingRSs {

View File

@ -25,8 +25,8 @@ import (
"testing"
"time"
appsv1 "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
"k8s.io/api/extensions/v1beta1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -1345,12 +1345,12 @@ func TestPersistentVolumeClaimDescriber(t *testing.T) {
func TestDescribeDeployment(t *testing.T) {
fake := fake.NewSimpleClientset()
versionedFake := versionedfake.NewSimpleClientset(&v1beta1.Deployment{
versionedFake := versionedfake.NewSimpleClientset(&appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: "bar",
Namespace: "foo",
},
Spec: v1beta1.DeploymentSpec{
Spec: appsv1.DeploymentSpec{
Replicas: utilpointer.Int32Ptr(1),
Selector: &metav1.LabelSelector{},
Template: v1.PodTemplateSpec{
@ -1977,12 +1977,12 @@ func TestDescribeEvents(t *testing.T) {
},
"DeploymentDescriber": &DeploymentDescriber{
fake.NewSimpleClientset(events),
versionedfake.NewSimpleClientset(&v1beta1.Deployment{
versionedfake.NewSimpleClientset(&appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: "bar",
Namespace: "foo",
},
Spec: v1beta1.DeploymentSpec{
Spec: appsv1.DeploymentSpec{
Replicas: utilpointer.Int32Ptr(1),
Selector: &metav1.LabelSelector{},
},

View File

@ -39,6 +39,7 @@ go_library(
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/k8s.io/api/admissionregistration/v1alpha1:go_default_library",
"//vendor/k8s.io/api/admissionregistration/v1beta1:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/authorization/v1:go_default_library",
"//vendor/k8s.io/api/batch/v1:go_default_library",
"//vendor/k8s.io/api/batch/v1beta1:go_default_library",

View File

@ -24,8 +24,8 @@ import (
"strings"
"time"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
rbacv1beta1 "k8s.io/api/rbac/v1beta1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -87,7 +87,7 @@ func cleanTest(client clientset.Interface, aggrclient *aggregatorclient.Clientse
// delete the APIService first to avoid causing discovery errors
_ = aggrclient.ApiregistrationV1beta1().APIServices().Delete("v1alpha1.wardle.k8s.io", nil)
_ = client.ExtensionsV1beta1().Deployments(namespace).Delete("sample-apiserver", nil)
_ = client.AppsV1().Deployments(namespace).Delete("sample-apiserver", nil)
_ = client.CoreV1().Secrets(namespace).Delete("sample-apiserver-secret", nil)
_ = client.CoreV1().Services(namespace).Delete("sample-api", nil)
_ = client.CoreV1().ServiceAccounts(namespace).Delete("sample-apiserver", nil)
@ -171,14 +171,18 @@ func TestSampleAPIServer(f *framework.Framework, image string) {
Image: etcdImage,
},
}
d := &extensions.Deployment{
d := &apps.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: deploymentName,
Name: deploymentName,
Labels: podLabels,
},
Spec: extensions.DeploymentSpec{
Spec: apps.DeploymentSpec{
Replicas: &replicas,
Strategy: extensions.DeploymentStrategy{
Type: extensions.RollingUpdateDeploymentStrategyType,
Selector: &metav1.LabelSelector{
MatchLabels: podLabels,
},
Strategy: apps.DeploymentStrategy{
Type: apps.RollingUpdateDeploymentStrategyType,
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
@ -192,7 +196,7 @@ func TestSampleAPIServer(f *framework.Framework, image string) {
},
},
}
deployment, err := client.ExtensionsV1beta1().Deployments(namespace).Create(d)
deployment, err := client.AppsV1().Deployments(namespace).Create(d)
framework.ExpectNoError(err, "creating deployment %s in namespace %s", deploymentName, namespace)
err = framework.WaitForDeploymentRevisionAndImage(client, namespace, deploymentName, "1", image)
framework.ExpectNoError(err, "waiting for the deployment of image %s in %s in %s to complete", image, deploymentName, namespace)

View File

@ -23,8 +23,8 @@ import (
"time"
"k8s.io/api/admissionregistration/v1beta1"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
rbacv1beta1 "k8s.io/api/rbac/v1beta1"
apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
crdclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
@ -263,14 +263,18 @@ func deployWebhookAndService(f *framework.Framework, image string, context *cert
Image: image,
},
}
d := &extensions.Deployment{
d := &apps.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: deploymentName,
Name: deploymentName,
Labels: podLabels,
},
Spec: extensions.DeploymentSpec{
Spec: apps.DeploymentSpec{
Replicas: &replicas,
Strategy: extensions.DeploymentStrategy{
Type: extensions.RollingUpdateDeploymentStrategyType,
Selector: &metav1.LabelSelector{
MatchLabels: podLabels,
},
Strategy: apps.DeploymentStrategy{
Type: apps.RollingUpdateDeploymentStrategyType,
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
@ -284,7 +288,7 @@ func deployWebhookAndService(f *framework.Framework, image string, context *cert
},
},
}
deployment, err := client.ExtensionsV1beta1().Deployments(namespace).Create(d)
deployment, err := client.AppsV1().Deployments(namespace).Create(d)
framework.ExpectNoError(err, "creating deployment %s in namespace %s", deploymentName, namespace)
By("Wait for the deployment to be ready")
err = framework.WaitForDeploymentRevisionAndImage(client, namespace, deploymentName, "1", image)
@ -976,7 +980,7 @@ func updateConfigMap(c clientset.Interface, ns, name string, update updateConfig
func cleanWebhookTest(client clientset.Interface, namespaceName string) {
_ = client.CoreV1().Services(namespaceName).Delete(serviceName, nil)
_ = client.ExtensionsV1beta1().Deployments(namespaceName).Delete(deploymentName, nil)
_ = client.AppsV1().Deployments(namespaceName).Delete(deploymentName, nil)
_ = client.CoreV1().Secrets(namespaceName).Delete(secretName, nil)
_ = client.RbacV1beta1().RoleBindings("kube-system").Delete(roleBindingName, nil)
}

View File

@ -25,6 +25,7 @@ go_library(
deps = [
"//pkg/api/legacyscheme:go_default_library",
"//pkg/api/v1/pod:go_default_library",
"//pkg/apis/apps:go_default_library",
"//pkg/apis/batch:go_default_library",
"//pkg/apis/core:go_default_library",
"//pkg/apis/extensions:go_default_library",

View File

@ -25,6 +25,7 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
"k8s.io/apimachinery/pkg/api/errors"
@ -36,7 +37,7 @@ import (
"k8s.io/apimachinery/pkg/watch"
clientset "k8s.io/client-go/kubernetes"
scaleclient "k8s.io/client-go/scale"
extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
appsinternal "k8s.io/kubernetes/pkg/apis/apps"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
"k8s.io/kubernetes/pkg/kubectl"
@ -51,7 +52,7 @@ const (
)
var (
nilRs *extensions.ReplicaSet
nilRs *apps.ReplicaSet
)
var _ = SIGDescribe("Deployment", func() {
@ -101,7 +102,7 @@ var _ = SIGDescribe("Deployment", func() {
})
func failureTrap(c clientset.Interface, ns string) {
deployments, err := c.ExtensionsV1beta1().Deployments(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
deployments, err := c.AppsV1().Deployments(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
if err != nil {
framework.Logf("Could not list Deployments in namespace %q: %v", ns, err)
return
@ -110,7 +111,7 @@ func failureTrap(c clientset.Interface, ns string) {
d := deployments.Items[i]
framework.Logf(spew.Sprintf("Deployment %q:\n%+v\n", d.Name, d))
_, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(&d, c.ExtensionsV1beta1())
_, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(&d, c.AppsV1())
if err != nil {
framework.Logf("Could not list ReplicaSets for Deployment %q: %v", d.Name, err)
return
@ -127,7 +128,7 @@ func failureTrap(c clientset.Interface, ns string) {
return
}
framework.Logf("Log out all the ReplicaSets if there is no deployment created")
rss, err := c.ExtensionsV1beta1().ReplicaSets(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
rss, err := c.AppsV1().ReplicaSets(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
if err != nil {
framework.Logf("Could not list ReplicaSets in namespace %q: %v", ns, err)
return
@ -160,11 +161,11 @@ func newDeploymentRollback(name string, annotations map[string]string, revision
}
func stopDeployment(c clientset.Interface, internalClient internalclientset.Interface, scaleClient scaleclient.ScalesGetter, ns, deploymentName string) {
deployment, err := c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
framework.Logf("Deleting deployment %s", deploymentName)
reaper, err := kubectl.ReaperFor(extensionsinternal.Kind("Deployment"), internalClient, scaleClient)
reaper, err := kubectl.ReaperFor(appsinternal.Kind("Deployment"), internalClient, scaleClient)
Expect(err).NotTo(HaveOccurred())
timeout := 1 * time.Minute
@ -172,14 +173,14 @@ func stopDeployment(c clientset.Interface, internalClient internalclientset.Inte
Expect(err).NotTo(HaveOccurred())
framework.Logf("Ensuring deployment %s was deleted", deploymentName)
_, err = c.ExtensionsV1beta1().Deployments(ns).Get(deployment.Name, metav1.GetOptions{})
_, err = c.AppsV1().Deployments(ns).Get(deployment.Name, metav1.GetOptions{})
Expect(err).To(HaveOccurred())
Expect(errors.IsNotFound(err)).To(BeTrue())
framework.Logf("Ensuring deployment %s's RSes were deleted", deploymentName)
selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
Expect(err).NotTo(HaveOccurred())
options := metav1.ListOptions{LabelSelector: selector.String()}
rss, err := c.ExtensionsV1beta1().ReplicaSets(ns).List(options)
rss, err := c.AppsV1().ReplicaSets(ns).List(options)
Expect(err).NotTo(HaveOccurred())
Expect(rss.Items).Should(HaveLen(0))
framework.Logf("Ensuring deployment %s's Pods were deleted", deploymentName)
@ -208,9 +209,9 @@ func testDeleteDeployment(f *framework.Framework) {
podLabels := map[string]string{"name": NginxImageName}
replicas := int32(1)
framework.Logf("Creating simple deployment %s", deploymentName)
d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, extensions.RollingUpdateDeploymentStrategyType)
d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, apps.RollingUpdateDeploymentStrategyType)
d.Annotations = map[string]string{"test": "should-copy-to-replica-set", v1.LastAppliedConfigAnnotation: "should-not-copy-to-replica-set"}
deploy, err := c.ExtensionsV1beta1().Deployments(ns).Create(d)
deploy, err := c.AppsV1().Deployments(ns).Create(d)
Expect(err).NotTo(HaveOccurred())
// Wait for it to be updated to revision 1
@ -220,9 +221,9 @@ func testDeleteDeployment(f *framework.Framework) {
err = framework.WaitForDeploymentComplete(c, deploy)
Expect(err).NotTo(HaveOccurred())
deployment, err := c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1())
newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1())
Expect(err).NotTo(HaveOccurred())
Expect(newRS).NotTo(Equal(nilRs))
stopDeployment(c, internalClient, f.ScalesGetter, ns, deploymentName)
@ -243,10 +244,10 @@ func testRollingUpdateDeployment(f *framework.Framework) {
rsRevision := "3546343826724305832"
annotations := make(map[string]string)
annotations[deploymentutil.RevisionAnnotation] = rsRevision
rs := newExtensionsRS(rsName, replicas, rsPodLabels, NginxImageName, NginxImage)
rs := newRS(rsName, replicas, rsPodLabels, NginxImageName, NginxImage)
rs.Annotations = annotations
framework.Logf("Creating replica set %q (going to be adopted)", rs.Name)
_, err := c.ExtensionsV1beta1().ReplicaSets(ns).Create(rs)
_, err := c.AppsV1().ReplicaSets(ns).Create(rs)
Expect(err).NotTo(HaveOccurred())
// Verify that the required pods have come up.
err = framework.VerifyPodsRunning(c, ns, "sample-pod", false, replicas)
@ -255,8 +256,8 @@ func testRollingUpdateDeployment(f *framework.Framework) {
// Create a deployment to delete nginx pods and instead bring up redis pods.
deploymentName := "test-rolling-update-deployment"
framework.Logf("Creating deployment %q", deploymentName)
d := framework.NewDeployment(deploymentName, replicas, deploymentPodLabels, RedisImageName, RedisImage, extensions.RollingUpdateDeploymentStrategyType)
deploy, err := c.ExtensionsV1beta1().Deployments(ns).Create(d)
d := framework.NewDeployment(deploymentName, replicas, deploymentPodLabels, RedisImageName, RedisImage, apps.RollingUpdateDeploymentStrategyType)
deploy, err := c.AppsV1().Deployments(ns).Create(d)
Expect(err).NotTo(HaveOccurred())
// Wait for it to be updated to revision 3546343826724305833.
@ -270,9 +271,9 @@ func testRollingUpdateDeployment(f *framework.Framework) {
// There should be 1 old RS (nginx-controller, which is adopted)
framework.Logf("Ensuring deployment %q has one old replica set (the one it adopted)", deploy.Name)
deployment, err := c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
_, allOldRSs, err := deploymentutil.GetOldReplicaSets(deployment, c.ExtensionsV1beta1())
_, allOldRSs, err := deploymentutil.GetOldReplicaSets(deployment, c.AppsV1())
Expect(err).NotTo(HaveOccurred())
Expect(len(allOldRSs)).Should(Equal(1))
}
@ -284,8 +285,8 @@ func testRecreateDeployment(f *framework.Framework) {
// Create a deployment that brings up redis pods.
deploymentName := "test-recreate-deployment"
framework.Logf("Creating deployment %q", deploymentName)
d := framework.NewDeployment(deploymentName, int32(1), map[string]string{"name": "sample-pod-3"}, RedisImageName, RedisImage, extensions.RecreateDeploymentStrategyType)
deployment, err := c.ExtensionsV1beta1().Deployments(ns).Create(d)
d := framework.NewDeployment(deploymentName, int32(1), map[string]string{"name": "sample-pod-3"}, RedisImageName, RedisImage, apps.RecreateDeploymentStrategyType)
deployment, err := c.AppsV1().Deployments(ns).Create(d)
Expect(err).NotTo(HaveOccurred())
// Wait for it to be updated to revision 1
@ -298,7 +299,7 @@ func testRecreateDeployment(f *framework.Framework) {
// Update deployment to delete redis pods and bring up nginx pods.
framework.Logf("Triggering a new rollout for deployment %q", deploymentName)
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deploymentName, func(update *extensions.Deployment) {
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deploymentName, func(update *apps.Deployment) {
update.Spec.Template.Spec.Containers[0].Name = NginxImageName
update.Spec.Template.Spec.Containers[0].Image = NginxImage
})
@ -321,7 +322,7 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) {
rsName := "test-cleanup-controller"
replicas := int32(1)
revisionHistoryLimit := utilpointer.Int32Ptr(0)
_, err := c.ExtensionsV1beta1().ReplicaSets(ns).Create(newExtensionsRS(rsName, replicas, rsPodLabels, NginxImageName, NginxImage))
_, err := c.AppsV1().ReplicaSets(ns).Create(newRS(rsName, replicas, rsPodLabels, NginxImageName, NginxImage))
Expect(err).NotTo(HaveOccurred())
// Verify that the required pods have come up.
@ -368,9 +369,9 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) {
}
}
}()
d := framework.NewDeployment(deploymentName, replicas, deploymentPodLabels, RedisImageName, RedisImage, extensions.RollingUpdateDeploymentStrategyType)
d := framework.NewDeployment(deploymentName, replicas, deploymentPodLabels, RedisImageName, RedisImage, apps.RollingUpdateDeploymentStrategyType)
d.Spec.RevisionHistoryLimit = revisionHistoryLimit
_, err = c.ExtensionsV1beta1().Deployments(ns).Create(d)
_, err = c.AppsV1().Deployments(ns).Create(d)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Waiting for deployment %s history to be cleaned up", deploymentName))
@ -392,7 +393,7 @@ func testRolloverDeployment(f *framework.Framework) {
rsName := "test-rollover-controller"
rsReplicas := int32(1)
_, err := c.ExtensionsV1beta1().ReplicaSets(ns).Create(newExtensionsRS(rsName, rsReplicas, rsPodLabels, NginxImageName, NginxImage))
_, err := c.AppsV1().ReplicaSets(ns).Create(newRS(rsName, rsReplicas, rsPodLabels, NginxImageName, NginxImage))
Expect(err).NotTo(HaveOccurred())
// Verify that the required pods have come up.
err = framework.VerifyPodsRunning(c, ns, podName, false, rsReplicas)
@ -407,19 +408,19 @@ func testRolloverDeployment(f *framework.Framework) {
deploymentName, deploymentImageName := "test-rollover-deployment", "redis-slave"
deploymentReplicas := int32(1)
deploymentImage := "gcr.io/google_samples/gb-redisslave:nonexistent"
deploymentStrategyType := extensions.RollingUpdateDeploymentStrategyType
deploymentStrategyType := apps.RollingUpdateDeploymentStrategyType
framework.Logf("Creating deployment %q", deploymentName)
newDeployment := framework.NewDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType)
newDeployment.Spec.Strategy.RollingUpdate = &extensions.RollingUpdateDeployment{
newDeployment.Spec.Strategy.RollingUpdate = &apps.RollingUpdateDeployment{
MaxUnavailable: intOrStrP(0),
MaxSurge: intOrStrP(1),
}
newDeployment.Spec.MinReadySeconds = int32(10)
_, err = c.ExtensionsV1beta1().Deployments(ns).Create(newDeployment)
_, err = c.AppsV1().Deployments(ns).Create(newDeployment)
Expect(err).NotTo(HaveOccurred())
// Verify that the pods were scaled up and down as expected.
deployment, err := c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
framework.Logf("Make sure deployment %q performs scaling operations", deploymentName)
// Make sure the deployment starts to scale up and down replica sets by checking if its updated replicas >= 1
@ -430,17 +431,17 @@ func testRolloverDeployment(f *framework.Framework) {
Expect(err).NotTo(HaveOccurred())
framework.Logf("Ensure that both replica sets have 1 created replica")
oldRS, err := c.ExtensionsV1beta1().ReplicaSets(ns).Get(rsName, metav1.GetOptions{})
oldRS, err := c.AppsV1().ReplicaSets(ns).Get(rsName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
ensureReplicas(oldRS, int32(1))
newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1())
newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1())
Expect(err).NotTo(HaveOccurred())
ensureReplicas(newRS, int32(1))
// The deployment is stuck, update it to rollover the above 2 ReplicaSets and bring up redis pods.
framework.Logf("Rollover old replica sets for deployment %q with new image update", deploymentName)
updatedDeploymentImageName, updatedDeploymentImage := RedisImageName, RedisImage
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, newDeployment.Name, func(update *extensions.Deployment) {
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, newDeployment.Name, func(update *apps.Deployment) {
update.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImageName
update.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage
})
@ -461,16 +462,16 @@ func testRolloverDeployment(f *framework.Framework) {
Expect(err).NotTo(HaveOccurred())
framework.Logf("Ensure that both old replica sets have no replicas")
oldRS, err = c.ExtensionsV1beta1().ReplicaSets(ns).Get(rsName, metav1.GetOptions{})
oldRS, err = c.AppsV1().ReplicaSets(ns).Get(rsName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
ensureReplicas(oldRS, int32(0))
// Not really the new replica set anymore but we GET by name so that's fine.
newRS, err = c.ExtensionsV1beta1().ReplicaSets(ns).Get(newRS.Name, metav1.GetOptions{})
newRS, err = c.AppsV1().ReplicaSets(ns).Get(newRS.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
ensureReplicas(newRS, int32(0))
}
func ensureReplicas(rs *extensions.ReplicaSet, replicas int32) {
func ensureReplicas(rs *apps.ReplicaSet, replicas int32) {
Expect(*rs.Spec.Replicas).Should(Equal(replicas))
Expect(rs.Status.Replicas).Should(Equal(replicas))
}
@ -490,12 +491,12 @@ func testRollbackDeployment(f *framework.Framework) {
deploymentName, deploymentImageName := "test-rollback-deployment", NginxImageName
deploymentReplicas := int32(1)
deploymentImage := NginxImage
deploymentStrategyType := extensions.RollingUpdateDeploymentStrategyType
deploymentStrategyType := apps.RollingUpdateDeploymentStrategyType
framework.Logf("Creating deployment %s", deploymentName)
d := framework.NewDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType)
createAnnotation := map[string]string{"action": "create", "author": "node"}
d.Annotations = createAnnotation
deploy, err := c.ExtensionsV1beta1().Deployments(ns).Create(d)
deploy, err := c.AppsV1().Deployments(ns).Create(d)
Expect(err).NotTo(HaveOccurred())
// Wait for it to be updated to revision 1
@ -513,7 +514,7 @@ func testRollbackDeployment(f *framework.Framework) {
updatedDeploymentImage := RedisImage
updatedDeploymentImageName := RedisImageName
updateAnnotation := map[string]string{"action": "update", "log": "I need to update it"}
deployment, err := framework.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *extensions.Deployment) {
deployment, err := framework.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *apps.Deployment) {
update.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImageName
update.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage
update.Annotations = updateAnnotation
@ -616,7 +617,7 @@ func testRollbackDeployment(f *framework.Framework) {
Expect(err).NotTo(HaveOccurred())
}
func randomScale(d *extensions.Deployment, i int) {
func randomScale(d *apps.Deployment, i int) {
switch r := rand.Float32(); {
case r < 0.3:
framework.Logf("%02d: scaling up", i)
@ -641,12 +642,12 @@ func testIterativeDeployments(f *framework.Framework) {
// Create a nginx deployment.
deploymentName := "nginx"
thirty := int32(30)
d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, extensions.RollingUpdateDeploymentStrategyType)
d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, apps.RollingUpdateDeploymentStrategyType)
d.Spec.ProgressDeadlineSeconds = &thirty
d.Spec.RevisionHistoryLimit = &two
d.Spec.Template.Spec.TerminationGracePeriodSeconds = &zero
framework.Logf("Creating deployment %q", deploymentName)
deployment, err := c.ExtensionsV1beta1().Deployments(ns).Create(d)
deployment, err := c.AppsV1().Deployments(ns).Create(d)
Expect(err).NotTo(HaveOccurred())
iterations := 20
@ -659,7 +660,7 @@ func testIterativeDeployments(f *framework.Framework) {
case n < 0.2:
// trigger a new deployment
framework.Logf("%02d: triggering a new rollout for deployment %q", i, deployment.Name)
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
newEnv := v1.EnvVar{Name: "A", Value: fmt.Sprintf("%d", i)}
update.Spec.Template.Spec.Containers[0].Env = append(update.Spec.Template.Spec.Containers[0].Env, newEnv)
randomScale(update, i)
@ -669,16 +670,18 @@ func testIterativeDeployments(f *framework.Framework) {
case n < 0.4:
// rollback to the previous version
framework.Logf("%02d: rolling back a rollout for deployment %q", i, deployment.Name)
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
rollbackTo := &extensions.RollbackConfig{Revision: 0}
update.Spec.RollbackTo = rollbackTo
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
if update.Annotations == nil {
update.Annotations = make(map[string]string)
}
update.Annotations[apps.DeprecatedRollbackTo] = "0"
})
Expect(err).NotTo(HaveOccurred())
case n < 0.6:
// just scaling
framework.Logf("%02d: scaling deployment %q", i, deployment.Name)
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
randomScale(update, i)
})
Expect(err).NotTo(HaveOccurred())
@ -687,14 +690,14 @@ func testIterativeDeployments(f *framework.Framework) {
// toggling the deployment
if deployment.Spec.Paused {
framework.Logf("%02d: pausing deployment %q", i, deployment.Name)
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
update.Spec.Paused = true
randomScale(update, i)
})
Expect(err).NotTo(HaveOccurred())
} else {
framework.Logf("%02d: resuming deployment %q", i, deployment.Name)
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
update.Spec.Paused = false
randomScale(update, i)
})
@ -728,10 +731,10 @@ func testIterativeDeployments(f *framework.Framework) {
}
// unpause the deployment if we end up pausing it
deployment, err = c.ExtensionsV1beta1().Deployments(ns).Get(deployment.Name, metav1.GetOptions{})
deployment, err = c.AppsV1().Deployments(ns).Get(deployment.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
if deployment.Spec.Paused {
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
update.Spec.Paused = false
})
}
@ -743,7 +746,7 @@ func testIterativeDeployments(f *framework.Framework) {
Expect(framework.WaitForDeploymentComplete(c, deployment)).NotTo(HaveOccurred())
framework.Logf("Checking deployment %q for a complete condition", deploymentName)
Expect(framework.WaitForDeploymentWithCondition(c, ns, deploymentName, deploymentutil.NewRSAvailableReason, extensions.DeploymentProgressing)).NotTo(HaveOccurred())
Expect(framework.WaitForDeploymentWithCondition(c, ns, deploymentName, deploymentutil.NewRSAvailableReason, apps.DeploymentProgressing)).NotTo(HaveOccurred())
}
func testDeploymentsControllerRef(f *framework.Framework) {
@ -754,8 +757,8 @@ func testDeploymentsControllerRef(f *framework.Framework) {
framework.Logf("Creating Deployment %q", deploymentName)
podLabels := map[string]string{"name": NginxImageName}
replicas := int32(1)
d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, extensions.RollingUpdateDeploymentStrategyType)
deploy, err := c.ExtensionsV1beta1().Deployments(ns).Create(d)
d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, apps.RollingUpdateDeploymentStrategyType)
deploy, err := c.AppsV1().Deployments(ns).Create(d)
Expect(err).NotTo(HaveOccurred())
err = framework.WaitForDeploymentComplete(c, deploy)
Expect(err).NotTo(HaveOccurred())
@ -781,8 +784,8 @@ func testDeploymentsControllerRef(f *framework.Framework) {
deploymentName = "test-adopt-deployment"
framework.Logf("Creating Deployment %q to adopt the ReplicaSet", deploymentName)
d = framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, extensions.RollingUpdateDeploymentStrategyType)
deploy, err = c.ExtensionsV1beta1().Deployments(ns).Create(d)
d = framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, apps.RollingUpdateDeploymentStrategyType)
deploy, err = c.AppsV1().Deployments(ns).Create(d)
Expect(err).NotTo(HaveOccurred())
err = framework.WaitForDeploymentComplete(c, deploy)
Expect(err).NotTo(HaveOccurred())
@ -811,13 +814,13 @@ func testProportionalScalingDeployment(f *framework.Framework) {
// Create a nginx deployment.
deploymentName := "nginx-deployment"
d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, extensions.RollingUpdateDeploymentStrategyType)
d.Spec.Strategy.RollingUpdate = new(extensions.RollingUpdateDeployment)
d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, apps.RollingUpdateDeploymentStrategyType)
d.Spec.Strategy.RollingUpdate = new(apps.RollingUpdateDeployment)
d.Spec.Strategy.RollingUpdate.MaxSurge = intOrStrP(3)
d.Spec.Strategy.RollingUpdate.MaxUnavailable = intOrStrP(2)
framework.Logf("Creating deployment %q", deploymentName)
deployment, err := c.ExtensionsV1beta1().Deployments(ns).Create(d)
deployment, err := c.AppsV1().Deployments(ns).Create(d)
Expect(err).NotTo(HaveOccurred())
framework.Logf("Waiting for observed generation %d", deployment.Generation)
@ -831,13 +834,13 @@ func testProportionalScalingDeployment(f *framework.Framework) {
framework.Logf("Waiting for deployment %q to complete", deployment.Name)
Expect(framework.WaitForDeploymentComplete(c, deployment)).NotTo(HaveOccurred())
firstRS, err := deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1())
firstRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1())
Expect(err).NotTo(HaveOccurred())
// Update the deployment with a non-existent image so that the new replica set
// will be blocked to simulate a partial rollout.
framework.Logf("Updating deployment %q with a non-existent image", deploymentName)
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *extensions.Deployment) {
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *apps.Deployment) {
update.Spec.Template.Spec.Containers[0].Image = "nginx:404"
})
Expect(err).NotTo(HaveOccurred())
@ -860,13 +863,13 @@ func testProportionalScalingDeployment(f *framework.Framework) {
// The desired replicas wait makes sure that the RS controller has created expected number of pods.
framework.Logf("Waiting for the first rollout's replicaset of deployment %q to have desired number of replicas", deploymentName)
firstRS, err = c.ExtensionsV1beta1().ReplicaSets(ns).Get(firstRS.Name, metav1.GetOptions{})
firstRS, err = c.AppsV1().ReplicaSets(ns).Get(firstRS.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
err = framework.WaitForReplicaSetDesiredReplicas(c.ExtensionsV1beta1(), firstRS)
err = framework.WaitForReplicaSetDesiredReplicas(c.AppsV1(), firstRS)
Expect(err).NotTo(HaveOccurred())
// Checking state of second rollout's replicaset.
secondRS, err := deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1())
secondRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1())
Expect(err).NotTo(HaveOccurred())
maxSurge, err := intstr.GetValueFromIntOrPercent(deployment.Spec.Strategy.RollingUpdate.MaxSurge, int(*(deployment.Spec.Replicas)), false)
@ -883,9 +886,9 @@ func testProportionalScalingDeployment(f *framework.Framework) {
// The desired replicas wait makes sure that the RS controller has created expected number of pods.
framework.Logf("Waiting for the second rollout's replicaset of deployment %q to have desired number of replicas", deploymentName)
secondRS, err = c.ExtensionsV1beta1().ReplicaSets(ns).Get(secondRS.Name, metav1.GetOptions{})
secondRS, err = c.AppsV1().ReplicaSets(ns).Get(secondRS.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
err = framework.WaitForReplicaSetDesiredReplicas(c.ExtensionsV1beta1(), secondRS)
err = framework.WaitForReplicaSetDesiredReplicas(c.AppsV1(), secondRS)
Expect(err).NotTo(HaveOccurred())
// Check the deployment's minimum availability.
@ -897,15 +900,15 @@ func testProportionalScalingDeployment(f *framework.Framework) {
// Scale the deployment to 30 replicas.
newReplicas = int32(30)
framework.Logf("Scaling up the deployment %q from %d to %d", deploymentName, replicas, newReplicas)
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
update.Spec.Replicas = &newReplicas
})
Expect(err).NotTo(HaveOccurred())
framework.Logf("Waiting for the replicasets of deployment %q to have desired number of replicas", deploymentName)
firstRS, err = c.ExtensionsV1beta1().ReplicaSets(ns).Get(firstRS.Name, metav1.GetOptions{})
firstRS, err = c.AppsV1().ReplicaSets(ns).Get(firstRS.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
secondRS, err = c.ExtensionsV1beta1().ReplicaSets(ns).Get(secondRS.Name, metav1.GetOptions{})
secondRS, err = c.AppsV1().ReplicaSets(ns).Get(secondRS.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
// First rollout's replicaset should have .spec.replicas = 8 + (30-10)*(8/13) = 8 + 12 = 20 replicas.
@ -943,18 +946,18 @@ func waitDeploymentReplicaSetsOrphaned(c clientset.Interface, ns string, label m
}
}
func listDeploymentReplicaSets(c clientset.Interface, ns string, label map[string]string) *extensions.ReplicaSetList {
func listDeploymentReplicaSets(c clientset.Interface, ns string, label map[string]string) *apps.ReplicaSetList {
selector := labels.Set(label).AsSelector()
options := metav1.ListOptions{LabelSelector: selector.String()}
rsList, err := c.ExtensionsV1beta1().ReplicaSets(ns).List(options)
rsList, err := c.AppsV1().ReplicaSets(ns).List(options)
Expect(err).NotTo(HaveOccurred())
Expect(len(rsList.Items)).To(BeNumerically(">", 0))
return rsList
}
func orphanDeploymentReplicaSets(c clientset.Interface, d *extensions.Deployment) error {
func orphanDeploymentReplicaSets(c clientset.Interface, d *apps.Deployment) error {
trueVar := true
deleteOptions := &metav1.DeleteOptions{OrphanDependents: &trueVar}
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(d.UID))
return c.ExtensionsV1beta1().Deployments(d.Namespace).Delete(d.Name, deleteOptions)
return c.AppsV1().Deployments(d.Namespace).Delete(d.Name, deleteOptions)
}
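
Because apps/v1 drops spec.rollbackTo, the rollback step in testIterativeDeployments above sets the deprecated.deployment.rollback.to annotation (the apps.DeprecatedRollbackTo constant) instead. A hedged sketch of the same pattern against a typed client; the function name and client wiring are illustrative assumptions.

```go
package example

import (
	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// rollbackToRevision asks the Deployment controller to roll back by setting
// the deprecated rollback annotation; revision "0" means the previous revision.
func rollbackToRevision(c kubernetes.Interface, namespace, name, revision string) error {
	d, err := c.AppsV1().Deployments(namespace).Get(name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	if d.Annotations == nil {
		d.Annotations = map[string]string{}
	}
	d.Annotations[appsv1.DeprecatedRollbackTo] = revision
	_, err = c.AppsV1().Deployments(namespace).Update(d)
	return err
}
```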

View File

@ -22,7 +22,6 @@ import (
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -41,7 +40,8 @@ func newRS(rsName string, replicas int32, rsPodLabels map[string]string, imageNa
zero := int64(0)
return &apps.ReplicaSet{
ObjectMeta: metav1.ObjectMeta{
Name: rsName,
Name: rsName,
Labels: rsPodLabels,
},
Spec: apps.ReplicaSetSpec{
Selector: &metav1.LabelSelector{
@ -66,33 +66,6 @@ func newRS(rsName string, replicas int32, rsPodLabels map[string]string, imageNa
}
}
// TODO(#55714): Remove this when Deployment tests use apps/v1 ReplicaSet.
func newExtensionsRS(rsName string, replicas int32, rsPodLabels map[string]string, imageName string, image string) *extensions.ReplicaSet {
zero := int64(0)
return &extensions.ReplicaSet{
ObjectMeta: metav1.ObjectMeta{
Name: rsName,
},
Spec: extensions.ReplicaSetSpec{
Replicas: &replicas,
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: rsPodLabels,
},
Spec: v1.PodSpec{
TerminationGracePeriodSeconds: &zero,
Containers: []v1.Container{
{
Name: imageName,
Image: image,
},
},
},
},
},
}
}
func newPodQuota(name, number string) *v1.ResourceQuota {
return &v1.ResourceQuota{
ObjectMeta: metav1.ObjectMeta{

View File

@ -29,6 +29,7 @@ go_library(
"//vendor/github.com/evanphx/json-patch:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/batch/v1:go_default_library",
"//vendor/k8s.io/api/certificates/v1beta1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",

View File

@ -23,8 +23,8 @@ import (
"strings"
"time"
apps "k8s.io/api/apps/v1"
apiv1 "k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
apiextensionclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
"k8s.io/apiextensions-apiserver/test/integration/testserver"
@ -222,36 +222,36 @@ var _ = SIGDescribe("Advanced Audit", func() {
{
func() {
podLabels := map[string]string{"name": "audit-deployment-pod"}
d := framework.NewDeployment("audit-deployment", int32(1), podLabels, "redis", imageutils.GetE2EImage(imageutils.Redis), extensions.RecreateDeploymentStrategyType)
d := framework.NewDeployment("audit-deployment", int32(1), podLabels, "redis", imageutils.GetE2EImage(imageutils.Redis), apps.RecreateDeploymentStrategyType)
_, err := f.ClientSet.ExtensionsV1beta1().Deployments(namespace).Create(d)
_, err := f.ClientSet.AppsV1().Deployments(namespace).Create(d)
framework.ExpectNoError(err, "failed to create audit-deployment")
_, err = f.ClientSet.ExtensionsV1beta1().Deployments(namespace).Get(d.Name, metav1.GetOptions{})
_, err = f.ClientSet.AppsV1().Deployments(namespace).Get(d.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to get audit-deployment")
deploymentChan, err := f.ClientSet.ExtensionsV1beta1().Deployments(namespace).Watch(watchOptions)
deploymentChan, err := f.ClientSet.AppsV1().Deployments(namespace).Watch(watchOptions)
framework.ExpectNoError(err, "failed to create watch for deployments")
for range deploymentChan.ResultChan() {
}
_, err = f.ClientSet.ExtensionsV1beta1().Deployments(namespace).Update(d)
_, err = f.ClientSet.AppsV1().Deployments(namespace).Update(d)
framework.ExpectNoError(err, "failed to update audit-deployment")
_, err = f.ClientSet.ExtensionsV1beta1().Deployments(namespace).Patch(d.Name, types.JSONPatchType, patch)
_, err = f.ClientSet.AppsV1().Deployments(namespace).Patch(d.Name, types.JSONPatchType, patch)
framework.ExpectNoError(err, "failed to patch deployment")
_, err = f.ClientSet.ExtensionsV1beta1().Deployments(namespace).List(metav1.ListOptions{})
_, err = f.ClientSet.AppsV1().Deployments(namespace).List(metav1.ListOptions{})
framework.ExpectNoError(err, "failed to create list deployments")
err = f.ClientSet.ExtensionsV1beta1().Deployments(namespace).Delete("audit-deployment", &metav1.DeleteOptions{})
err = f.ClientSet.AppsV1().Deployments(namespace).Delete("audit-deployment", &metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete deployments")
},
[]auditEvent{
{
v1beta1.LevelRequestResponse,
v1beta1.StageResponseComplete,
fmt.Sprintf("/apis/extensions/v1beta1/namespaces/%s/deployments", namespace),
fmt.Sprintf("/apis/apps/v1/namespaces/%s/deployments", namespace),
"create",
201,
auditTestUser,
@ -263,7 +263,7 @@ var _ = SIGDescribe("Advanced Audit", func() {
}, {
v1beta1.LevelRequest,
v1beta1.StageResponseComplete,
fmt.Sprintf("/apis/extensions/v1beta1/namespaces/%s/deployments/audit-deployment", namespace),
fmt.Sprintf("/apis/apps/v1/namespaces/%s/deployments/audit-deployment", namespace),
"get",
200,
auditTestUser,
@ -275,7 +275,7 @@ var _ = SIGDescribe("Advanced Audit", func() {
}, {
v1beta1.LevelRequest,
v1beta1.StageResponseComplete,
fmt.Sprintf("/apis/extensions/v1beta1/namespaces/%s/deployments", namespace),
fmt.Sprintf("/apis/apps/v1/namespaces/%s/deployments", namespace),
"list",
200,
auditTestUser,
@ -287,7 +287,7 @@ var _ = SIGDescribe("Advanced Audit", func() {
}, {
v1beta1.LevelRequest,
v1beta1.StageResponseStarted,
fmt.Sprintf("/apis/extensions/v1beta1/namespaces/%s/deployments?timeoutSeconds=%d&watch=true", namespace, watchTestTimeout),
fmt.Sprintf("/apis/apps/v1/namespaces/%s/deployments?timeoutSeconds=%d&watch=true", namespace, watchTestTimeout),
"watch",
200,
auditTestUser,
@ -299,7 +299,7 @@ var _ = SIGDescribe("Advanced Audit", func() {
}, {
v1beta1.LevelRequest,
v1beta1.StageResponseComplete,
fmt.Sprintf("/apis/extensions/v1beta1/namespaces/%s/deployments?timeoutSeconds=%d&watch=true", namespace, watchTestTimeout),
fmt.Sprintf("/apis/apps/v1/namespaces/%s/deployments?timeoutSeconds=%d&watch=true", namespace, watchTestTimeout),
"watch",
200,
auditTestUser,
@ -311,7 +311,7 @@ var _ = SIGDescribe("Advanced Audit", func() {
}, {
v1beta1.LevelRequestResponse,
v1beta1.StageResponseComplete,
fmt.Sprintf("/apis/extensions/v1beta1/namespaces/%s/deployments/audit-deployment", namespace),
fmt.Sprintf("/apis/apps/v1/namespaces/%s/deployments/audit-deployment", namespace),
"update",
200,
auditTestUser,
@ -323,7 +323,7 @@ var _ = SIGDescribe("Advanced Audit", func() {
}, {
v1beta1.LevelRequestResponse,
v1beta1.StageResponseComplete,
fmt.Sprintf("/apis/extensions/v1beta1/namespaces/%s/deployments/audit-deployment", namespace),
fmt.Sprintf("/apis/apps/v1/namespaces/%s/deployments/audit-deployment", namespace),
"patch",
200,
auditTestUser,
@ -335,7 +335,7 @@ var _ = SIGDescribe("Advanced Audit", func() {
}, {
v1beta1.LevelRequestResponse,
v1beta1.StageResponseComplete,
fmt.Sprintf("/apis/extensions/v1beta1/namespaces/%s/deployments/audit-deployment", namespace),
fmt.Sprintf("/apis/apps/v1/namespaces/%s/deployments/audit-deployment", namespace),
"delete",
200,
auditTestUser,


@ -141,9 +141,9 @@ go_library(
"//vendor/k8s.io/client-go/dynamic:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/restmapper:go_default_library",


@ -22,36 +22,36 @@ import (
. "github.com/onsi/ginkgo"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
clientset "k8s.io/client-go/kubernetes"
scaleclient "k8s.io/client-go/scale"
extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
appsinternal "k8s.io/kubernetes/pkg/apis/apps"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
testutils "k8s.io/kubernetes/test/utils"
)
func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string, applyUpdate testutils.UpdateDeploymentFunc) (*extensions.Deployment, error) {
func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string, applyUpdate testutils.UpdateDeploymentFunc) (*apps.Deployment, error) {
return testutils.UpdateDeploymentWithRetries(c, namespace, name, applyUpdate, Logf, Poll, pollShortTimeout)
}
// Waits for the deployment to clean up old ReplicaSets.
func WaitForDeploymentOldRSsNum(c clientset.Interface, ns, deploymentName string, desiredRSNum int) error {
var oldRSs []*extensions.ReplicaSet
var d *extensions.Deployment
var oldRSs []*apps.ReplicaSet
var d *apps.Deployment
pollErr := wait.PollImmediate(Poll, 5*time.Minute, func() (bool, error) {
deployment, err := c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
if err != nil {
return false, err
}
d = deployment
_, oldRSs, err = deploymentutil.GetOldReplicaSets(deployment, c.ExtensionsV1beta1())
_, oldRSs, err = deploymentutil.GetOldReplicaSets(deployment, c.AppsV1())
if err != nil {
return false, err
}
@ -64,7 +64,7 @@ func WaitForDeploymentOldRSsNum(c clientset.Interface, ns, deploymentName string
return pollErr
}
func logReplicaSetsOfDeployment(deployment *extensions.Deployment, allOldRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet) {
func logReplicaSetsOfDeployment(deployment *apps.Deployment, allOldRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet) {
testutils.LogReplicaSetsOfDeployment(deployment, allOldRSs, newRS, Logf)
}
@ -72,7 +72,7 @@ func WaitForObservedDeployment(c clientset.Interface, ns, deploymentName string,
return testutils.WaitForObservedDeployment(c, ns, deploymentName, desiredGeneration)
}
func WaitForDeploymentWithCondition(c clientset.Interface, ns, deploymentName, reason string, condType extensions.DeploymentConditionType) error {
func WaitForDeploymentWithCondition(c clientset.Interface, ns, deploymentName, reason string, condType apps.DeploymentConditionType) error {
return testutils.WaitForDeploymentWithCondition(c, ns, deploymentName, reason, condType, Logf, Poll, pollLongTimeout)
}
@ -83,16 +83,17 @@ func WaitForDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName
return testutils.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, revision, image, Logf, Poll, pollLongTimeout)
}
func NewDeployment(deploymentName string, replicas int32, podLabels map[string]string, imageName, image string, strategyType extensions.DeploymentStrategyType) *extensions.Deployment {
func NewDeployment(deploymentName string, replicas int32, podLabels map[string]string, imageName, image string, strategyType apps.DeploymentStrategyType) *apps.Deployment {
zero := int64(0)
return &extensions.Deployment{
return &apps.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: deploymentName,
Name: deploymentName,
Labels: podLabels,
},
Spec: extensions.DeploymentSpec{
Spec: apps.DeploymentSpec{
Replicas: &replicas,
Selector: &metav1.LabelSelector{MatchLabels: podLabels},
Strategy: extensions.DeploymentStrategy{
Strategy: apps.DeploymentStrategy{
Type: strategyType,
},
Template: v1.PodTemplateSpec{
@ -116,13 +117,13 @@ func NewDeployment(deploymentName string, replicas int32, podLabels map[string]s
// Waits for the deployment to complete, without checking whether the rolling update strategy was violated.
// The rolling update strategy applies only during a rolling update, and can be violated in other situations,
// such as shortly after a scaling event or right after the deployment is created.
func WaitForDeploymentComplete(c clientset.Interface, d *extensions.Deployment) error {
func WaitForDeploymentComplete(c clientset.Interface, d *apps.Deployment) error {
return testutils.WaitForDeploymentComplete(c, d, Logf, Poll, pollLongTimeout)
}
// Waits for the deployment to complete, and checks that the rolling update strategy is not broken at any time.
// Rolling update strategy should not be broken during a rolling update.
func WaitForDeploymentCompleteAndCheckRolling(c clientset.Interface, d *extensions.Deployment) error {
func WaitForDeploymentCompleteAndCheckRolling(c clientset.Interface, d *apps.Deployment) error {
return testutils.WaitForDeploymentCompleteAndCheckRolling(c, d, Logf, Poll, pollLongTimeout)
}
@ -139,12 +140,12 @@ func WaitForDeploymentRollbackCleared(c clientset.Interface, ns, deploymentName
// WatchRecreateDeployment watches Recreate deployments and ensures that no new pods run at the same time as
// old pods.
func WatchRecreateDeployment(c clientset.Interface, d *extensions.Deployment) error {
if d.Spec.Strategy.Type != extensions.RecreateDeploymentStrategyType {
func WatchRecreateDeployment(c clientset.Interface, d *apps.Deployment) error {
if d.Spec.Strategy.Type != apps.RecreateDeploymentStrategyType {
return fmt.Errorf("deployment %q does not use a Recreate strategy: %s", d.Name, d.Spec.Strategy.Type)
}
w, err := c.ExtensionsV1beta1().Deployments(d.Namespace).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: d.Name, ResourceVersion: d.ResourceVersion}))
w, err := c.AppsV1().Deployments(d.Namespace).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: d.Name, ResourceVersion: d.ResourceVersion}))
if err != nil {
return err
}
@ -152,12 +153,12 @@ func WatchRecreateDeployment(c clientset.Interface, d *extensions.Deployment) er
status := d.Status
condition := func(event watch.Event) (bool, error) {
d := event.Object.(*extensions.Deployment)
d := event.Object.(*apps.Deployment)
status = d.Status
if d.Status.UpdatedReplicas > 0 && d.Status.Replicas != d.Status.UpdatedReplicas {
_, allOldRSs, err := deploymentutil.GetOldReplicaSets(d, c.ExtensionsV1beta1())
newRS, nerr := deploymentutil.GetNewReplicaSet(d, c.ExtensionsV1beta1())
_, allOldRSs, err := deploymentutil.GetOldReplicaSets(d, c.AppsV1())
newRS, nerr := deploymentutil.GetNewReplicaSet(d, c.AppsV1())
if err == nil && nerr == nil {
Logf("%+v", d)
logReplicaSetsOfDeployment(d, allOldRSs, newRS)
@ -179,7 +180,7 @@ func WatchRecreateDeployment(c clientset.Interface, d *extensions.Deployment) er
}
func ScaleDeployment(clientset clientset.Interface, scalesGetter scaleclient.ScalesGetter, ns, name string, size uint, wait bool) error {
return ScaleResource(clientset, scalesGetter, ns, name, size, wait, extensionsinternal.Kind("Deployment"), extensionsinternal.Resource("deployments"))
return ScaleResource(clientset, scalesGetter, ns, name, size, wait, appsinternal.Kind("Deployment"), appsinternal.Resource("deployments"))
}
func RunDeployment(config testutils.DeploymentConfig) error {
@ -189,13 +190,13 @@ func RunDeployment(config testutils.DeploymentConfig) error {
return testutils.RunDeployment(config)
}
func logPodsOfDeployment(c clientset.Interface, deployment *extensions.Deployment, rsList []*extensions.ReplicaSet) {
func logPodsOfDeployment(c clientset.Interface, deployment *apps.Deployment, rsList []*apps.ReplicaSet) {
testutils.LogPodsOfDeployment(c, deployment, rsList, Logf)
}
func WaitForDeploymentRevision(c clientset.Interface, d *extensions.Deployment, targetRevision string) error {
func WaitForDeploymentRevision(c clientset.Interface, d *apps.Deployment, targetRevision string) error {
err := wait.PollImmediate(Poll, pollLongTimeout, func() (bool, error) {
deployment, err := c.ExtensionsV1beta1().Deployments(d.Namespace).Get(d.Name, metav1.GetOptions{})
deployment, err := c.AppsV1().Deployments(d.Namespace).Get(d.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
@ -213,9 +214,9 @@ func CheckDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName,
return testutils.CheckDeploymentRevisionAndImage(c, ns, deploymentName, revision, image)
}
func CreateDeployment(client clientset.Interface, replicas int32, podLabels map[string]string, nodeSelector map[string]string, namespace string, pvclaims []*v1.PersistentVolumeClaim, command string) (*extensions.Deployment, error) {
func CreateDeployment(client clientset.Interface, replicas int32, podLabels map[string]string, nodeSelector map[string]string, namespace string, pvclaims []*v1.PersistentVolumeClaim, command string) (*apps.Deployment, error) {
deploymentSpec := MakeDeployment(replicas, podLabels, nodeSelector, namespace, pvclaims, false, command)
deployment, err := client.ExtensionsV1beta1().Deployments(namespace).Create(deploymentSpec)
deployment, err := client.AppsV1().Deployments(namespace).Create(deploymentSpec)
if err != nil {
return nil, fmt.Errorf("deployment %q Create API error: %v", deploymentSpec.Name, err)
}
@ -229,18 +230,18 @@ func CreateDeployment(client clientset.Interface, replicas int32, podLabels map[
// MakeDeployment creates a deployment definition based on the namespace. The deployment references the PVC's
// name. A slice of bash commands can be supplied as args to be run by the pod.
func MakeDeployment(replicas int32, podLabels map[string]string, nodeSelector map[string]string, namespace string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) *extensions.Deployment {
func MakeDeployment(replicas int32, podLabels map[string]string, nodeSelector map[string]string, namespace string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) *apps.Deployment {
if len(command) == 0 {
command = "while true; do sleep 1; done"
}
zero := int64(0)
deploymentName := "deployment-" + string(uuid.NewUUID())
deploymentSpec := &extensions.Deployment{
deploymentSpec := &apps.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: deploymentName,
Namespace: namespace,
},
Spec: extensions.DeploymentSpec{
Spec: apps.DeploymentSpec{
Replicas: &replicas,
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
@ -280,8 +281,8 @@ func MakeDeployment(replicas int32, podLabels map[string]string, nodeSelector ma
}
// GetPodsForDeployment gets pods for the given deployment
func GetPodsForDeployment(client clientset.Interface, deployment *extensions.Deployment) (*v1.PodList, error) {
replicaSet, err := deploymentutil.GetNewReplicaSet(deployment, client.ExtensionsV1beta1())
func GetPodsForDeployment(client clientset.Interface, deployment *apps.Deployment) (*v1.PodList, error) {
replicaSet, err := deploymentutil.GetNewReplicaSet(deployment, client.AppsV1())
if err != nil {
return nil, fmt.Errorf("Failed to get new replica set for deployment %q: %v", deployment.Name, err)
}
@ -291,7 +292,7 @@ func GetPodsForDeployment(client clientset.Interface, deployment *extensions.Dep
podListFunc := func(namespace string, options metav1.ListOptions) (*v1.PodList, error) {
return client.CoreV1().Pods(namespace).List(options)
}
rsList := []*extensions.ReplicaSet{replicaSet}
rsList := []*apps.ReplicaSet{replicaSet}
podList, err := deploymentutil.ListPods(deployment, rsList, podListFunc)
if err != nil {
return nil, fmt.Errorf("Failed to list Pods of Deployment %q: %v", deployment.Name, err)


@ -23,16 +23,15 @@ import (
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
extensionsclient "k8s.io/client-go/kubernetes/typed/extensions/v1beta1"
appsclient "k8s.io/client-go/kubernetes/typed/apps/v1"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
testutils "k8s.io/kubernetes/test/utils"
)
type updateRsFunc func(d *extensions.ReplicaSet)
type updateRsFunc func(d *apps.ReplicaSet)
func UpdateReplicaSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate testutils.UpdateReplicaSetFunc) (*apps.ReplicaSet, error) {
return testutils.UpdateReplicaSetWithRetries(c, namespace, name, applyUpdate, Logf, Poll, pollShortTimeout)
@ -40,11 +39,11 @@ func UpdateReplicaSetWithRetries(c clientset.Interface, namespace, name string,
// CheckNewRSAnnotations checks whether the new RS's annotations are as expected
func CheckNewRSAnnotations(c clientset.Interface, ns, deploymentName string, expectedAnnotations map[string]string) error {
deployment, err := c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
if err != nil {
return err
}
newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1())
newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1())
if err != nil {
return err
}
@ -60,7 +59,7 @@ func CheckNewRSAnnotations(c clientset.Interface, ns, deploymentName string, exp
// WaitForReadyReplicaSet waits until the replicaset has all of its replicas ready.
func WaitForReadyReplicaSet(c clientset.Interface, ns, name string) error {
err := wait.Poll(Poll, pollShortTimeout, func() (bool, error) {
rs, err := c.ExtensionsV1beta1().ReplicaSets(ns).Get(name, metav1.GetOptions{})
rs, err := c.AppsV1().ReplicaSets(ns).Get(name, metav1.GetOptions{})
if err != nil {
return false, err
}
@ -73,7 +72,7 @@ func WaitForReadyReplicaSet(c clientset.Interface, ns, name string) error {
}
// WaitForReplicaSetDesiredReplicas waits until the replicaset has desired number of replicas.
func WaitForReplicaSetDesiredReplicas(rsClient extensionsclient.ReplicaSetsGetter, replicaSet *extensions.ReplicaSet) error {
func WaitForReplicaSetDesiredReplicas(rsClient appsclient.ReplicaSetsGetter, replicaSet *apps.ReplicaSet) error {
desiredGeneration := replicaSet.Generation
err := wait.PollImmediate(Poll, pollShortTimeout, func() (bool, error) {
rs, err := rsClient.ReplicaSets(replicaSet.Namespace).Get(replicaSet.Name, metav1.GetOptions{})
@ -89,10 +88,10 @@ func WaitForReplicaSetDesiredReplicas(rsClient extensionsclient.ReplicaSetsGette
}
// WaitForReplicaSetTargetSpecReplicas waits for .spec.replicas of a RS to equal targetReplicaNum
func WaitForReplicaSetTargetSpecReplicas(c clientset.Interface, replicaSet *extensions.ReplicaSet, targetReplicaNum int32) error {
func WaitForReplicaSetTargetSpecReplicas(c clientset.Interface, replicaSet *apps.ReplicaSet, targetReplicaNum int32) error {
desiredGeneration := replicaSet.Generation
err := wait.PollImmediate(Poll, pollShortTimeout, func() (bool, error) {
rs, err := c.ExtensionsV1beta1().ReplicaSets(replicaSet.Namespace).Get(replicaSet.Name, metav1.GetOptions{})
rs, err := c.AppsV1().ReplicaSets(replicaSet.Namespace).Get(replicaSet.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
@ -105,10 +104,10 @@ func WaitForReplicaSetTargetSpecReplicas(c clientset.Interface, replicaSet *exte
}
// WaitForReplicaSetTargetAvailableReplicas waits for .status.availableReplicas of a RS to equal targetReplicaNum
func WaitForReplicaSetTargetAvailableReplicas(c clientset.Interface, replicaSet *extensions.ReplicaSet, targetReplicaNum int32) error {
func WaitForReplicaSetTargetAvailableReplicas(c clientset.Interface, replicaSet *apps.ReplicaSet, targetReplicaNum int32) error {
desiredGeneration := replicaSet.Generation
err := wait.PollImmediate(Poll, pollShortTimeout, func() (bool, error) {
rs, err := c.ExtensionsV1beta1().ReplicaSets(replicaSet.Namespace).Get(replicaSet.Name, metav1.GetOptions{})
rs, err := c.AppsV1().ReplicaSets(replicaSet.Namespace).Get(replicaSet.Name, metav1.GetOptions{})
if err != nil {
return false, err
}


@ -21,8 +21,8 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
storage "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -113,7 +113,7 @@ var _ = utils.SIGDescribe("Mounted volume expand [Feature:ExpandPersistentVolume
By("Creating a deployment with the provisioned volume")
deployment, err := framework.CreateDeployment(c, int32(1), map[string]string{"test": "app"}, nodeKeyValueLabel, ns, pvcClaims, "")
defer c.ExtensionsV1beta1().Deployments(ns).Delete(deployment.Name, &metav1.DeleteOptions{})
defer c.AppsV1().Deployments(ns).Delete(deployment.Name, &metav1.DeleteOptions{})
By("Expanding current pvc")
newSize := resource.MustParse("6Gi")
@ -152,7 +152,7 @@ var _ = utils.SIGDescribe("Mounted volume expand [Feature:ExpandPersistentVolume
})
})
func waitForDeploymentToRecreatePod(client clientset.Interface, deployment *extensions.Deployment) (v1.Pod, error) {
func waitForDeploymentToRecreatePod(client clientset.Interface, deployment *apps.Deployment) (v1.Pod, error) {
var runningPod v1.Pod
waitErr := wait.PollImmediate(10*time.Second, 5*time.Minute, func() (bool, error) {
podList, err := framework.GetPodsForDeployment(client, deployment)


@ -53,8 +53,8 @@ go_library(
"//vendor/github.com/vmware/govmomi/vim25/soap:go_default_library",
"//vendor/github.com/vmware/govmomi/vim25/types:go_default_library",
"//vendor/gopkg.in/gcfg.v1:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/api/storage/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",


@ -26,8 +26,8 @@ import (
"github.com/vmware/govmomi/object"
vimtypes "github.com/vmware/govmomi/vim25/types"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
@ -140,7 +140,7 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]",
})
// Wait until the pod failed over to a different node, or time out after 3 minutes
func waitForPodToFailover(client clientset.Interface, deployment *extensions.Deployment, oldNode string) (string, error) {
func waitForPodToFailover(client clientset.Interface, deployment *apps.Deployment, oldNode string) (string, error) {
var (
err error
newNode string
@ -175,7 +175,7 @@ func waitForPodToFailover(client clientset.Interface, deployment *extensions.Dep
}
// getNodeForDeployment returns node name for the Deployment
func getNodeForDeployment(client clientset.Interface, deployment *extensions.Deployment) (string, error) {
func getNodeForDeployment(client clientset.Interface, deployment *apps.Deployment) (string, error) {
podList, err := framework.GetPodsForDeployment(client, deployment)
if err != nil {
return "", err


@ -19,7 +19,7 @@ package upgrades
import (
"fmt"
extensions "k8s.io/api/extensions/v1beta1"
apps "k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
@ -54,11 +54,11 @@ func (t *DeploymentUpgradeTest) Setup(f *framework.Framework) {
nginxImage := imageutils.GetE2EImage(imageutils.NginxSlim)
ns := f.Namespace.Name
deploymentClient := c.ExtensionsV1beta1().Deployments(ns)
rsClient := c.ExtensionsV1beta1().ReplicaSets(ns)
deploymentClient := c.AppsV1().Deployments(ns)
rsClient := c.AppsV1().ReplicaSets(ns)
By(fmt.Sprintf("Creating a deployment %q with 1 replica in namespace %q", deploymentName, ns))
d := framework.NewDeployment(deploymentName, int32(1), map[string]string{"test": "upgrade"}, "nginx", nginxImage, extensions.RollingUpdateDeploymentStrategyType)
d := framework.NewDeployment(deploymentName, int32(1), map[string]string{"test": "upgrade"}, "nginx", nginxImage, apps.RollingUpdateDeploymentStrategyType)
deployment, err := deploymentClient.Create(d)
framework.ExpectNoError(err)
@ -81,7 +81,7 @@ func (t *DeploymentUpgradeTest) Setup(f *framework.Framework) {
// Trigger a new rollout so that we have some history.
By(fmt.Sprintf("Triggering a new rollout for deployment %q", deploymentName))
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deploymentName, func(update *extensions.Deployment) {
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deploymentName, func(update *apps.Deployment) {
update.Spec.Template.Spec.Containers[0].Name = "updated-name"
})
framework.ExpectNoError(err)
@ -121,8 +121,8 @@ func (t *DeploymentUpgradeTest) Test(f *framework.Framework, done <-chan struct{
c := f.ClientSet
ns := f.Namespace.Name
deploymentClient := c.ExtensionsV1beta1().Deployments(ns)
rsClient := c.ExtensionsV1beta1().ReplicaSets(ns)
deploymentClient := c.AppsV1().Deployments(ns)
rsClient := c.AppsV1().ReplicaSets(ns)
deployment, err := deploymentClient.Get(deploymentName, metav1.GetOptions{})
framework.ExpectNoError(err)
@ -157,7 +157,7 @@ func (t *DeploymentUpgradeTest) Test(f *framework.Framework, done <-chan struct{
// Verify the upgraded deployment is active by scaling up the deployment by 1
By(fmt.Sprintf("Scaling up replicaset of deployment %q by 1", deploymentName))
_, err = framework.UpdateDeploymentWithRetries(c, ns, deploymentName, func(deployment *extensions.Deployment) {
_, err = framework.UpdateDeploymentWithRetries(c, ns, deploymentName, func(deployment *apps.Deployment) {
*deployment.Spec.Replicas = *deployment.Spec.Replicas + 1
})
framework.ExpectNoError(err)


@ -19,8 +19,8 @@ go_test(
"//pkg/controller/deployment/util:go_default_library",
"//pkg/util/pointer:go_default_library",
"//test/integration/framework:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
@ -41,6 +41,7 @@ go_library(
"//pkg/util/metrics:go_default_library",
"//test/integration/framework:go_default_library",
"//test/utils:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",


@ -22,8 +22,8 @@ import (
"strings"
"testing"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
"k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/uuid"
@ -47,7 +47,7 @@ func TestNewDeployment(t *testing.T) {
tester.deployment.Annotations = map[string]string{"test": "should-copy-to-replica-set", v1.LastAppliedConfigAnnotation: "should-not-copy-to-replica-set"}
var err error
tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment)
tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(tester.deployment)
if err != nil {
t.Fatalf("failed to create deployment %s: %v", tester.deployment.Name, err)
}
@ -128,14 +128,14 @@ func TestDeploymentRollingUpdate(t *testing.T) {
tester := &deploymentTester{t: t, c: c, deployment: newDeployment(name, ns.Name, replicas)}
tester.deployment.Spec.MinReadySeconds = 4
quarter := intstr.FromString("25%")
tester.deployment.Spec.Strategy.RollingUpdate = &v1beta1.RollingUpdateDeployment{
tester.deployment.Spec.Strategy.RollingUpdate = &apps.RollingUpdateDeployment{
MaxUnavailable: &quarter,
MaxSurge: &quarter,
}
// Create a deployment.
var err error
tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment)
tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(tester.deployment)
if err != nil {
t.Fatalf("failed to create deployment %s: %v", tester.deployment.Name, err)
}
@ -152,7 +152,7 @@ func TestDeploymentRollingUpdate(t *testing.T) {
if oriImage == image {
t.Fatalf("bad test setup, deployment %s roll out with the same image", tester.deployment.Name)
}
imageFn := func(update *v1beta1.Deployment) {
imageFn := func(update *apps.Deployment) {
update.Spec.Template.Spec.Containers[0].Image = image
}
tester.deployment, err = tester.updateDeployment(imageFn)
@ -186,7 +186,7 @@ func TestDeploymentRollingUpdate(t *testing.T) {
// 3. Roll over a deployment before the previous rolling update finishes.
image = "dont-finish"
imageFn = func(update *v1beta1.Deployment) {
imageFn = func(update *apps.Deployment) {
update.Spec.Template.Spec.Containers[0].Image = image
}
tester.deployment, err = tester.updateDeployment(imageFn)
@ -199,7 +199,7 @@ func TestDeploymentRollingUpdate(t *testing.T) {
// We don't mark pods as ready so that rollout won't finish.
// Before the rollout finishes, trigger another rollout.
image = "rollover"
imageFn = func(update *v1beta1.Deployment) {
imageFn = func(update *apps.Deployment) {
update.Spec.Template.Spec.Containers[0].Image = image
}
tester.deployment, err = tester.updateDeployment(imageFn)
@ -212,7 +212,7 @@ func TestDeploymentRollingUpdate(t *testing.T) {
if err := tester.waitForDeploymentCompleteAndCheckRollingAndMarkPodsReady(); err != nil {
t.Fatal(err)
}
_, allOldRSs, err := deploymentutil.GetOldReplicaSets(tester.deployment, c.ExtensionsV1beta1())
_, allOldRSs, err := deploymentutil.GetOldReplicaSets(tester.deployment, c.AppsV1())
if err != nil {
t.Fatalf("failed retrieving old replicasets of deployment %s: %v", tester.deployment.Name, err)
}
@ -232,13 +232,18 @@ func TestDeploymentSelectorImmutability(t *testing.T) {
defer framework.DeleteTestingNamespace(ns, s, t)
tester := &deploymentTester{t: t, c: c, deployment: newDeployment(name, ns.Name, int32(20))}
deploymentExtensionsV1beta1, err := c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment)
var err error
tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(tester.deployment)
if err != nil {
t.Fatalf("failed to create extensions/v1beta1 deployment %s: %v", tester.deployment.Name, err)
t.Fatalf("failed to create apps/v1 deployment %s: %v", tester.deployment.Name, err)
}
// test to ensure extensions/v1beta1 selector is mutable
newSelectorLabels := map[string]string{"name_extensions_v1beta1": "test_extensions_v1beta1"}
deploymentExtensionsV1beta1, err := c.ExtensionsV1beta1().Deployments(ns.Name).Get(name, metav1.GetOptions{})
if err != nil {
t.Fatalf("failed to get extensions/v1beta deployment %s: %v", name, err)
}
deploymentExtensionsV1beta1.Spec.Selector.MatchLabels = newSelectorLabels
deploymentExtensionsV1beta1.Spec.Template.Labels = newSelectorLabels
updatedDeploymentExtensionsV1beta1, err := c.ExtensionsV1beta1().Deployments(ns.Name).Update(deploymentExtensionsV1beta1)
@ -283,6 +288,22 @@ func TestDeploymentSelectorImmutability(t *testing.T) {
if !strings.Contains(err.Error(), expectedErrType) || !strings.Contains(err.Error(), expectedErrDetail) {
t.Errorf("error message does not match, expected type: %s, expected detail: %s, got: %s", expectedErrType, expectedErrDetail, err.Error())
}
// test to ensure apps/v1 selector is immutable
deploymentAppsV1, err := c.AppsV1().Deployments(ns.Name).Get(updatedDeploymentAppsV1beta1.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("failed to get apps/v1 deployment %s: %v", updatedDeploymentAppsV1beta1.Name, err)
}
newSelectorLabels = map[string]string{"name_apps_v1": "test_apps_v1"}
deploymentAppsV1.Spec.Selector.MatchLabels = newSelectorLabels
deploymentAppsV1.Spec.Template.Labels = newSelectorLabels
_, err = c.AppsV1().Deployments(ns.Name).Update(deploymentAppsV1)
if err == nil {
t.Fatalf("failed to provide validation error when changing immutable selector when updating apps/v1 deployment %s", deploymentAppsV1.Name)
}
if !strings.Contains(err.Error(), expectedErrType) || !strings.Contains(err.Error(), expectedErrDetail) {
t.Errorf("error message does not match, expected type: %s, expected detail: %s, got: %s", expectedErrType, expectedErrDetail, err.Error())
}
}
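To spell out why the apps/v1 update above must fail while the extensions/v1beta1 update succeeds: a mutated selector would no longer match the pods the deployment already owns, which is one motivation for making the selector immutable in apps/v1. A minimal sketch of that mismatch follows; the pod labels are hypothetical, and only the mutated selector labels are taken from the test:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// Hypothetical labels carried by the deployment's existing pods.
	podLabels := labels.Set{"name": "test"}

	// Selector the deployment was created with vs. the mutated one from the test.
	original := labels.SelectorFromSet(labels.Set{"name": "test"})
	mutated := labels.SelectorFromSet(labels.Set{"name_apps_v1": "test_apps_v1"})

	// The mutated selector no longer matches the running pods, so an in-place
	// selector change would orphan them; apps/v1 rejects it at validation time.
	fmt.Println(original.Matches(podLabels), mutated.Matches(podLabels)) // true false
}
```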
// Paused deployment should not start new rollout
@ -300,7 +321,7 @@ func TestPausedDeployment(t *testing.T) {
tester.deployment.Spec.Template.Spec.TerminationGracePeriodSeconds = &tgps
var err error
tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment)
tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(tester.deployment)
if err != nil {
t.Fatalf("failed to create deployment %s: %v", tester.deployment.Name, err)
}
@ -358,7 +379,7 @@ func TestPausedDeployment(t *testing.T) {
// Update the deployment template
newTGPS := int64(0)
tester.deployment, err = tester.updateDeployment(func(update *v1beta1.Deployment) {
tester.deployment, err = tester.updateDeployment(func(update *apps.Deployment) {
update.Spec.Template.Spec.TerminationGracePeriodSeconds = &newTGPS
})
if err != nil {
@ -375,7 +396,7 @@ func TestPausedDeployment(t *testing.T) {
t.Fatal(err)
}
_, allOldRs, err := deploymentutil.GetOldReplicaSets(tester.deployment, c.ExtensionsV1beta1())
_, allOldRs, err := deploymentutil.GetOldReplicaSets(tester.deployment, c.AppsV1())
if err != nil {
t.Fatalf("failed retrieving old replicasets of deployment %s: %v", tester.deployment.Name, err)
}
@ -401,7 +422,7 @@ func TestScalePausedDeployment(t *testing.T) {
tester.deployment.Spec.Template.Spec.TerminationGracePeriodSeconds = &tgps
var err error
tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment)
tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(tester.deployment)
if err != nil {
t.Fatalf("failed to create deployment %s: %v", tester.deployment.Name, err)
}
@ -442,7 +463,7 @@ func TestScalePausedDeployment(t *testing.T) {
// Scale the paused deployment.
newReplicas := int32(10)
tester.deployment, err = tester.updateDeployment(func(update *v1beta1.Deployment) {
tester.deployment, err = tester.updateDeployment(func(update *apps.Deployment) {
update.Spec.Replicas = &newReplicas
})
if err != nil {
@ -482,7 +503,7 @@ func TestDeploymentHashCollision(t *testing.T) {
tester := &deploymentTester{t: t, c: c, deployment: newDeployment(name, ns.Name, replicas)}
var err error
tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment)
tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(tester.deployment)
if err != nil {
t.Fatalf("failed to create deployment %s: %v", tester.deployment.Name, err)
}
@ -500,14 +521,14 @@ func TestDeploymentHashCollision(t *testing.T) {
}
// Mock a hash collision
newRS, err := deploymentutil.GetNewReplicaSet(tester.deployment, c.ExtensionsV1beta1())
newRS, err := deploymentutil.GetNewReplicaSet(tester.deployment, c.AppsV1())
if err != nil {
t.Fatalf("failed getting new replicaset of deployment %s: %v", tester.deployment.Name, err)
}
if newRS == nil {
t.Fatalf("unable to find new replicaset of deployment %s", tester.deployment.Name)
}
_, err = tester.updateReplicaSet(newRS.Name, func(update *v1beta1.ReplicaSet) {
_, err = tester.updateReplicaSet(newRS.Name, func(update *apps.ReplicaSet) {
*update.Spec.Template.Spec.TerminationGracePeriodSeconds = int64(5)
})
if err != nil {
@ -516,7 +537,7 @@ func TestDeploymentHashCollision(t *testing.T) {
// Expect deployment collision counter to increment
if err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
d, err := c.ExtensionsV1beta1().Deployments(ns.Name).Get(tester.deployment.Name, metav1.GetOptions{})
d, err := c.AppsV1().Deployments(ns.Name).Get(tester.deployment.Name, metav1.GetOptions{})
if err != nil {
return false, nil
}
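For context on the collision counter polled above: the controller derives a new ReplicaSet's name from a hash of the pod template plus the collision count, so once the test mutates the existing ReplicaSet's template, the next sync finds the deterministic name occupied by a non-matching ReplicaSet and bumps `status.collisionCount` to obtain a fresh hash. A rough, self-contained sketch of that idea (the fnv hash and JSON-ish template strings are stand-ins, not the controller's real hashing):

```go
package main

import (
	"fmt"
	"hash/fnv"
)

// nameFor derives a deterministic ReplicaSet name from the deployment name,
// its pod template, and the current collision count (stand-in hash).
func nameFor(deployment, template string, collisionCount int32) string {
	h := fnv.New32a()
	fmt.Fprintf(h, "%s/%d", template, collisionCount)
	return fmt.Sprintf("%s-%x", deployment, h.Sum32())
}

func main() {
	desired := `{"containers":[{"name":"test","image":"fakeimage"}]}`
	existing := map[string]string{} // RS name -> template it was built from

	// First sync creates the RS under the hash of the desired template.
	count := int32(0)
	existing[nameFor("deployment", desired, count)] = desired

	// The test then mutates that RS's template (terminationGracePeriodSeconds),
	// so the deterministic name is now owned by a non-matching ReplicaSet.
	existing[nameFor("deployment", desired, count)] = desired + `+tgps=5`

	// On the next sync the controller detects the collision, increments the
	// counter, and retries with a new hash, which is what the test waits for.
	for {
		name := nameFor("deployment", desired, count)
		if tmpl, taken := existing[name]; taken && tmpl != desired {
			count++
			continue
		}
		fmt.Printf("new ReplicaSet %q, collisionCount=%d\n", name, count)
		break
	}
}
```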
@ -546,7 +567,7 @@ func TestRollbackDeploymentRSNoRevision(t *testing.T) {
rs.Annotations = make(map[string]string)
rs.Annotations["make"] = "difference"
rs.Spec.Template.Spec.Containers[0].Image = "different-image"
_, err := c.ExtensionsV1beta1().ReplicaSets(ns.Name).Create(rs)
_, err := c.AppsV1().ReplicaSets(ns.Name).Create(rs)
if err != nil {
t.Fatalf("failed to create replicaset %s: %v", rsName, err)
}
@ -554,9 +575,13 @@ func TestRollbackDeploymentRSNoRevision(t *testing.T) {
replicas := int32(1)
tester := &deploymentTester{t: t, c: c, deployment: newDeployment(name, ns.Name, replicas)}
oriImage := tester.deployment.Spec.Template.Spec.Containers[0].Image
// Set absolute rollout limits (defaults changed to percentages)
max := intstr.FromInt(1)
tester.deployment.Spec.Strategy.RollingUpdate.MaxUnavailable = &max
tester.deployment.Spec.Strategy.RollingUpdate.MaxSurge = &max
// Create a deployment which have different template than the replica set created above.
if tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment); err != nil {
if tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(tester.deployment); err != nil {
t.Fatalf("failed to create deployment %s: %v", tester.deployment.Name, err)
}
@ -595,7 +620,7 @@ func TestRollbackDeploymentRSNoRevision(t *testing.T) {
// 2. Update the deployment to revision 2.
updatedImage := "update"
tester.deployment, err = tester.updateDeployment(func(update *v1beta1.Deployment) {
tester.deployment, err = tester.updateDeployment(func(update *apps.Deployment) {
update.Spec.Template.Spec.Containers[0].Name = updatedImage
update.Spec.Template.Spec.Containers[0].Image = updatedImage
})
@ -644,10 +669,10 @@ func TestRollbackDeploymentRSNoRevision(t *testing.T) {
}
}
func checkRSHashLabels(rs *v1beta1.ReplicaSet) (string, error) {
hash := rs.Labels[v1beta1.DefaultDeploymentUniqueLabelKey]
selectorHash := rs.Spec.Selector.MatchLabels[v1beta1.DefaultDeploymentUniqueLabelKey]
templateLabelHash := rs.Spec.Template.Labels[v1beta1.DefaultDeploymentUniqueLabelKey]
func checkRSHashLabels(rs *apps.ReplicaSet) (string, error) {
hash := rs.Labels[apps.DefaultDeploymentUniqueLabelKey]
selectorHash := rs.Spec.Selector.MatchLabels[apps.DefaultDeploymentUniqueLabelKey]
templateLabelHash := rs.Spec.Template.Labels[apps.DefaultDeploymentUniqueLabelKey]
if hash != selectorHash || selectorHash != templateLabelHash {
return "", fmt.Errorf("mismatching hash value found in replicaset %s: %#v", rs.Name, rs)
@ -665,7 +690,7 @@ func checkPodsHashLabel(pods *v1.PodList) (string, error) {
}
var hash string
for _, pod := range pods.Items {
podHash := pod.Labels[v1beta1.DefaultDeploymentUniqueLabelKey]
podHash := pod.Labels[apps.DefaultDeploymentUniqueLabelKey]
if len(podHash) == 0 {
return "", fmt.Errorf("found pod %s missing pod-template-hash label: %#v", pod.Name, pods)
}
@ -694,7 +719,7 @@ func TestFailedDeployment(t *testing.T) {
tester := &deploymentTester{t: t, c: c, deployment: newDeployment(deploymentName, ns.Name, replicas)}
tester.deployment.Spec.ProgressDeadlineSeconds = &three
var err error
tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment)
tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(tester.deployment)
if err != nil {
t.Fatalf("failed to create deployment %q: %v", deploymentName, err)
}
@ -712,7 +737,7 @@ func TestFailedDeployment(t *testing.T) {
// Pods are not marked as Ready, therefore the deployment progress will eventually timeout after progressDeadlineSeconds has passed.
// Wait for the deployment to have a progress timeout condition.
if err = tester.waitForDeploymentWithCondition(deploymentutil.TimedOutReason, v1beta1.DeploymentProgressing); err != nil {
if err = tester.waitForDeploymentWithCondition(deploymentutil.TimedOutReason, apps.DeploymentProgressing); err != nil {
t.Fatal(err)
}
@ -722,7 +747,7 @@ func TestFailedDeployment(t *testing.T) {
}
// Wait for the deployment to have a progress complete condition.
if err = tester.waitForDeploymentWithCondition(deploymentutil.NewRSAvailableReason, v1beta1.DeploymentProgressing); err != nil {
if err = tester.waitForDeploymentWithCondition(deploymentutil.NewRSAvailableReason, apps.DeploymentProgressing); err != nil {
t.Fatal(err)
}
}
@ -750,9 +775,9 @@ func TestOverlappingDeployments(t *testing.T) {
// Create 2 deployments with overlapping selectors
var err error
var rss []*v1beta1.ReplicaSet
var rss []*apps.ReplicaSet
for _, tester := range testers {
tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment)
tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(tester.deployment)
dname := tester.deployment.Name
if err != nil {
t.Fatalf("failed to create deployment %q: %v", dname, err)
@ -784,7 +809,7 @@ func TestOverlappingDeployments(t *testing.T) {
// Scale only the first deployment by 1
newReplicas := replicas + 1
testers[0].deployment, err = testers[0].updateDeployment(func(update *v1beta1.Deployment) {
testers[0].deployment, err = testers[0].updateDeployment(func(update *apps.Deployment) {
update.Spec.Replicas = &newReplicas
})
if err != nil {
@ -798,7 +823,7 @@ func TestOverlappingDeployments(t *testing.T) {
// Verify replicaset of both deployments has updated number of replicas
for i, tester := range testers {
rs, err := c.ExtensionsV1beta1().ReplicaSets(ns.Name).Get(rss[i].Name, metav1.GetOptions{})
rs, err := c.AppsV1().ReplicaSets(ns.Name).Get(rss[i].Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("failed to get replicaset %q: %v", rss[i].Name, err)
}
@ -828,7 +853,7 @@ func TestScaledRolloutDeployment(t *testing.T) {
tester := &deploymentTester{t: t, c: c, deployment: newDeployment(name, ns.Name, replicas)}
tester.deployment.Spec.Strategy.RollingUpdate.MaxSurge = intOrStrP(3)
tester.deployment.Spec.Strategy.RollingUpdate.MaxUnavailable = intOrStrP(2)
tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment)
tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(tester.deployment)
if err != nil {
t.Fatalf("failed to create deployment %q: %v", name, err)
}
@ -847,7 +872,7 @@ func TestScaledRolloutDeployment(t *testing.T) {
// Update the deployment with another new image but do not mark the pods as ready to block new replicaset
fakeImage2 := "fakeimage2"
tester.deployment, err = tester.updateDeployment(func(update *v1beta1.Deployment) {
tester.deployment, err = tester.updateDeployment(func(update *apps.Deployment) {
update.Spec.Template.Spec.Containers[0].Image = fakeImage2
})
if err != nil {
@ -858,7 +883,7 @@ func TestScaledRolloutDeployment(t *testing.T) {
}
// Verify the deployment has minimum available replicas after 2nd rollout
tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Get(name, metav1.GetOptions{})
tester.deployment, err = c.AppsV1().Deployments(ns.Name).Get(name, metav1.GetOptions{})
if err != nil {
t.Fatalf("failed to get deployment %q: %v", name, err)
}
@ -868,7 +893,7 @@ func TestScaledRolloutDeployment(t *testing.T) {
}
// Wait for old replicaset of 1st rollout to have desired replicas
firstRS, err = c.ExtensionsV1beta1().ReplicaSets(ns.Name).Get(firstRS.Name, metav1.GetOptions{})
firstRS, err = c.AppsV1().ReplicaSets(ns.Name).Get(firstRS.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("failed to get replicaset %q: %v", firstRS.Name, err)
}
@ -888,7 +913,7 @@ func TestScaledRolloutDeployment(t *testing.T) {
// Scale up the deployment and update its image to another new image simultaneously (this time marks all pods as ready)
newReplicas := int32(20)
fakeImage3 := "fakeimage3"
tester.deployment, err = tester.updateDeployment(func(update *v1beta1.Deployment) {
tester.deployment, err = tester.updateDeployment(func(update *apps.Deployment) {
update.Spec.Replicas = &newReplicas
update.Spec.Template.Spec.Containers[0].Image = fakeImage3
})
@ -903,13 +928,13 @@ func TestScaledRolloutDeployment(t *testing.T) {
}
// Verify every replicaset has correct desiredReplicas annotation after 3rd rollout
thirdRS, err := deploymentutil.GetNewReplicaSet(tester.deployment, c.ExtensionsV1beta1())
thirdRS, err := deploymentutil.GetNewReplicaSet(tester.deployment, c.AppsV1())
if err != nil {
t.Fatalf("failed getting new revision 3 replicaset for deployment %q: %v", name, err)
}
rss := []*v1beta1.ReplicaSet{firstRS, secondRS, thirdRS}
rss := []*apps.ReplicaSet{firstRS, secondRS, thirdRS}
for _, curRS := range rss {
curRS, err = c.ExtensionsV1beta1().ReplicaSets(ns.Name).Get(curRS.Name, metav1.GetOptions{})
curRS, err = c.AppsV1().ReplicaSets(ns.Name).Get(curRS.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("failed to get replicaset when checking desired replicas annotation: %v", err)
}
@ -924,7 +949,7 @@ func TestScaledRolloutDeployment(t *testing.T) {
// Update the deployment with another new image but do not mark the pods as ready to block new replicaset
fakeImage4 := "fakeimage4"
tester.deployment, err = tester.updateDeployment(func(update *v1beta1.Deployment) {
tester.deployment, err = tester.updateDeployment(func(update *apps.Deployment) {
update.Spec.Template.Spec.Containers[0].Image = fakeImage4
})
if err != nil {
@ -935,7 +960,7 @@ func TestScaledRolloutDeployment(t *testing.T) {
}
// Verify the deployment has minimum available replicas after 4th rollout
tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Get(name, metav1.GetOptions{})
tester.deployment, err = c.AppsV1().Deployments(ns.Name).Get(name, metav1.GetOptions{})
if err != nil {
t.Fatalf("failed to get deployment %q: %v", name, err)
}
@ -945,7 +970,7 @@ func TestScaledRolloutDeployment(t *testing.T) {
}
// Wait for old replicaset of 3rd rollout to have desired replicas
thirdRS, err = c.ExtensionsV1beta1().ReplicaSets(ns.Name).Get(thirdRS.Name, metav1.GetOptions{})
thirdRS, err = c.AppsV1().ReplicaSets(ns.Name).Get(thirdRS.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("failed to get replicaset %q: %v", thirdRS.Name, err)
}
@ -965,7 +990,7 @@ func TestScaledRolloutDeployment(t *testing.T) {
// Scale down the deployment and update its image to another new image simultaneously (this time marks all pods as ready)
newReplicas = int32(5)
fakeImage5 := "fakeimage5"
tester.deployment, err = tester.updateDeployment(func(update *v1beta1.Deployment) {
tester.deployment, err = tester.updateDeployment(func(update *apps.Deployment) {
update.Spec.Replicas = &newReplicas
update.Spec.Template.Spec.Containers[0].Image = fakeImage5
})
@ -980,13 +1005,13 @@ func TestScaledRolloutDeployment(t *testing.T) {
}
// Verify every replicaset has correct desiredReplicas annotation after 5th rollout
fifthRS, err := deploymentutil.GetNewReplicaSet(tester.deployment, c.ExtensionsV1beta1())
fifthRS, err := deploymentutil.GetNewReplicaSet(tester.deployment, c.AppsV1())
if err != nil {
t.Fatalf("failed getting new revision 5 replicaset for deployment %q: %v", name, err)
}
rss = []*v1beta1.ReplicaSet{thirdRS, fourthRS, fifthRS}
rss = []*apps.ReplicaSet{thirdRS, fourthRS, fifthRS}
for _, curRS := range rss {
curRS, err = c.ExtensionsV1beta1().ReplicaSets(ns.Name).Get(curRS.Name, metav1.GetOptions{})
curRS, err = c.AppsV1().ReplicaSets(ns.Name).Get(curRS.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("failed to get replicaset when checking desired replicas annotation: %v", err)
}
@ -1010,10 +1035,10 @@ func TestSpecReplicasChange(t *testing.T) {
deploymentName := "deployment"
replicas := int32(1)
tester := &deploymentTester{t: t, c: c, deployment: newDeployment(deploymentName, ns.Name, replicas)}
tester.deployment.Spec.Strategy.Type = v1beta1.RecreateDeploymentStrategyType
tester.deployment.Spec.Strategy.Type = apps.RecreateDeploymentStrategyType
tester.deployment.Spec.Strategy.RollingUpdate = nil
var err error
tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment)
tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(tester.deployment)
if err != nil {
t.Fatalf("failed to create deployment %q: %v", deploymentName, err)
}
@ -1039,7 +1064,7 @@ func TestSpecReplicasChange(t *testing.T) {
// Add a template annotation change to test deployment's status does update
// without .spec.replicas change
var oldGeneration int64
tester.deployment, err = tester.updateDeployment(func(update *v1beta1.Deployment) {
tester.deployment, err = tester.updateDeployment(func(update *apps.Deployment) {
oldGeneration = update.Generation
update.Spec.RevisionHistoryLimit = pointer.Int32Ptr(4)
})
@ -1068,8 +1093,10 @@ func TestDeploymentAvailableCondition(t *testing.T) {
tester := &deploymentTester{t: t, c: c, deployment: newDeployment(deploymentName, ns.Name, replicas)}
// Assign a high value to the deployment's minReadySeconds
tester.deployment.Spec.MinReadySeconds = 3600
// progressDeadlineSeconds must be greater than minReadySeconds
tester.deployment.Spec.ProgressDeadlineSeconds = pointer.Int32Ptr(7200)
var err error
tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment)
tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(tester.deployment)
if err != nil {
t.Fatalf("failed to create deployment %q: %v", deploymentName, err)
}
@ -1087,7 +1114,7 @@ func TestDeploymentAvailableCondition(t *testing.T) {
}
// Wait for the deployment to have MinimumReplicasUnavailable reason because the pods are not marked as ready
if err = tester.waitForDeploymentWithCondition(deploymentutil.MinimumReplicasUnavailable, v1beta1.DeploymentAvailable); err != nil {
if err = tester.waitForDeploymentWithCondition(deploymentutil.MinimumReplicasUnavailable, apps.DeploymentAvailable); err != nil {
t.Fatal(err)
}
@ -1107,7 +1134,7 @@ func TestDeploymentAvailableCondition(t *testing.T) {
}
// Wait for the deployment to still have MinimumReplicasUnavailable reason within minReadySeconds period
if err = tester.waitForDeploymentWithCondition(deploymentutil.MinimumReplicasUnavailable, v1beta1.DeploymentAvailable); err != nil {
if err = tester.waitForDeploymentWithCondition(deploymentutil.MinimumReplicasUnavailable, apps.DeploymentAvailable); err != nil {
t.Fatal(err)
}
@ -1117,7 +1144,7 @@ func TestDeploymentAvailableCondition(t *testing.T) {
}
// Update the deployment's minReadySeconds to a small value
tester.deployment, err = tester.updateDeployment(func(update *v1beta1.Deployment) {
tester.deployment, err = tester.updateDeployment(func(update *apps.Deployment) {
update.Spec.MinReadySeconds = 1
})
if err != nil {
@ -1130,7 +1157,7 @@ func TestDeploymentAvailableCondition(t *testing.T) {
}
// Wait for the deployment to have MinimumReplicasAvailable reason after minReadySeconds period
if err = tester.waitForDeploymentWithCondition(deploymentutil.MinimumReplicasAvailable, v1beta1.DeploymentAvailable); err != nil {
if err = tester.waitForDeploymentWithCondition(deploymentutil.MinimumReplicasAvailable, apps.DeploymentAvailable); err != nil {
t.Fatal(err)
}
@ -1141,10 +1168,10 @@ func TestDeploymentAvailableCondition(t *testing.T) {
}
// Wait for deployment to automatically patch incorrect ControllerRef of RS
func testRSControllerRefPatch(t *testing.T, tester *deploymentTester, rs *v1beta1.ReplicaSet, ownerReference *metav1.OwnerReference, expectedOwnerReferenceNum int) {
func testRSControllerRefPatch(t *testing.T, tester *deploymentTester, rs *apps.ReplicaSet, ownerReference *metav1.OwnerReference, expectedOwnerReferenceNum int) {
ns := rs.Namespace
rsClient := tester.c.ExtensionsV1beta1().ReplicaSets(ns)
rs, err := tester.updateReplicaSet(rs.Name, func(update *v1beta1.ReplicaSet) {
rsClient := tester.c.AppsV1().ReplicaSets(ns)
rs, err := tester.updateReplicaSet(rs.Name, func(update *apps.ReplicaSet) {
update.OwnerReferences = []metav1.OwnerReference{*ownerReference}
})
if err != nil {
@ -1186,7 +1213,7 @@ func TestGeneralReplicaSetAdoption(t *testing.T) {
replicas := int32(1)
tester := &deploymentTester{t: t, c: c, deployment: newDeployment(deploymentName, ns.Name, replicas)}
var err error
tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment)
tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(tester.deployment)
if err != nil {
t.Fatalf("failed to create deployment %q: %v", deploymentName, err)
}
@ -1209,7 +1236,7 @@ func TestGeneralReplicaSetAdoption(t *testing.T) {
}
// Get replicaset of the deployment
rs, err := deploymentutil.GetNewReplicaSet(tester.deployment, c.ExtensionsV1beta1())
rs, err := deploymentutil.GetNewReplicaSet(tester.deployment, c.AppsV1())
if err != nil {
t.Fatalf("failed to get replicaset of deployment %q: %v", deploymentName, err)
}
@ -1233,7 +1260,7 @@ func TestGeneralReplicaSetAdoption(t *testing.T) {
func testScalingUsingScaleSubresource(t *testing.T, tester *deploymentTester, replicas int32) {
ns := tester.deployment.Namespace
deploymentName := tester.deployment.Name
deploymentClient := tester.c.ExtensionsV1beta1().Deployments(ns)
deploymentClient := tester.c.AppsV1().Deployments(ns)
deployment, err := deploymentClient.Get(deploymentName, metav1.GetOptions{})
if err != nil {
t.Fatalf("Failed to obtain deployment %q: %v", deploymentName, err)
@ -1280,7 +1307,7 @@ func TestDeploymentScaleSubresource(t *testing.T) {
replicas := int32(2)
tester := &deploymentTester{t: t, c: c, deployment: newDeployment(deploymentName, ns.Name, replicas)}
var err error
tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment)
tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(tester.deployment)
if err != nil {
t.Fatalf("failed to create deployment %q: %v", deploymentName, err)
}
@ -1324,7 +1351,7 @@ func TestReplicaSetOrphaningAndAdoptionWhenLabelsChange(t *testing.T) {
replicas := int32(1)
tester := &deploymentTester{t: t, c: c, deployment: newDeployment(deploymentName, ns.Name, replicas)}
var err error
tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment)
tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(tester.deployment)
if err != nil {
t.Fatalf("failed to create deployment %q: %v", deploymentName, err)
}
@ -1349,7 +1376,7 @@ func TestReplicaSetOrphaningAndAdoptionWhenLabelsChange(t *testing.T) {
// Orphaning: deployment should remove OwnerReference from a RS when the RS's labels change to not match its labels
// Get replicaset of the deployment
rs, err := deploymentutil.GetNewReplicaSet(tester.deployment, c.ExtensionsV1beta1())
rs, err := deploymentutil.GetNewReplicaSet(tester.deployment, c.AppsV1())
if err != nil {
t.Fatalf("failed to get replicaset of deployment %q: %v", deploymentName, err)
}
@ -1368,7 +1395,7 @@ func TestReplicaSetOrphaningAndAdoptionWhenLabelsChange(t *testing.T) {
// Change the replicaset's labels to not match the deployment's labels
labelMap := map[string]string{"new-name": "new-test"}
rs, err = tester.updateReplicaSet(rs.Name, func(update *v1beta1.ReplicaSet) {
rs, err = tester.updateReplicaSet(rs.Name, func(update *apps.ReplicaSet) {
update.Labels = labelMap
})
if err != nil {
@ -1376,7 +1403,7 @@ func TestReplicaSetOrphaningAndAdoptionWhenLabelsChange(t *testing.T) {
}
// Wait for the controllerRef of the replicaset to become nil
rsClient := tester.c.ExtensionsV1beta1().ReplicaSets(ns.Name)
rsClient := tester.c.AppsV1().ReplicaSets(ns.Name)
if err = wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
rs, err = rsClient.Get(rs.Name, metav1.GetOptions{})
if err != nil {
@ -1390,9 +1417,9 @@ func TestReplicaSetOrphaningAndAdoptionWhenLabelsChange(t *testing.T) {
// Wait for the deployment to create a new replicaset
// This will trigger collision avoidance due to the deterministic nature of the replicaset name
// i.e., the new replicaset will have a name with a different hash to preserve name uniqueness
var newRS *v1beta1.ReplicaSet
var newRS *apps.ReplicaSet
if err = wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
newRS, err = deploymentutil.GetNewReplicaSet(tester.deployment, c.ExtensionsV1beta1())
newRS, err = deploymentutil.GetNewReplicaSet(tester.deployment, c.AppsV1())
if err != nil {
return false, fmt.Errorf("failed to get new replicaset of deployment %q after orphaning: %v", deploymentName, err)
}
@ -1407,7 +1434,7 @@ func TestReplicaSetOrphaningAndAdoptionWhenLabelsChange(t *testing.T) {
// Adoption: deployment should add controllerRef to a RS when the RS's labels change to match its labels
// Change the old replicaset's labels to match the deployment's labels
rs, err = tester.updateReplicaSet(rs.Name, func(update *v1beta1.ReplicaSet) {
rs, err = tester.updateReplicaSet(rs.Name, func(update *apps.ReplicaSet) {
update.Labels = testLabels()
})
if err != nil {

View File

@ -23,8 +23,9 @@ import (
"testing"
"time"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
"k8s.io/api/extensions/v1beta1"
extensions "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/wait"
@ -48,18 +49,18 @@ const (
fakeImage = "fakeimage"
)
var pauseFn = func(update *v1beta1.Deployment) {
var pauseFn = func(update *apps.Deployment) {
update.Spec.Paused = true
}
var resumeFn = func(update *v1beta1.Deployment) {
var resumeFn = func(update *apps.Deployment) {
update.Spec.Paused = false
}
type deploymentTester struct {
t *testing.T
c clientset.Interface
deployment *v1beta1.Deployment
deployment *apps.Deployment
}
func testLabels() map[string]string {
@ -67,22 +68,22 @@ func testLabels() map[string]string {
}
// newDeployment returns a RollingUpdate Deployment with a fake container image
func newDeployment(name, ns string, replicas int32) *v1beta1.Deployment {
return &v1beta1.Deployment{
func newDeployment(name, ns string, replicas int32) *apps.Deployment {
return &apps.Deployment{
TypeMeta: metav1.TypeMeta{
Kind: "Deployment",
APIVersion: "extensions/v1beta1",
APIVersion: "apps/v1",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: ns,
Name: name,
},
Spec: v1beta1.DeploymentSpec{
Spec: apps.DeploymentSpec{
Replicas: &replicas,
Selector: &metav1.LabelSelector{MatchLabels: testLabels()},
Strategy: v1beta1.DeploymentStrategy{
Type: v1beta1.RollingUpdateDeploymentStrategyType,
RollingUpdate: new(v1beta1.RollingUpdateDeployment),
Strategy: apps.DeploymentStrategy{
Type: apps.RollingUpdateDeploymentStrategyType,
RollingUpdate: new(apps.RollingUpdateDeployment),
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
@ -101,8 +102,8 @@ func newDeployment(name, ns string, replicas int32) *v1beta1.Deployment {
}
}
func newReplicaSet(name, ns string, replicas int32) *v1beta1.ReplicaSet {
return &v1beta1.ReplicaSet{
func newReplicaSet(name, ns string, replicas int32) *apps.ReplicaSet {
return &apps.ReplicaSet{
TypeMeta: metav1.TypeMeta{
Kind: "ReplicaSet",
APIVersion: "extensions/v1beta1",
@ -110,8 +111,9 @@ func newReplicaSet(name, ns string, replicas int32) *v1beta1.ReplicaSet {
ObjectMeta: metav1.ObjectMeta{
Namespace: ns,
Name: name,
Labels: testLabels(),
},
Spec: v1beta1.ReplicaSetSpec{
Spec: apps.ReplicaSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: testLabels(),
},
@ -133,11 +135,11 @@ func newReplicaSet(name, ns string, replicas int32) *v1beta1.ReplicaSet {
}
}
func newDeploymentRollback(name string, annotations map[string]string, revision int64) *v1beta1.DeploymentRollback {
return &v1beta1.DeploymentRollback{
func newDeploymentRollback(name string, annotations map[string]string, revision int64) *extensions.DeploymentRollback {
return &extensions.DeploymentRollback{
Name: name,
UpdatedAnnotations: annotations,
RollbackTo: v1beta1.RollbackConfig{Revision: revision},
RollbackTo: extensions.RollbackConfig{Revision: revision},
}
}
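
The rollback object above stays on extensions/v1beta1 because apps/v1 dropped the DeploymentRollback kind. A minimal sketch of how a test might submit it, assuming the extensions/v1beta1 client still exposes the rollback subresource (the call below is illustrative, not part of this diff):

```go
// Illustrative only: issue a rollback for an apps/v1-managed Deployment through the
// legacy extensions/v1beta1 subresource, since apps/v1 has no DeploymentRollback type.
rollback := newDeploymentRollback(deploymentName, nil, 1) // roll back to revision 1
if err := c.ExtensionsV1beta1().Deployments(ns).Rollback(rollback); err != nil {
	t.Fatalf("failed to submit rollback for deployment %q: %v", deploymentName, err)
}
```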
@ -156,8 +158,8 @@ func dcSetup(t *testing.T) (*httptest.Server, framework.CloseFunc, *replicaset.R
metrics.UnregisterMetricAndUntrackRateLimiterUsage("deployment_controller")
dc, err := deployment.NewDeploymentController(
informers.Extensions().V1beta1().Deployments(),
informers.Extensions().V1beta1().ReplicaSets(),
informers.Apps().V1().Deployments(),
informers.Apps().V1().ReplicaSets(),
informers.Core().V1().Pods(),
clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "deployment-controller")),
)
@ -256,7 +258,7 @@ func (d *deploymentTester) markUpdatedPodsReady(wg *sync.WaitGroup) {
}
func (d *deploymentTester) deploymentComplete() (bool, error) {
latest, err := d.c.ExtensionsV1beta1().Deployments(d.deployment.Namespace).Get(d.deployment.Name, metav1.GetOptions{})
latest, err := d.c.AppsV1().Deployments(d.deployment.Namespace).Get(d.deployment.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
@ -285,6 +287,8 @@ func (d *deploymentTester) waitForDeploymentCompleteAndCheckRollingAndMarkPodsRe
// Manually mark updated Deployment pods as ready in a separate goroutine
wg.Add(1)
go d.markUpdatedPodsReady(&wg)
// Wait for goroutine to finish, for all return paths.
defer wg.Wait()
// Wait for the Deployment status to complete while Deployment pods are becoming ready
err := d.waitForDeploymentCompleteAndCheckRolling()
@ -292,9 +296,6 @@ func (d *deploymentTester) waitForDeploymentCompleteAndCheckRollingAndMarkPodsRe
return fmt.Errorf("failed to wait for Deployment %s to complete: %v", d.deployment.Name, err)
}
// Wait for goroutine to finish
wg.Wait()
return nil
}
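
The `defer wg.Wait()` added above replaces the trailing wait so the pod-marking goroutine is joined on every return path, including the early error return. A minimal standalone sketch of that pattern (names below are illustrative, not from the test):

```go
package main

import (
	"fmt"
	"sync"
)

func waitAndMark() error {
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		fmt.Println("marking pods ready") // stands in for markUpdatedPodsReady
	}()
	// Runs on every return path below, so the goroutine is always joined.
	defer wg.Wait()

	if err := waitForComplete(); err != nil {
		return fmt.Errorf("failed to wait: %v", err)
	}
	return nil
}

func waitForComplete() error { return nil }

func main() { _ = waitAndMark() }
```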
@ -319,7 +320,7 @@ func (d *deploymentTester) waitForDeploymentCompleteAndMarkPodsReady() error {
return nil
}
func (d *deploymentTester) updateDeployment(applyUpdate testutil.UpdateDeploymentFunc) (*v1beta1.Deployment, error) {
func (d *deploymentTester) updateDeployment(applyUpdate testutil.UpdateDeploymentFunc) (*apps.Deployment, error) {
return testutil.UpdateDeploymentWithRetries(d.c, d.deployment.Namespace, d.deployment.Name, applyUpdate, d.t.Logf, pollInterval, pollTimeout)
}
@ -330,12 +331,12 @@ func (d *deploymentTester) waitForObservedDeployment(desiredGeneration int64) er
return nil
}
func (d *deploymentTester) getNewReplicaSet() (*v1beta1.ReplicaSet, error) {
deployment, err := d.c.ExtensionsV1beta1().Deployments(d.deployment.Namespace).Get(d.deployment.Name, metav1.GetOptions{})
func (d *deploymentTester) getNewReplicaSet() (*apps.ReplicaSet, error) {
deployment, err := d.c.AppsV1().Deployments(d.deployment.Namespace).Get(d.deployment.Name, metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("failed retrieving deployment %s: %v", d.deployment.Name, err)
}
rs, err := deploymentutil.GetNewReplicaSet(deployment, d.c.ExtensionsV1beta1())
rs, err := deploymentutil.GetNewReplicaSet(deployment, d.c.AppsV1())
if err != nil {
return nil, fmt.Errorf("failed retrieving new replicaset of deployment %s: %v", d.deployment.Name, err)
}
@ -353,7 +354,7 @@ func (d *deploymentTester) expectNoNewReplicaSet() error {
return nil
}
func (d *deploymentTester) expectNewReplicaSet() (*v1beta1.ReplicaSet, error) {
func (d *deploymentTester) expectNewReplicaSet() (*apps.ReplicaSet, error) {
rs, err := d.getNewReplicaSet()
if err != nil {
return nil, err
@ -364,12 +365,12 @@ func (d *deploymentTester) expectNewReplicaSet() (*v1beta1.ReplicaSet, error) {
return rs, nil
}
func (d *deploymentTester) updateReplicaSet(name string, applyUpdate testutil.UpdateExtensionsReplicaSetFunc) (*v1beta1.ReplicaSet, error) {
return testutil.UpdateExtensionsReplicaSetWithRetries(d.c, d.deployment.Namespace, name, applyUpdate, d.t.Logf, pollInterval, pollTimeout)
func (d *deploymentTester) updateReplicaSet(name string, applyUpdate testutil.UpdateReplicaSetFunc) (*apps.ReplicaSet, error) {
return testutil.UpdateReplicaSetWithRetries(d.c, d.deployment.Namespace, name, applyUpdate, d.t.Logf, pollInterval, pollTimeout)
}
func (d *deploymentTester) updateReplicaSetStatus(name string, applyStatusUpdate testutil.UpdateExtensionsReplicaSetFunc) (*v1beta1.ReplicaSet, error) {
return testutil.UpdateExtensionsReplicaSetStatusWithRetries(d.c, d.deployment.Namespace, name, applyStatusUpdate, d.t.Logf, pollInterval, pollTimeout)
func (d *deploymentTester) updateReplicaSetStatus(name string, applyStatusUpdate testutil.UpdateReplicaSetFunc) (*apps.ReplicaSet, error) {
return testutil.UpdateReplicaSetStatusWithRetries(d.c, d.deployment.Namespace, name, applyStatusUpdate, d.t.Logf, pollInterval, pollTimeout)
}
// waitForDeploymentRollbackCleared waits until the deployment has either started rolling back or no longer needs to roll back.
@ -386,7 +387,7 @@ func (d *deploymentTester) waitForDeploymentUpdatedReplicasGTE(minUpdatedReplica
return testutil.WaitForDeploymentUpdatedReplicasGTE(d.c, d.deployment.Namespace, d.deployment.Name, minUpdatedReplicas, d.deployment.Generation, pollInterval, pollTimeout)
}
func (d *deploymentTester) waitForDeploymentWithCondition(reason string, condType v1beta1.DeploymentConditionType) error {
func (d *deploymentTester) waitForDeploymentWithCondition(reason string, condType apps.DeploymentConditionType) error {
return testutil.WaitForDeploymentWithCondition(d.c, d.deployment.Namespace, d.deployment.Name, reason, condType, d.t.Logf, pollInterval, pollTimeout)
}
@ -417,13 +418,13 @@ func (d *deploymentTester) listUpdatedPods() ([]v1.Pod, error) {
return ownedPods, nil
}
func (d *deploymentTester) waitRSStable(replicaset *v1beta1.ReplicaSet) error {
return testutil.WaitExtensionsRSStable(d.t, d.c, replicaset, pollInterval, pollTimeout)
func (d *deploymentTester) waitRSStable(replicaset *apps.ReplicaSet) error {
return testutil.WaitRSStable(d.t, d.c, replicaset, pollInterval, pollTimeout)
}
func (d *deploymentTester) scaleDeployment(newReplicas int32) error {
var err error
d.deployment, err = d.updateDeployment(func(update *v1beta1.Deployment) {
d.deployment, err = d.updateDeployment(func(update *apps.Deployment) {
update.Spec.Replicas = &newReplicas
})
if err != nil {
@ -447,7 +448,7 @@ func (d *deploymentTester) scaleDeployment(newReplicas int32) error {
// waitForReadyReplicas waits for the number of ready replicas to equal the number of replicas.
func (d *deploymentTester) waitForReadyReplicas() error {
if err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
deployment, err := d.c.ExtensionsV1beta1().Deployments(d.deployment.Namespace).Get(d.deployment.Name, metav1.GetOptions{})
deployment, err := d.c.AppsV1().Deployments(d.deployment.Namespace).Get(d.deployment.Name, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("failed to get deployment %q: %v", d.deployment.Name, err)
}
@ -485,7 +486,7 @@ func (d *deploymentTester) markUpdatedPodsReadyWithoutComplete() error {
// Verify that all replicas fields of DeploymentStatus have the desired counts.
// Immediately return an error when a non-matching replicas field is found.
func (d *deploymentTester) checkDeploymentStatusReplicasFields(replicas, updatedReplicas, readyReplicas, availableReplicas, unavailableReplicas int32) error {
deployment, err := d.c.ExtensionsV1beta1().Deployments(d.deployment.Namespace).Get(d.deployment.Name, metav1.GetOptions{})
deployment, err := d.c.AppsV1().Deployments(d.deployment.Namespace).Get(d.deployment.Name, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("failed to get deployment %q: %v", d.deployment.Name, err)
}

View File

@ -22,8 +22,8 @@ import (
"github.com/davecgh/go-spew/spew"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
@ -34,7 +34,7 @@ import (
type LogfFn func(format string, args ...interface{})
func LogReplicaSetsOfDeployment(deployment *extensions.Deployment, allOldRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet, logf LogfFn) {
func LogReplicaSetsOfDeployment(deployment *apps.Deployment, allOldRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet, logf LogfFn) {
if newRS != nil {
logf(spew.Sprintf("New ReplicaSet %q of Deployment %q:\n%+v", newRS.Name, deployment.Name, *newRS))
} else {
@ -48,7 +48,7 @@ func LogReplicaSetsOfDeployment(deployment *extensions.Deployment, allOldRSs []*
}
}
func LogPodsOfDeployment(c clientset.Interface, deployment *extensions.Deployment, rsList []*extensions.ReplicaSet, logf LogfFn) {
func LogPodsOfDeployment(c clientset.Interface, deployment *apps.Deployment, rsList []*apps.ReplicaSet, logf LogfFn) {
minReadySeconds := deployment.Spec.MinReadySeconds
podListFunc := func(namespace string, options metav1.ListOptions) (*v1.PodList, error) {
return c.CoreV1().Pods(namespace).List(options)
@ -72,15 +72,15 @@ func LogPodsOfDeployment(c clientset.Interface, deployment *extensions.Deploymen
// If this is a rolling update (rolling == true), returns an error if the deployment's
// rolling update strategy (max unavailable or max surge) is broken at any time.
// It is not treated as a rolling update shortly after a scaling event or right after the deployment is created.
func waitForDeploymentCompleteMaybeCheckRolling(c clientset.Interface, d *extensions.Deployment, rolling bool, logf LogfFn, pollInterval, pollTimeout time.Duration) error {
func waitForDeploymentCompleteMaybeCheckRolling(c clientset.Interface, d *apps.Deployment, rolling bool, logf LogfFn, pollInterval, pollTimeout time.Duration) error {
var (
deployment *extensions.Deployment
deployment *apps.Deployment
reason string
)
err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
var err error
deployment, err = c.ExtensionsV1beta1().Deployments(d.Namespace).Get(d.Name, metav1.GetOptions{})
deployment, err = c.AppsV1().Deployments(d.Namespace).Get(d.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
@ -114,9 +114,9 @@ func waitForDeploymentCompleteMaybeCheckRolling(c clientset.Interface, d *extens
return nil
}
func checkRollingUpdateStatus(c clientset.Interface, deployment *extensions.Deployment, logf LogfFn) (string, error) {
func checkRollingUpdateStatus(c clientset.Interface, deployment *apps.Deployment, logf LogfFn) (string, error) {
var reason string
oldRSs, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(deployment, c.ExtensionsV1beta1())
oldRSs, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(deployment, c.AppsV1())
if err != nil {
return "", err
}
@ -128,7 +128,7 @@ func checkRollingUpdateStatus(c clientset.Interface, deployment *extensions.Depl
allRSs := append(oldRSs, newRS)
// The old/new ReplicaSets need to contain the pod-template-hash label
for i := range allRSs {
if !labelsutil.SelectorHasLabel(allRSs[i].Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey) {
if !labelsutil.SelectorHasLabel(allRSs[i].Spec.Selector, apps.DefaultDeploymentUniqueLabelKey) {
reason = "all replica sets need to contain the pod-template-hash label"
return reason, nil
}
@ -153,7 +153,7 @@ func checkRollingUpdateStatus(c clientset.Interface, deployment *extensions.Depl
// Waits for the deployment to complete, and checks that the rolling update strategy isn't broken at any time.
// The rolling update strategy should not be broken during a rolling update.
func WaitForDeploymentCompleteAndCheckRolling(c clientset.Interface, d *extensions.Deployment, logf LogfFn, pollInterval, pollTimeout time.Duration) error {
func WaitForDeploymentCompleteAndCheckRolling(c clientset.Interface, d *apps.Deployment, logf LogfFn, pollInterval, pollTimeout time.Duration) error {
rolling := true
return waitForDeploymentCompleteMaybeCheckRolling(c, d, rolling, logf, pollInterval, pollTimeout)
}
@ -161,7 +161,7 @@ func WaitForDeploymentCompleteAndCheckRolling(c clientset.Interface, d *extensio
// Waits for the deployment to complete, without checking whether the rolling update strategy is broken.
// The rolling update strategy applies only during a rolling update, and can be violated in other situations,
// such as shortly after a scaling event or right after the deployment is created.
func WaitForDeploymentComplete(c clientset.Interface, d *extensions.Deployment, logf LogfFn, pollInterval, pollTimeout time.Duration) error {
func WaitForDeploymentComplete(c clientset.Interface, d *apps.Deployment, logf LogfFn, pollInterval, pollTimeout time.Duration) error {
rolling := false
return waitForDeploymentCompleteMaybeCheckRolling(c, d, rolling, logf, pollInterval, pollTimeout)
}
@ -169,17 +169,17 @@ func WaitForDeploymentComplete(c clientset.Interface, d *extensions.Deployment,
// WaitForDeploymentRevisionAndImage waits for the deployment's and its new RS's revision and container image to match the given revision and image.
// Note that deployment revision and its new RS revision should be updated shortly, so we only wait for 1 minute here to fail early.
func WaitForDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName string, revision, image string, logf LogfFn, pollInterval, pollTimeout time.Duration) error {
var deployment *extensions.Deployment
var newRS *extensions.ReplicaSet
var deployment *apps.Deployment
var newRS *apps.ReplicaSet
var reason string
err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
var err error
deployment, err = c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
deployment, err = c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
if err != nil {
return false, err
}
// The new ReplicaSet needs to be non-nil and contain the pod-template-hash label
newRS, err = deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1())
newRS, err = deploymentutil.GetNewReplicaSet(deployment, c.AppsV1())
if err != nil {
return false, err
}
@ -205,26 +205,26 @@ func WaitForDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName
// CheckDeploymentRevisionAndImage checks if the input deployment's and its new replica set's revision and image are as expected.
func CheckDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName, revision, image string) error {
deployment, err := c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("unable to get deployment %s during revision check: %v", deploymentName, err)
}
// Check revision of the new replica set of this deployment
newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1())
newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1())
if err != nil {
return fmt.Errorf("unable to get new replicaset of deployment %s during revision check: %v", deploymentName, err)
}
return checkRevisionAndImage(deployment, newRS, revision, image)
}
func checkRevisionAndImage(deployment *extensions.Deployment, newRS *extensions.ReplicaSet, revision, image string) error {
func checkRevisionAndImage(deployment *apps.Deployment, newRS *apps.ReplicaSet, revision, image string) error {
// The new ReplicaSet needs to be non-nil and contain the pod-template-hash label
if newRS == nil {
return fmt.Errorf("new replicaset for deployment %q is yet to be created", deployment.Name)
}
if !labelsutil.SelectorHasLabel(newRS.Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey) {
return fmt.Errorf("new replica set %q doesn't have %q label selector", newRS.Name, extensions.DefaultDeploymentUniqueLabelKey)
if !labelsutil.SelectorHasLabel(newRS.Spec.Selector, apps.DefaultDeploymentUniqueLabelKey) {
return fmt.Errorf("new replica set %q doesn't have %q label selector", newRS.Name, apps.DefaultDeploymentUniqueLabelKey)
}
// Check revision of this deployment, and of the new replica set of this deployment
if deployment.Annotations == nil || deployment.Annotations[deploymentutil.RevisionAnnotation] != revision {
@ -252,19 +252,19 @@ func containsImage(containers []v1.Container, imageName string) bool {
return false
}
type UpdateDeploymentFunc func(d *extensions.Deployment)
type UpdateDeploymentFunc func(d *apps.Deployment)
func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string, applyUpdate UpdateDeploymentFunc, logf LogfFn, pollInterval, pollTimeout time.Duration) (*extensions.Deployment, error) {
var deployment *extensions.Deployment
func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string, applyUpdate UpdateDeploymentFunc, logf LogfFn, pollInterval, pollTimeout time.Duration) (*apps.Deployment, error) {
var deployment *apps.Deployment
var updateErr error
pollErr := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
var err error
if deployment, err = c.ExtensionsV1beta1().Deployments(namespace).Get(name, metav1.GetOptions{}); err != nil {
if deployment, err = c.AppsV1().Deployments(namespace).Get(name, metav1.GetOptions{}); err != nil {
return false, err
}
// Apply the update, then attempt to push it to the apiserver.
applyUpdate(deployment)
if deployment, err = c.ExtensionsV1beta1().Deployments(namespace).Update(deployment); err == nil {
if deployment, err = c.AppsV1().Deployments(namespace).Update(deployment); err == nil {
logf("Updating deployment %s", name)
return true, nil
}
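
For reference, a hypothetical call site for the retry helper above (not part of this diff; assumes the `testutil` import alias, a clientset `c`, a `*testing.T`, and the poll constants used in these tests):

```go
// Hypothetical usage: pause a Deployment via the apps/v1 retry wrapper.
// The closure is re-applied after each fresh Get when Update hits a conflict.
func pauseDeployment(t *testing.T, c clientset.Interface, ns, name string) (*apps.Deployment, error) {
	return testutil.UpdateDeploymentWithRetries(c, ns, name, func(update *apps.Deployment) {
		update.Spec.Paused = true
	}, t.Logf, pollInterval, pollTimeout)
}
```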
@ -278,20 +278,20 @@ func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string,
}
func WaitForObservedDeployment(c clientset.Interface, ns, deploymentName string, desiredGeneration int64) error {
return deploymentutil.WaitForObservedDeployment(func() (*extensions.Deployment, error) {
return c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
return deploymentutil.WaitForObservedDeployment(func() (*apps.Deployment, error) {
return c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
}, desiredGeneration, 2*time.Second, 1*time.Minute)
}
// WaitForDeploymentRollbackCleared waits until the given deployment has either started rolling back or no longer needs to roll back.
func WaitForDeploymentRollbackCleared(c clientset.Interface, ns, deploymentName string, pollInterval, pollTimeout time.Duration) error {
err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
deployment, err := c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
if err != nil {
return false, err
}
// Rollback not set or is kicked off
if deployment.Spec.RollbackTo == nil {
if deployment.Annotations[apps.DeprecatedRollbackTo] == "" {
return true, nil
}
return false, nil
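
The check above changes because apps/v1 removed `Spec.RollbackTo`; a pending rollback is now only visible through the `apps.DeprecatedRollbackTo` annotation. A small sketch of that distinction (helper name is illustrative, not from this diff):

```go
// rollbackPending reports whether an apps/v1 Deployment still carries an
// unprocessed rollback request. apps.DeprecatedRollbackTo is the
// "deprecated.deployment.rollback.to" annotation key, which the controller
// removes once the rollback has been handled.
func rollbackPending(d *apps.Deployment) bool {
	return d.Annotations[apps.DeprecatedRollbackTo] != ""
}
```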
@ -304,9 +304,9 @@ func WaitForDeploymentRollbackCleared(c clientset.Interface, ns, deploymentName
// WaitForDeploymentUpdatedReplicasGTE waits for the given deployment to be observed by the controller and to have at least minUpdatedReplicas updated replicas
func WaitForDeploymentUpdatedReplicasGTE(c clientset.Interface, ns, deploymentName string, minUpdatedReplicas int32, desiredGeneration int64, pollInterval, pollTimeout time.Duration) error {
var deployment *extensions.Deployment
var deployment *apps.Deployment
err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
d, err := c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
d, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
if err != nil {
return false, err
}
@ -319,10 +319,10 @@ func WaitForDeploymentUpdatedReplicasGTE(c clientset.Interface, ns, deploymentNa
return nil
}
func WaitForDeploymentWithCondition(c clientset.Interface, ns, deploymentName, reason string, condType extensions.DeploymentConditionType, logf LogfFn, pollInterval, pollTimeout time.Duration) error {
var deployment *extensions.Deployment
func WaitForDeploymentWithCondition(c clientset.Interface, ns, deploymentName, reason string, condType apps.DeploymentConditionType, logf LogfFn, pollInterval, pollTimeout time.Duration) error {
var deployment *apps.Deployment
pollErr := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
d, err := c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
d, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
if err != nil {
return false, err
}
@ -332,7 +332,7 @@ func WaitForDeploymentWithCondition(c clientset.Interface, ns, deploymentName, r
})
if pollErr == wait.ErrWaitTimeout {
pollErr = fmt.Errorf("deployment %q never updated with the desired condition and reason, latest deployment conditions: %+v", deployment.Name, deployment.Status.Conditions)
_, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(deployment, c.ExtensionsV1beta1())
_, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(deployment, c.AppsV1())
if err == nil {
LogReplicaSetsOfDeployment(deployment, allOldRSs, newRS, logf)
LogPodsOfDeployment(c, deployment, append(allOldRSs, newRS), logf)

View File

@ -22,7 +22,6 @@ import (
"time"
apps "k8s.io/api/apps/v1"
extensions "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
@ -53,33 +52,6 @@ func UpdateReplicaSetWithRetries(c clientset.Interface, namespace, name string,
return rs, pollErr
}
// TODO(#55714): Remove this after Deployment tests use apps/v1 ReplicaSet.
type UpdateExtensionsReplicaSetFunc func(d *extensions.ReplicaSet)
// TODO(#55714): Remove this after Deployment tests use apps/v1 ReplicaSet.
func UpdateExtensionsReplicaSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate UpdateExtensionsReplicaSetFunc, logf LogfFn, pollInterval, pollTimeout time.Duration) (*extensions.ReplicaSet, error) {
var rs *extensions.ReplicaSet
var updateErr error
pollErr := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
var err error
if rs, err = c.ExtensionsV1beta1().ReplicaSets(namespace).Get(name, metav1.GetOptions{}); err != nil {
return false, err
}
// Apply the update, then attempt to push it to the apiserver.
applyUpdate(rs)
if rs, err = c.ExtensionsV1beta1().ReplicaSets(namespace).Update(rs); err == nil {
logf("Updating replica set %q", name)
return true, nil
}
updateErr = err
return false, nil
})
if pollErr == wait.ErrWaitTimeout {
pollErr = fmt.Errorf("couldn't apply the provided updated to replicaset %q: %v", name, updateErr)
}
return rs, pollErr
}
// Verify .Status.Replicas is equal to .Spec.Replicas
func WaitRSStable(t *testing.T, clientSet clientset.Interface, rs *apps.ReplicaSet, pollInterval, pollTimeout time.Duration) error {
desiredGeneration := rs.Generation
@ -95,33 +67,17 @@ func WaitRSStable(t *testing.T, clientSet clientset.Interface, rs *apps.ReplicaS
return nil
}
// TODO(#55714): Remove after Deployment tests use apps/v1 ReplicaSet.
func WaitExtensionsRSStable(t *testing.T, clientSet clientset.Interface, rs *extensions.ReplicaSet, pollInterval, pollTimeout time.Duration) error {
desiredGeneration := rs.Generation
if err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
newRS, err := clientSet.ExtensionsV1beta1().ReplicaSets(rs.Namespace).Get(rs.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
return newRS.Status.ObservedGeneration >= desiredGeneration && newRS.Status.Replicas == *rs.Spec.Replicas, nil
}); err != nil {
return fmt.Errorf("failed to verify .Status.Replicas is equal to .Spec.Replicas for replicaset %q: %v", rs.Name, err)
}
return nil
}
// TODO(#55714): Remove after Deployment tests use apps/v1 ReplicaSet.
func UpdateExtensionsReplicaSetStatusWithRetries(c clientset.Interface, namespace, name string, applyUpdate UpdateExtensionsReplicaSetFunc, logf LogfFn, pollInterval, pollTimeout time.Duration) (*extensions.ReplicaSet, error) {
var rs *extensions.ReplicaSet
func UpdateReplicaSetStatusWithRetries(c clientset.Interface, namespace, name string, applyUpdate UpdateReplicaSetFunc, logf LogfFn, pollInterval, pollTimeout time.Duration) (*apps.ReplicaSet, error) {
var rs *apps.ReplicaSet
var updateErr error
pollErr := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
var err error
if rs, err = c.ExtensionsV1beta1().ReplicaSets(namespace).Get(name, metav1.GetOptions{}); err != nil {
if rs, err = c.AppsV1().ReplicaSets(namespace).Get(name, metav1.GetOptions{}); err != nil {
return false, err
}
// Apply the update, then attempt to push it to the apiserver.
applyUpdate(rs)
if rs, err = c.ExtensionsV1beta1().ReplicaSets(namespace).UpdateStatus(rs); err == nil {
if rs, err = c.AppsV1().ReplicaSets(namespace).UpdateStatus(rs); err == nil {
logf("Updating replica set %q", name)
return true, nil
}