Adding reaper for deployments

pull/6/head
Maciej Szulik 2015-12-02 16:09:01 +01:00
parent eb5f707f24
commit 0ea31b56ed
4 changed files with 294 additions and 154 deletions

View File

@@ -637,13 +637,13 @@ runTests() {
# Post-Condition: service "nginx" has configuration annotation
[[ "$(kubectl get svc nginx -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Clean up
kubectl delete rc,svc nginx
## 6. kubectl autoscale --save-config should generate configuration annotation
# Pre-Condition: no RC exists; then create rc "frontend", which shouldn't have a configuration annotation
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
kubectl create -f examples/guestbook/frontend-controller.yaml "${kube_flags[@]}"
! [[ "$(kubectl get rc frontend -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Command: autoscale rc "frontend"
# Command: autoscale rc "frontend"
kubectl autoscale -f examples/guestbook/frontend-controller.yaml --save-config "${kube_flags[@]}" --max=2
# Post-Condition: hpa "frontend" has configuration annotation
[[ "$(kubectl get hpa frontend -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
@@ -653,21 +653,21 @@ runTests() {
## kubectl apply should create the resource that doesn't exist yet
# Pre-Condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command: apply a pod "test-pod" (doesn't exist) should create this pod
kubectl apply -f hack/testdata/pod.yaml "${kube_flags[@]}"
# Post-Condition: pod "test-pod" is created
kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-label'
# Post-Condition: pod "test-pod" has configuration annotation
[[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Clean up
kubectl delete pods test-pod "${kube_flags[@]}"
## kubectl run should create deployments or jobs
# Pre-Condition: no Job exists
kube::test::get_object_assert jobs "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl run pi --generator=job/v1beta1 --image=perl --restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(20)' "${kube_flags[@]}"
# Post-Condition: Job "pi" is created
# Post-Condition: Job "pi" is created
kube::test::get_object_assert jobs "{{range.items}}{{$id_field}}:{{end}}" 'pi:'
# Clean up
kubectl delete jobs pi "${kube_flags[@]}"
@@ -675,11 +675,10 @@ runTests() {
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl run nginx --image=nginx --generator=deployment/v1beta1 "${kube_flags[@]}"
# Post-Condition: Deployment "nginx" is created
# Post-Condition: Deployment "nginx" is created
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx:'
# Clean up
kubectl delete deployment nginx "${kube_flags[@]}"
kubectl delete rc -l pod-template-hash "${kube_flags[@]}"
##############
# Namespaces #
@@ -721,7 +720,7 @@ runTests() {
# Command
kubectl delete "${kube_flags[@]}" pod --namespace=other valid-pod --grace-period=0
# Post-condition: valid-pod POD doesn't exist
kube::test::get_object_assert 'pods --namespace=other' "{{range.items}}{{$id_field}}:{{end}}" ''
# Clean up
kubectl delete namespace other
@@ -966,6 +965,7 @@ __EOF__
kube::test::get_object_assert 'job pi' "{{$job_parallelism_field}}" '2'
# Clean-up
kubectl delete job/pi "${kube_flags[@]}"
# TODO(madhusudancs): Fix this when Scale group issues are resolved (see issue #18528).
# ### Scale a deployment
# kubectl create -f examples/extensions/deployment.yaml "${kube_flags[@]}"
@@ -975,8 +975,6 @@ __EOF__
# kube::test::get_object_assert 'deployment nginx-deployment' "{{$deployment_replicas}}" '1'
# # Clean-up
# kubectl delete deployment/nginx-deployment "${kube_flags[@]}"
# # TODO: Remove once deployment reaping is implemented
# kubectl delete rs --all "${kube_flags[@]}"
### Expose a deployment as a service
kubectl create -f docs/user-guide/deployment.yaml "${kube_flags[@]}"
@@ -988,8 +986,6 @@ __EOF__
kube::test::get_object_assert 'service nginx-deployment' "{{$port_field}}" '80'
# Clean-up
kubectl delete deployment/nginx-deployment service/nginx-deployment "${kube_flags[@]}"
# TODO: Remove once deployment reaping is implemented
kubectl delete rs --all "${kube_flags[@]}"
### Expose replication controller as service
kubectl create -f examples/guestbook/frontend-controller.yaml "${kube_flags[@]}"
@@ -1094,7 +1090,7 @@ __EOF__
# Clean up
kubectl delete rc frontend "${kube_flags[@]}"
### Auto scale deployment
# Pre-condition: no deployment exists
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
@@ -1106,9 +1102,8 @@ __EOF__
# Clean up
kubectl delete hpa nginx-deployment "${kube_flags[@]}"
kubectl delete deployment nginx-deployment "${kube_flags[@]}"
kubectl delete rs -l pod-template-hash "${kube_flags[@]}"
### Rollback a deployment
# Pre-condition: no deployment exists
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
@@ -1330,7 +1325,7 @@ __EOF__
# Clean up
rm "${temp_editor}"
# Command
# We need to set --overwrite, because otherwise, if the first attempt to run "kubectl label"
# We need to set --overwrite, because otherwise, if the first attempt to run "kubectl label"
# fails on some, but not all, of the resources, retries will fail because it tries to modify
# existing labels.
kubectl-with-retry label -f $file labeled=true --overwrite "${kube_flags[@]}"
@@ -1349,7 +1344,7 @@ __EOF__
fi
# Command
# We need to set --overwrite, because otherwise, if the first attempt to run "kubectl annotate"
# fails on some, but not all, of the resources, retries will fail because it tries to modify
# existing annotations.
kubectl-with-retry annotate -f $file annotated=true --overwrite "${kube_flags[@]}"

View File

@@ -78,6 +78,9 @@ func ReaperFor(kind unversioned.GroupKind, c client.Interface) (Reaper, error) {
case extensions.Kind("Job"):
return &JobReaper{c, Interval, Timeout}, nil
case extensions.Kind("Deployment"):
return &DeploymentReaper{c, Interval, Timeout}, nil
}
return nil, &NoSuchReaperError{kind}
}
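For orientation, here is how a caller is expected to obtain and drive the new reaper, mirroring the e2e usage further down. This is a minimal sketch, assuming a configured client.Interface named c; the namespace, deployment name, and timeout are illustrative:

// Sketch: look up the reaper for the Deployment kind and stop one deployment.
reaper, err := kubectl.ReaperFor(extensions.Kind("Deployment"), c)
if err != nil {
return err
}
// Stop scales the deployment to zero, reaps its ReplicaSets, then deletes it.
err = reaper.Stop("default", "nginx-deployment", 1*time.Minute, api.NewDeleteOptions(0))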
@@ -102,6 +105,10 @@ type JobReaper struct {
client.Interface
pollInterval, timeout time.Duration
}
type DeploymentReaper struct {
client.Interface
pollInterval, timeout time.Duration
}
type PodReaper struct {
client.Interface
}
@@ -191,10 +198,7 @@ func (reaper *ReplicationControllerReaper) Stop(namespace, name string, timeout
return err
}
}
return rc.Delete(name)
}
// TODO(madhusudancs): Implement it when controllerRef is implemented - https://github.com/kubernetes/kubernetes/issues/2210
@@ -303,10 +307,7 @@ func (reaper *DaemonSetReaper) Stop(namespace, name string, timeout time.Duratio
return err
}
return reaper.Extensions().DaemonSets(namespace).Delete(name)
}
func (reaper *JobReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error {
@@ -352,10 +353,92 @@ func (reaper *JobReaper) Stop(namespace, name string, timeout time.Duration, gra
return utilerrors.NewAggregate(errList)
}
// once we have all the pods removed we can safely remove the job itself
return jobs.Delete(name, gracePeriod)
}
func (reaper *DeploymentReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error {
deployments := reaper.Extensions().Deployments(namespace)
replicaSets := reaper.Extensions().ReplicaSets(namespace)
rsReaper, _ := ReaperFor(extensions.Kind("ReplicaSet"), reaper)
deployment, err := deployments.Get(name)
if err != nil {
return err
}
// set the deployment's revision history limit and replica count to 0
// TODO replace with patch when available: https://github.com/kubernetes/kubernetes/issues/20527
zero := 0
deployment.Spec.RevisionHistoryLimit = &zero
deployment.Spec.Replicas = 0
// TODO: un-pausing should not be necessary, remove when this is fixed:
// https://github.com/kubernetes/kubernetes/issues/20966
// Instead, the deployment should be paused at this point rather than at the next TODO below.
deployment.Spec.Paused = false
deployment, err = deployments.Update(deployment)
if err != nil {
return err
}
// wait for the total number of pods to drop to 0
if err := wait.Poll(reaper.pollInterval, reaper.timeout, func() (bool, error) {
curr, err := deployments.Get(name)
// if the deployment was not found it must have been deleted, so error out
if err != nil && errors.IsNotFound(err) {
return false, err
}
// if other errors happen, retry
if err != nil {
return false, nil
}
// check that the deployment wasn't recreated under the same name
// TODO: use generations once deployments have them
if curr.UID != deployment.UID {
return false, errors.NewNotFound(extensions.Resource("Deployment"), name)
}
return curr.Status.Replicas == 0, nil
}); err != nil {
return err
}
// TODO: When deployments allow running the cleanup policy while paused,
// move the pausing up to the update operation above. Until then, we need to
// pause the deployment before stopping its RSs, to prevent new RSs from being created.
// See https://github.com/kubernetes/kubernetes/issues/20966
deployment, err = deployments.Get(name)
if err != nil {
return err
}
deployment.Spec.Paused = true
deployment, err = deployments.Update(deployment)
if err != nil {
return err
}
// remove remaining RSs
selector, err := unversioned.LabelSelectorAsSelector(deployment.Spec.Selector)
if err != nil {
return err
}
options := api.ListOptions{LabelSelector: selector}
rsList, err := replicaSets.List(options)
if err != nil {
return err
}
errList := []error{}
for _, rs := range rsList.Items {
if err := rsReaper.Stop(rs.Namespace, rs.Name, timeout, gracePeriod); err != nil {
if !errors.IsNotFound(err) {
errList = append(errList, err)
}
}
}
if len(errList) > 0 {
return utilerrors.NewAggregate(errList)
}
// and finally delete the deployment itself
return deployments.Delete(name, gracePeriod)
}
func (reaper *PodReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error {
@@ -364,11 +447,7 @@ func (reaper *PodReaper) Stop(namespace, name string, timeout time.Duration, gra
if err != nil {
return err
}
return pods.Delete(name, gracePeriod)
}
func (reaper *ServiceReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error {
@@ -377,8 +456,5 @@ func (reaper *ServiceReaper) Stop(namespace, name string, timeout time.Duration,
if err != nil {
return err
}
return services.Delete(name)
}
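All of the reapers above share the same polling idiom from pkg/util/wait: the condition returns a terminal error to stop polling, (false, nil) to retry, and (true, nil) when done. A minimal self-contained sketch of that pattern, extracted from the deployment reaper above; the function name and durations are illustrative:

// waitForZeroReplicas polls until the named deployment reports zero replicas.
// NotFound is treated as terminal; any other error is transient and retried.
func waitForZeroReplicas(c client.Interface, namespace, name string, interval, timeout time.Duration) error {
return wait.Poll(interval, timeout, func() (bool, error) {
curr, err := c.Extensions().Deployments(namespace).Get(name)
if err != nil && errors.IsNotFound(err) {
return false, err // deployment is gone entirely: surface a terminal error
}
if err != nil {
return false, nil // transient error: retry on the next tick
}
return curr.Status.Replicas == 0, nil // done once fully scaled down
})
}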

View File

@@ -29,6 +29,7 @@ import (
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/client/unversioned/testclient"
"k8s.io/kubernetes/pkg/runtime"
deploymentutil "k8s.io/kubernetes/pkg/util/deployment"
)
func TestReplicationControllerStop(t *testing.T) {
@@ -495,6 +496,133 @@ func TestJobStop(t *testing.T) {
}
}
func TestDeploymentStop(t *testing.T) {
name := "foo"
ns := "default"
deployment := extensions.Deployment{
ObjectMeta: api.ObjectMeta{
Name: name,
Namespace: ns,
},
Spec: extensions.DeploymentSpec{
Replicas: 0,
Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"k1": "v1"}},
},
Status: extensions.DeploymentStatus{
Replicas: 0,
},
}
template := deploymentutil.GetNewReplicaSetTemplate(deployment)
tests := []struct {
Name string
Objs []runtime.Object
StopError error
ExpectedActions []string
}{
{
Name: "SimpleDeployment",
Objs: []runtime.Object{
&extensions.Deployment{ // GET
ObjectMeta: api.ObjectMeta{
Name: name,
Namespace: ns,
},
Spec: extensions.DeploymentSpec{
Replicas: 0,
Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"k1": "v1"}},
},
Status: extensions.DeploymentStatus{
Replicas: 0,
},
},
&extensions.Scale{ // UPDATE
ObjectMeta: api.ObjectMeta{
Name: name,
Namespace: ns,
},
Spec: extensions.ScaleSpec{
Replicas: 0,
},
Status: extensions.ScaleStatus{
Replicas: 0,
Selector: map[string]string{"k1": "v1"},
},
},
},
StopError: nil,
ExpectedActions: []string{"get:deployments", "update:deployments",
"get:deployments", "get:deployments", "update:deployments",
"list:replicasets", "delete:deployments"},
},
{
Name: "Deployment with single replicaset",
Objs: []runtime.Object{
&deployment, // GET
&extensions.Scale{ // UPDATE
ObjectMeta: api.ObjectMeta{
Name: name,
Namespace: ns,
},
Spec: extensions.ScaleSpec{
Replicas: 0,
},
Status: extensions.ScaleStatus{
Replicas: 0,
Selector: map[string]string{"k1": "v1"},
},
},
&extensions.ReplicaSetList{ // LIST
Items: []extensions.ReplicaSet{
{
ObjectMeta: api.ObjectMeta{
Name: name,
Namespace: ns,
},
Spec: extensions.ReplicaSetSpec{
Template: &template,
},
},
},
},
},
StopError: nil,
ExpectedActions: []string{"get:deployments", "update:deployments",
"get:deployments", "get:deployments", "update:deployments",
"list:replicasets", "get:replicasets", "get:replicasets",
"update:replicasets", "get:replicasets", "get:replicasets",
"delete:replicasets", "delete:deployments"},
},
}
for _, test := range tests {
fake := testclient.NewSimpleFake(test.Objs...)
reaper := DeploymentReaper{fake, time.Millisecond, time.Millisecond}
err := reaper.Stop(ns, name, 0, nil)
if !reflect.DeepEqual(err, test.StopError) {
t.Errorf("%s unexpected error: %v", test.Name, err)
continue
}
actions := fake.Actions()
if len(actions) != len(test.ExpectedActions) {
t.Errorf("%s unexpected actions: %v, expected %d actions got %d", test.Name, actions, len(test.ExpectedActions), len(actions))
continue
}
for i, expAction := range test.ExpectedActions {
action := strings.Split(expAction, ":")
if actions[i].GetVerb() != action[0] {
t.Errorf("%s unexpected verb: %+v, expected %s", test.Name, actions[i], expAction)
}
if actions[i].GetResource() != action[1] {
t.Errorf("%s unexpected resource: %+v, expected %s", test.Name, actions[i], expAction)
}
if len(action) == 3 && actions[i].GetSubresource() != action[2] {
t.Errorf("%s unexpected subresource: %+v, expected %s", test.Name, actions[i], expAction)
}
}
}
}
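For readers unfamiliar with the fake client used above: testclient.NewSimpleFake seeds the client with canned objects and records every call it receives, which the test then matches against "verb:resource" strings. A minimal sketch of that pattern; the seeded object and the calls are illustrative:

// Seed the fake with one deployment, exercise it, and inspect recorded actions.
fake := testclient.NewSimpleFake(&extensions.Deployment{
ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "default"},
})
fake.Extensions().Deployments("default").Get("foo")
for _, a := range fake.Actions() {
fmt.Printf("%s:%s\n", a.GetVerb(), a.GetResource()) // e.g. "get:deployments"
}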
type noSuchPod struct {
*testclient.FakePods
}

View File

@@ -21,12 +21,15 @@ import (
"time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/extensions"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/kubectl"
deploymentutil "k8s.io/kubernetes/pkg/util/deployment"
"k8s.io/kubernetes/pkg/util/intstr"
"k8s.io/kubernetes/pkg/util/wait"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@@ -65,6 +68,7 @@ var _ = Describe("Deployment [Feature:Deployment]", func() {
})
func newRS(rsName string, replicas int, rsPodLabels map[string]string, imageName string, image string) *extensions.ReplicaSet {
zero := int64(0)
return &extensions.ReplicaSet{
ObjectMeta: api.ObjectMeta{
Name: rsName,
@@ -77,6 +81,7 @@ func newRS(rsName string, replicas int, rsPodLabels map[string]string, imageName
Labels: rsPodLabels,
},
Spec: api.PodSpec{
TerminationGracePeriodSeconds: &zero,
Containers: []api.Container{
{
Name: imageName,
@@ -90,6 +95,7 @@ func newRS(rsName string, replicas int, rsPodLabels map[string]string, imageName
}
func newDeployment(deploymentName string, replicas int, podLabels map[string]string, imageName string, image string, strategyType extensions.DeploymentStrategyType, revisionHistoryLimit *int) *extensions.Deployment {
zero := int64(0)
return &extensions.Deployment{
ObjectMeta: api.ObjectMeta{
Name: deploymentName,
@@ -106,6 +112,7 @@ func newDeployment(deploymentName string, replicas int, podLabels map[string]str
Labels: podLabels,
},
Spec: api.PodSpec{
TerminationGracePeriodSeconds: &zero,
Containers: []api.Container{
{
Name: imageName,
@@ -149,6 +156,43 @@ func checkDeploymentRevision(c *clientset.Clientset, ns, deploymentName, revisio
return deployment, newRS
}
func stopDeployment(c *clientset.Clientset, oldC client.Interface, ns, deploymentName string) {
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
Expect(err).NotTo(HaveOccurred())
Logf("deleting deployment %s", deploymentName)
reaper, err := kubectl.ReaperFor(extensions.Kind("Deployment"), oldC)
Expect(err).NotTo(HaveOccurred())
timeout := 1 * time.Minute
err = reaper.Stop(ns, deployment.Name, timeout, api.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
Logf("ensuring deployment %s was deleted", deploymentName)
_, err = c.Extensions().Deployments(ns).Get(deployment.Name)
Expect(err).To(HaveOccurred())
Expect(errors.IsNotFound(err)).To(BeTrue())
Logf("ensuring deployment %s rcs were deleted", deploymentName)
selector, err := unversioned.LabelSelectorAsSelector(deployment.Spec.Selector)
Expect(err).NotTo(HaveOccurred())
options := api.ListOptions{LabelSelector: selector}
rss, err := c.Extensions().ReplicaSets(ns).List(options)
Expect(err).NotTo(HaveOccurred())
Expect(rss.Items).Should(HaveLen(0))
Logf("ensuring deployment %s pods were deleted", deploymentName)
if err := wait.PollImmediate(time.Second, wait.ForeverTestTimeout, func() (bool, error) {
pods, err := c.Core().Pods(ns).List(api.ListOptions{})
if err != nil {
return false, err
}
if len(pods.Items) == 0 {
return true, nil
}
return false, nil
}); err != nil {
Failf("Failed to remove deployment %s pods!", deploymentName)
}
}
func testNewDeployment(f *Framework) {
ns := f.Namespace.Name
// TODO: remove unversionedClient when the refactoring is done. Currently some
@@ -164,16 +208,8 @@ func testNewDeployment(f *Framework) {
d.Annotations = map[string]string{"test": "should-copy-to-replica-set", kubectl.LastAppliedConfigAnnotation: "should-not-copy-to-replica-set"}
_, err := c.Extensions().Deployments(ns).Create(d)
Expect(err).NotTo(HaveOccurred())
defer stopDeployment(c, f.Client, ns, deploymentName)
// Check that deployment is created fine.
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
Expect(err).NotTo(HaveOccurred())
@@ -216,10 +252,6 @@ func testRollingUpdateDeployment(f *Framework) {
replicas := 3
_, err := c.Extensions().ReplicaSets(ns).Create(newRS(rsName, replicas, rsPodLabels, "nginx", "nginx"))
Expect(err).NotTo(HaveOccurred())
defer func() {
Logf("deleting replica set %s", rsName)
Expect(c.Extensions().ReplicaSets(ns).Delete(rsName, nil)).NotTo(HaveOccurred())
}()
// Verify that the required pods have come up.
err = verifyPods(unversionedClient, ns, "sample-pod", false, 3)
if err != nil {
@@ -232,16 +264,7 @@ func testRollingUpdateDeployment(f *Framework) {
Logf("Creating deployment %s", deploymentName)
_, err = c.Extensions().Deployments(ns).Create(newDeployment(deploymentName, replicas, deploymentPodLabels, "redis", "redis", extensions.RollingUpdateDeploymentStrategyType, nil))
Expect(err).NotTo(HaveOccurred())
defer stopDeployment(c, f.Client, ns, deploymentName)
err = waitForDeploymentStatus(c, ns, deploymentName, replicas, replicas-1, replicas+1, 0)
Expect(err).NotTo(HaveOccurred())
@@ -273,10 +296,6 @@ func testRollingUpdateDeploymentEvents(f *Framework) {
_, err := c.Extensions().ReplicaSets(ns).Create(rs)
Expect(err).NotTo(HaveOccurred())
defer func() {
Logf("deleting replica set %s", rsName)
Expect(c.Extensions().ReplicaSets(ns).Delete(rsName, nil)).NotTo(HaveOccurred())
}()
// Verify that the required pods have come up.
err = verifyPods(unversionedClient, ns, "sample-pod-2", false, 1)
if err != nil {
@@ -289,16 +308,7 @@ func testRollingUpdateDeploymentEvents(f *Framework) {
Logf("Creating deployment %s", deploymentName)
_, err = c.Extensions().Deployments(ns).Create(newDeployment(deploymentName, replicas, deploymentPodLabels, "redis", "redis", extensions.RollingUpdateDeploymentStrategyType, nil))
Expect(err).NotTo(HaveOccurred())
defer stopDeployment(c, f.Client, ns, deploymentName)
err = waitForDeploymentStatus(c, ns, deploymentName, replicas, replicas-1, replicas+1, 0)
Expect(err).NotTo(HaveOccurred())
@@ -341,10 +351,6 @@ func testRecreateDeployment(f *Framework) {
replicas := 3
_, err := c.Extensions().ReplicaSets(ns).Create(newRS(rsName, replicas, rsPodLabels, "nginx", "nginx"))
Expect(err).NotTo(HaveOccurred())
defer func() {
Logf("deleting replica set %s", rsName)
Expect(c.Extensions().ReplicaSets(ns).Delete(rsName, nil)).NotTo(HaveOccurred())
}()
// Verify that the required pods have come up.
err = verifyPods(unversionedClient, ns, "sample-pod-3", false, 3)
if err != nil {
@@ -357,16 +363,7 @@ func testRecreateDeployment(f *Framework) {
Logf("Creating deployment %s", deploymentName)
_, err = c.Extensions().Deployments(ns).Create(newDeployment(deploymentName, replicas, deploymentPodLabels, "redis", "redis", extensions.RecreateDeploymentStrategyType, nil))
Expect(err).NotTo(HaveOccurred())
defer stopDeployment(c, f.Client, ns, deploymentName)
err = waitForDeploymentStatus(c, ns, deploymentName, replicas, 0, replicas, 0)
if err != nil {
@@ -426,16 +423,7 @@ func testDeploymentCleanUpPolicy(f *Framework) {
Logf("Creating deployment %s", deploymentName)
_, err = c.Extensions().Deployments(ns).Create(newDeployment(deploymentName, replicas, deploymentPodLabels, "redis", "redis", extensions.RollingUpdateDeploymentStrategyType, revisionHistoryLimit))
Expect(err).NotTo(HaveOccurred())
defer stopDeployment(c, f.Client, ns, deploymentName)
err = waitForDeploymentOldRSsNum(c, ns, deploymentName, *revisionHistoryLimit)
Expect(err).NotTo(HaveOccurred())
@@ -460,10 +448,6 @@ func testRolloverDeployment(f *Framework) {
rsReplicas := 4
_, err := c.Extensions().ReplicaSets(ns).Create(newRS(rsName, rsReplicas, rsPodLabels, "nginx", "nginx"))
Expect(err).NotTo(HaveOccurred())
defer func() {
Logf("deleting replica set %s", rsName)
Expect(c.Extensions().ReplicaSets(ns).Delete(rsName, nil)).NotTo(HaveOccurred())
}()
// Verify that the required pods have come up.
err = verifyPods(unversionedClient, ns, podName, false, rsReplicas)
if err != nil {
@@ -486,16 +470,8 @@ func testRolloverDeployment(f *Framework) {
}
_, err = c.Extensions().Deployments(ns).Create(newDeployment)
Expect(err).NotTo(HaveOccurred())
defer stopDeployment(c, f.Client, ns, deploymentName)
// Verify that the pods were scaled up and down as expected. We use events to verify that.
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
Expect(err).NotTo(HaveOccurred())
@@ -534,12 +510,7 @@ func testPausedDeployment(f *Framework) {
Logf("Creating paused deployment %s", deploymentName)
_, err := c.Extensions().Deployments(ns).Create(d)
Expect(err).NotTo(HaveOccurred())
defer stopDeployment(c, f.Client, ns, deploymentName)
// Check that deployment is created fine.
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
Expect(err).NotTo(HaveOccurred())
@@ -618,21 +589,8 @@ func testRollbackDeployment(f *Framework) {
d := newDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType, nil)
_, err := c.Extensions().Deployments(ns).Create(d)
Expect(err).NotTo(HaveOccurred())
defer stopDeployment(c, f.Client, ns, deploymentName)
// Check that deployment is created fine.
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
Expect(err).NotTo(HaveOccurred())
@@ -718,10 +676,6 @@ func testRollbackDeploymentRSNoRevision(f *Framework) {
rs.Annotations["make"] = "difference"
_, err := c.Extensions().ReplicaSets(ns).Create(rs)
Expect(err).NotTo(HaveOccurred())
defer func() {
Logf("deleting replica set %s", rsName)
Expect(c.Extensions().ReplicaSets(ns).Delete(rsName, nil)).NotTo(HaveOccurred())
}()
// Create a deployment to create nginx pods, which have different template than the replica set created above.
deploymentName, deploymentImageName := "nginx-deployment", "nginx"
@@ -732,21 +686,8 @@ func testRollbackDeploymentRSNoRevision(f *Framework) {
d := newDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType, nil)
_, err = c.Extensions().Deployments(ns).Create(d)
Expect(err).NotTo(HaveOccurred())
defer stopDeployment(c, f.Client, ns, deploymentName)
// Check that deployment is created fine.
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
Expect(err).NotTo(HaveOccurred())