Merge pull request #48068 from soltysh/cronjob_removal_e2e

Automatic merge from submit-queue

Add e2e test for cronjob chained removal

This is a test proving that https://github.com/kubernetes/kubernetes/pull/44058 works with CronJobs. It will fail until the aforementioned PR merges.

@caesarxuchao ptal
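
For context on the mechanics: the "chained removal" being tested is the garbage collector following ownerReferences from CronJob to Job to Pod once the CronJob is deleted with a non-orphaning propagation policy. Below is a minimal client-go sketch (not part of this PR) of the delete call the new e2e test issues; the package name, `clientset`, `namespace`, and `name` are illustrative placeholders.

```go
package gcexample

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
)

// deleteCronJobCascading deletes a CronJob with background propagation, letting
// the garbage collector remove its Jobs (and their Pods) via ownerReferences.
func deleteCronJobCascading(clientset kubernetes.Interface, namespace, name string) error {
    policy := metav1.DeletePropagationBackground
    return clientset.BatchV2alpha1().CronJobs(namespace).Delete(name, &metav1.DeleteOptions{PropagationPolicy: &policy})
}
```

Because background propagation returns before the dependents are actually gone, the test then polls until no cronjobs, jobs, or pods remain in the namespace.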
Kubernetes Submit Queue 2017-08-08 04:58:17 -07:00 committed by GitHub
commit 6b529fb096
2 changed files with 128 additions and 8 deletions


@@ -33,6 +33,8 @@ go_library(
         "//vendor/github.com/onsi/ginkgo:go_default_library",
         "//vendor/github.com/onsi/gomega:go_default_library",
         "//vendor/k8s.io/api/admissionregistration/v1alpha1:go_default_library",
+        "//vendor/k8s.io/api/batch/v1:go_default_library",
+        "//vendor/k8s.io/api/batch/v2alpha1:go_default_library",
         "//vendor/k8s.io/api/core/v1:go_default_library",
         "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
         "//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library",
@@ -43,6 +45,7 @@ go_library(
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",


@@ -20,6 +20,8 @@ import (
     "fmt"
     "time"

+    batchv1 "k8s.io/api/batch/v1"
+    batchv2alpha1 "k8s.io/api/batch/v2alpha1"
     "k8s.io/api/core/v1"
     v1beta1 "k8s.io/api/extensions/v1beta1"
     apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
@@ -28,6 +30,7 @@ import (
     "k8s.io/apimachinery/pkg/api/errors"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+    "k8s.io/apimachinery/pkg/runtime/schema"
     "k8s.io/apimachinery/pkg/types"
     "k8s.io/apimachinery/pkg/util/wait"
     "k8s.io/apiserver/pkg/storage/names"
@@ -46,6 +49,11 @@ func getForegroundOptions() *metav1.DeleteOptions {
     return &metav1.DeleteOptions{PropagationPolicy: &policy}
 }

+func getBackgroundOptions() *metav1.DeleteOptions {
+    policy := metav1.DeletePropagationBackground
+    return &metav1.DeleteOptions{PropagationPolicy: &policy}
+}
+
 func getOrphanOptions() *metav1.DeleteOptions {
     var trueVar = true
     return &metav1.DeleteOptions{OrphanDependents: &trueVar}
@@ -56,7 +64,11 @@ func getNonOrphanOptions() *metav1.DeleteOptions {
     return &metav1.DeleteOptions{OrphanDependents: &falseVar}
 }

-var zero = int64(0)
+var (
+    zero = int64(0)
+
+    CronJobGroupVersionResource = schema.GroupVersionResource{Group: batchv2alpha1.GroupName, Version: "v2alpha1", Resource: "cronjobs"}
+)

 func getPodTemplateSpec(labels map[string]string) v1.PodTemplateSpec {
     return v1.PodTemplateSpec{
@@ -175,10 +187,10 @@ func newGCPod(name string) *v1.Pod {
     }
 }

-// verifyRemainingObjects verifies if the number of the remaining replication
+// verifyRemainingReplicationControllersPods verifies if the number of the remaining replication
 // controllers and pods are rcNum and podNum. It returns error if the
 // communication with the API server fails.
-func verifyRemainingObjects(f *framework.Framework, clientSet clientset.Interface, rcNum, podNum int) (bool, error) {
+func verifyRemainingReplicationControllersPods(f *framework.Framework, clientSet clientset.Interface, rcNum, podNum int) (bool, error) {
     rcClient := clientSet.Core().ReplicationControllers(f.Namespace.Name)
     pods, err := clientSet.Core().Pods(f.Namespace.Name).List(metav1.ListOptions{})
     if err != nil {
@@ -200,6 +212,42 @@ func verifyRemainingObjects(f *framework.Framework, clientSet clientset.Interface, rcNum, podNum int) (bool, error) {
     return ret, nil
 }

+// verifyRemainingCronJobsJobsPods verifies if the number of remaining cronjobs,
+// jobs and pods. It returns error if the communication with the API server fails.
+func verifyRemainingCronJobsJobsPods(f *framework.Framework, clientSet clientset.Interface,
+    cjNum, jobNum, podNum int) (bool, error) {
+    var ret = true
+
+    cronJobs, err := f.ClientSet.BatchV2alpha1().CronJobs(f.Namespace.Name).List(metav1.ListOptions{})
+    if err != nil {
+        return false, fmt.Errorf("Failed to list cronjobs: %v", err)
+    }
+    if len(cronJobs.Items) != cjNum {
+        ret = false
+        By(fmt.Sprintf("expected %d cronjobs, got %d cronjobs", cjNum, len(cronJobs.Items)))
+    }
+
+    jobs, err := f.ClientSet.Batch().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
+    if err != nil {
+        return false, fmt.Errorf("Failed to list jobs: %v", err)
+    }
+    if len(jobs.Items) != jobNum {
+        ret = false
+        By(fmt.Sprintf("expected %d jobs, got %d jobs", jobNum, len(jobs.Items)))
+    }
+
+    pods, err := f.ClientSet.Core().Pods(f.Namespace.Name).List(metav1.ListOptions{})
+    if err != nil {
+        return false, fmt.Errorf("Failed to list pods: %v", err)
+    }
+    if len(pods.Items) != podNum {
+        ret = false
+        By(fmt.Sprintf("expected %d pods, got %d pods", podNum, len(pods.Items)))
+    }
+
+    return ret, nil
+}
+
 func gatherMetrics(f *framework.Framework) {
     By("Gathering metrics")
     var summary framework.TestDataSummary
@@ -217,6 +265,40 @@ func gatherMetrics(f *framework.Framework) {
     }
 }

+func newCronJob(name, schedule string) *batchv2alpha1.CronJob {
+    parallelism := int32(1)
+    completions := int32(1)
+    return &batchv2alpha1.CronJob{
+        ObjectMeta: metav1.ObjectMeta{
+            Name: name,
+        },
+        TypeMeta: metav1.TypeMeta{
+            Kind: "CronJob",
+        },
+        Spec: batchv2alpha1.CronJobSpec{
+            Schedule: schedule,
+            JobTemplate: batchv2alpha1.JobTemplateSpec{
+                Spec: batchv1.JobSpec{
+                    Parallelism: &parallelism,
+                    Completions: &completions,
+                    Template: v1.PodTemplateSpec{
+                        Spec: v1.PodSpec{
+                            RestartPolicy: v1.RestartPolicyOnFailure,
+                            Containers: []v1.Container{
+                                {
+                                    Name:    "c",
+                                    Image:   "gcr.io/google_containers/busybox:1.24",
+                                    Command: []string{"sleep", "300"},
+                                },
+                            },
+                        },
+                    },
+                },
+            },
+        },
+    }
+}
+
 var _ = SIGDescribe("Garbage collector", func() {
     f := framework.NewDefaultFramework("gc")
     It("should delete pods created by rc when not orphaning", func() {
@@ -259,7 +341,7 @@ var _ = SIGDescribe("Garbage collector", func() {
         By("wait for all pods to be garbage collected")
         // wait for the RCs and Pods to reach the expected numbers.
         if err := wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
-            return verifyRemainingObjects(f, clientSet, 0, 0)
+            return verifyRemainingReplicationControllersPods(f, clientSet, 0, 0)
         }); err != nil {
             framework.Failf("failed to wait for all pods to be deleted: %v", err)
             remainingPods, err := podClient.List(metav1.ListOptions{})
@@ -413,8 +495,8 @@ var _ = SIGDescribe("Garbage collector", func() {
             return len(rsList.Items) > 0, nil
         })
-        if err == wait.ErrWaitTimeout {
-            err = fmt.Errorf("Failed to wait for the Deployment to create some ReplicaSet: %v", err)
+        if err != nil {
+            framework.Failf("Failed to wait for the Deployment to create some ReplicaSet: %v", err)
         }

         By("delete the deployment")
@@ -464,8 +546,8 @@ var _ = SIGDescribe("Garbage collector", func() {
             return len(rsList.Items) > 0, nil
         })
-        if err == wait.ErrWaitTimeout {
-            err = fmt.Errorf("Failed to wait for the Deployment to create some ReplicaSet: %v", err)
+        if err != nil {
+            framework.Failf("Failed to wait for the Deployment to create some ReplicaSet: %v", err)
         }

         By("delete the deployment")
@@ -852,4 +934,39 @@ var _ = SIGDescribe("Garbage collector", func() {
             }
         }
     })
+
+    It("should delete jobs and pods created by cronjob", func() {
+        framework.SkipIfMissingResource(f.ClientPool, CronJobGroupVersionResource, f.Namespace.Name)
+
+        By("Create the cronjob")
+        cronJob := newCronJob("simple", "*/1 * * * ?")
+        cronJob, err := f.ClientSet.BatchV2alpha1().CronJobs(f.Namespace.Name).Create(cronJob)
+        Expect(err).NotTo(HaveOccurred())
+
+        By("Wait for the CronJob to create new Job")
+        err = wait.PollImmediate(500*time.Millisecond, 2*time.Minute, func() (bool, error) {
+            jobs, err := f.ClientSet.Batch().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
+            if err != nil {
+                return false, fmt.Errorf("Failed to list jobs: %v", err)
+            }
+            return len(jobs.Items) > 0, nil
+        })
+        if err != nil {
+            framework.Failf("Failed to wait for the CronJob to create some Jobs: %v", err)
+        }
+
+        By("Delete the cronjob")
+        if err := f.ClientSet.BatchV2alpha1().CronJobs(f.Namespace.Name).Delete(cronJob.Name, getBackgroundOptions()); err != nil {
+            framework.Failf("Failed to delete the CronJob: %v", err)
+        }
+        By("Verify if cronjob does not leave jobs nor pods behind")
+        err = wait.PollImmediate(500*time.Millisecond, 1*time.Minute, func() (bool, error) {
+            return verifyRemainingCronJobsJobsPods(f, f.ClientSet, 0, 0, 0)
+        })
+        if err != nil {
+            framework.Failf("Failed to wait for all jobs and pods to be deleted: %v", err)
+        }
+
+        gatherMetrics(f)
+    })
 })