mirror of https://github.com/k3s-io/k3s
Add e2e test for TTL after finished
parent 47d06c446d
commit 0a6389e872
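The commit adds an end-to-end test for the TTL-after-finished feature, which lets the control plane garbage-collect a Job automatically once it has completed or failed. For orientation only, here is a minimal client-side sketch of a Job that opts into that behaviour; it is not part of the commit, it assumes client-go from the same release era (Create without a context argument), and the kubeconfig path, namespace, and object names are illustrative.

package main

import (
    "fmt"

    batchv1 "k8s.io/api/batch/v1"
    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/clientcmd"
)

func main() {
    // Assumes a reachable cluster with the TTLAfterFinished feature gate enabled.
    config, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
    if err != nil {
        panic(err)
    }
    client, err := kubernetes.NewForConfig(config)
    if err != nil {
        panic(err)
    }

    ttl := int32(30) // ask the TTL-after-finished controller to delete the Job ~30s after it finishes
    job := &batchv1.Job{
        ObjectMeta: metav1.ObjectMeta{Name: "ttl-demo"},
        Spec: batchv1.JobSpec{
            TTLSecondsAfterFinished: &ttl,
            Template: corev1.PodTemplateSpec{
                Spec: corev1.PodSpec{
                    RestartPolicy: corev1.RestartPolicyNever,
                    Containers: []corev1.Container{
                        {Name: "work", Image: "busybox", Command: []string{"true"}},
                    },
                },
            },
        },
    }
    // client-go of this era takes no context argument; newer releases use
    // Create(ctx, job, metav1.CreateOptions{}).
    created, err := client.BatchV1().Jobs("default").Create(job)
    if err != nil {
        panic(err)
    }
    fmt.Printf("created Job %s/%s with ttlSecondsAfterFinished=%d\n", created.Namespace, created.Name, ttl)
}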
@@ -44,7 +44,7 @@ var _ = SIGDescribe("Job", func() {
         Expect(err).NotTo(HaveOccurred())

         By("Ensuring job reaches completions")
-        err = framework.WaitForJobFinish(f.ClientSet, f.Namespace.Name, job.Name, completions)
+        err = framework.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, completions)
         Expect(err).NotTo(HaveOccurred())
     })

@@ -63,7 +63,7 @@ var _ = SIGDescribe("Job", func() {
         Expect(err).NotTo(HaveOccurred())

         By("Ensuring job reaches completions")
-        err = framework.WaitForJobFinish(f.ClientSet, f.Namespace.Name, job.Name, completions)
+        err = framework.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, completions)
         Expect(err).NotTo(HaveOccurred())
     })

@@ -84,7 +84,7 @@ var _ = SIGDescribe("Job", func() {
         Expect(err).NotTo(HaveOccurred())

         By("Ensuring job reaches completions")
-        err = framework.WaitForJobFinish(f.ClientSet, f.Namespace.Name, job.Name, *job.Spec.Completions)
+        err = framework.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, *job.Spec.Completions)
         Expect(err).NotTo(HaveOccurred())
     })

@@ -58,7 +58,7 @@ var _ = SIGDescribe("Metadata Concealment", func() {
         Expect(err).NotTo(HaveOccurred())

         By("Ensuring job reaches completions")
-        err = framework.WaitForJobFinish(f.ClientSet, f.Namespace.Name, job.Name, int32(1))
+        err = framework.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, int32(1))
         Expect(err).NotTo(HaveOccurred())
     })
 })
@@ -33,6 +33,7 @@ go_library(
         "secrets.go",
         "secrets_volume.go",
         "sysctl.go",
+        "ttlafterfinished.go",
         "util.go",
         "volumes.go",
     ],
@@ -42,13 +43,16 @@ go_library(
         "//pkg/apis/core:go_default_library",
         "//pkg/client/clientset_generated/internalclientset:go_default_library",
         "//pkg/client/conditions:go_default_library",
+        "//pkg/features:go_default_library",
         "//pkg/kubelet:go_default_library",
         "//pkg/kubelet/apis:go_default_library",
         "//pkg/kubelet/images:go_default_library",
         "//pkg/kubelet/sysctl:go_default_library",
         "//pkg/security/apparmor:go_default_library",
+        "//pkg/util/slice:go_default_library",
         "//pkg/util/version:go_default_library",
         "//staging/src/k8s.io/api/autoscaling/v1:go_default_library",
+        "//staging/src/k8s.io/api/batch/v1:go_default_library",
         "//staging/src/k8s.io/api/coordination/v1beta1:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
@@ -64,6 +68,7 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
+        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//staging/src/k8s.io/client-go/scale:go_default_library",
         "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
@@ -0,0 +1,101 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package common
+
+import (
+    "time"
+
+    batch "k8s.io/api/batch/v1"
+    "k8s.io/api/core/v1"
+    "k8s.io/apimachinery/pkg/util/wait"
+    utilfeature "k8s.io/apiserver/pkg/util/feature"
+    "k8s.io/kubernetes/pkg/features"
+    "k8s.io/kubernetes/pkg/util/slice"
+    "k8s.io/kubernetes/test/e2e/framework"
+
+    . "github.com/onsi/ginkgo"
+    . "github.com/onsi/gomega"
+)
+
+const dummyFinalizer = "k8s.io/dummy-finalizer"
+
+var _ = framework.KubeDescribe("TTLAfterFinished", func() {
+    f := framework.NewDefaultFramework("ttlafterfinished")
+
+    It("Job should be deleted once it finishes after TTL seconds [Feature:TTLAfterFinished]", func() {
+        if !utilfeature.DefaultFeatureGate.Enabled(features.TTLAfterFinished) {
+            framework.Skipf("Skip because %s feature is not enabled; run test with --feature-gates=%s=true", features.TTLAfterFinished, features.TTLAfterFinished)
+        }
+        testFinishedJob(f)
+    })
+})
+
+func cleanupJob(f *framework.Framework, job *batch.Job) {
+    ns := f.Namespace.Name
+    c := f.ClientSet
+
+    framework.Logf("Remove the Job's dummy finalizer; the Job should be deleted cascadingly")
+    removeFinalizerFunc := func(j *batch.Job) {
+        j.ObjectMeta.Finalizers = slice.RemoveString(j.ObjectMeta.Finalizers, dummyFinalizer, nil)
+    }
+    _, err := framework.UpdateJobWithRetries(c, ns, job.Name, removeFinalizerFunc)
+    Expect(err).NotTo(HaveOccurred())
+    framework.WaitForJobGone(c, ns, job.Name, wait.ForeverTestTimeout)
+
+    err = framework.WaitForAllJobPodsGone(c, ns, job.Name)
+    Expect(err).NotTo(HaveOccurred())
+}
+
+func testFinishedJob(f *framework.Framework) {
+    ns := f.Namespace.Name
+    c := f.ClientSet
+
+    parallelism := int32(1)
+    completions := int32(1)
+    backoffLimit := int32(2)
+    ttl := int32(10)
+
+    job := framework.NewTestJob("randomlySucceedOrFail", "rand-non-local", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit)
+    job.Spec.TTLSecondsAfterFinished = &ttl
+    job.ObjectMeta.Finalizers = []string{dummyFinalizer}
+    defer cleanupJob(f, job)
+
+    framework.Logf("Create a Job %s/%s with TTL", job.Namespace, job.Name)
+    job, err := framework.CreateJob(c, ns, job)
+    Expect(err).NotTo(HaveOccurred())
+
+    framework.Logf("Wait for the Job to finish")
+    err = framework.WaitForJobFinish(c, ns, job.Name)
+    Expect(err).NotTo(HaveOccurred())
+
+    framework.Logf("Wait for TTL after finished controller to delete the Job")
+    err = framework.WaitForJobDeleting(c, ns, job.Name)
+    Expect(err).NotTo(HaveOccurred())
+
+    framework.Logf("Check Job's deletionTimestamp and compare with the time when the Job finished")
+    job, err = framework.GetJob(c, ns, job.Name)
+    Expect(err).NotTo(HaveOccurred())
+    finishTime := framework.JobFinishTime(job)
+    finishTimeUTC := finishTime.UTC()
+    Expect(finishTime.IsZero()).NotTo(BeTrue())
+
+    deleteAtUTC := job.ObjectMeta.DeletionTimestamp.UTC()
+    Expect(deleteAtUTC).NotTo(BeNil())
+
+    expireAtUTC := finishTimeUTC.Add(time.Duration(ttl) * time.Second)
+    Expect(deleteAtUTC.Before(expireAtUTC)).To(BeFalse())
+}
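A note on the dummy finalizer used above: once the TTL expires, the TTL-after-finished controller issues a delete, but the finalizer keeps the Job object readable with metadata.deletionTimestamp set, which is what lets the test fetch the Job afterwards and compare that timestamp against the finish time; removing the finalizer in cleanupJob then lets the cascading deletion complete. In isolation the pattern looks roughly like the sketch below; it is not part of the commit, it assumes the same era of client-go (Delete/Get without context arguments), and observeDeletionTimestamp is an illustrative name.

package jobttl

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
)

// observeDeletionTimestamp deletes a Job that carries a blocking finalizer and
// returns the deletionTimestamp stamped on the object, which lingers until the
// finalizer is removed. Sketch only; retries and conflict handling are omitted.
func observeDeletionTimestamp(c kubernetes.Interface, ns, name string) (*metav1.Time, error) {
    if err := c.BatchV1().Jobs(ns).Delete(name, &metav1.DeleteOptions{}); err != nil {
        return nil, err
    }
    // The finalizer prevents the object from disappearing, so this Get still
    // succeeds and exposes the timestamp recorded by the delete request.
    job, err := c.BatchV1().Jobs(ns).Get(name, metav1.GetOptions{})
    if err != nil {
        return nil, err
    }
    return job.ObjectMeta.DeletionTimestamp, nil
}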
@@ -59,6 +59,7 @@ go_library(
         "//pkg/cloudprovider/providers/gce:go_default_library",
         "//pkg/controller:go_default_library",
         "//pkg/controller/deployment/util:go_default_library",
+        "//pkg/controller/job:go_default_library",
         "//pkg/controller/nodelifecycle:go_default_library",
         "//pkg/controller/service:go_default_library",
         "//pkg/features:go_default_library",
@@ -27,6 +27,7 @@ import (
     "k8s.io/apimachinery/pkg/labels"
     "k8s.io/apimachinery/pkg/util/wait"
     clientset "k8s.io/client-go/kubernetes"
+    jobutil "k8s.io/kubernetes/pkg/controller/job"
 )

 const (
@@ -181,8 +182,8 @@ func WaitForAllJobPodsRunning(c clientset.Interface, ns, jobName string, paralle
     })
 }

-// WaitForJobFinish uses c to wait for compeletions to complete for the Job jobName in namespace ns.
-func WaitForJobFinish(c clientset.Interface, ns, jobName string, completions int32) error {
+// WaitForJobComplete uses c to wait for compeletions to complete for the Job jobName in namespace ns.
+func WaitForJobComplete(c clientset.Interface, ns, jobName string, completions int32) error {
     return wait.Poll(Poll, JobTimeout, func() (bool, error) {
         curr, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{})
         if err != nil {
@@ -192,6 +193,17 @@ func WaitForJobFinish(c clientset.Interface, ns, jobName string, completions int
     })
 }

+// WaitForJobFinish uses c to wait for the Job jobName in namespace ns to finish (either Failed or Complete).
+func WaitForJobFinish(c clientset.Interface, ns, jobName string) error {
+    return wait.PollImmediate(Poll, JobTimeout, func() (bool, error) {
+        curr, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{})
+        if err != nil {
+            return false, err
+        }
+        return jobutil.IsJobFinished(curr), nil
+    })
+}
+
 // WaitForJobFailure uses c to wait for up to timeout for the Job named jobName in namespace ns to fail.
 func WaitForJobFailure(c clientset.Interface, ns, jobName string, timeout time.Duration, reason string) error {
     return wait.Poll(Poll, timeout, func() (bool, error) {
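The two hunks above split one name into two helpers: the old WaitForJobFinish, which actually waited for a given number of successful completions, becomes WaitForJobComplete, while WaitForJobFinish is redefined to return as soon as the Job is in a terminal state (a Complete or Failed condition), which is the state the TTL-after-finished controller acts on. As an illustration of what jobutil.IsJobFinished evaluates, a small sketch; isFinished is an illustrative name, not the commit's code.

package jobsketch

import (
    batchv1 "k8s.io/api/batch/v1"
    corev1 "k8s.io/api/core/v1"
)

// isFinished reports whether a Job has reached a terminal state, i.e. carries
// a Complete or Failed condition whose status is True. This mirrors the check
// that jobutil.IsJobFinished performs.
func isFinished(j *batchv1.Job) bool {
    for _, c := range j.Status.Conditions {
        if (c.Type == batchv1.JobComplete || c.Type == batchv1.JobFailed) && c.Status == corev1.ConditionTrue {
            return true
        }
    }
    return false
}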
@@ -239,6 +251,18 @@ func CheckForAllJobPodsRunning(c clientset.Interface, ns, jobName string, parall
     return count == parallelism, nil
 }

+// WaitForAllJobPodsRunning wait for all pods for the Job named jobName in namespace ns
+// to be deleted.
+func WaitForAllJobPodsGone(c clientset.Interface, ns, jobName string) error {
+    return wait.PollImmediate(Poll, JobTimeout, func() (bool, error) {
+        pods, err := GetJobPods(c, ns, jobName)
+        if err != nil {
+            return false, err
+        }
+        return len(pods.Items) == 0, nil
+    })
+}
+
 func newBool(val bool) *bool {
     p := new(bool)
     *p = val
@@ -250,7 +274,7 @@ type updateJobFunc func(*batch.Job)
 func UpdateJobWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateJobFunc) (job *batch.Job, err error) {
     jobs := c.BatchV1().Jobs(namespace)
     var updateErr error
-    pollErr := wait.PollImmediate(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
+    pollErr := wait.PollImmediate(Poll, JobTimeout, func() (bool, error) {
         if job, err = jobs.Get(name, metav1.GetOptions{}); err != nil {
             return false, err
         }
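The hunk above also swaps the hard-coded 10ms/1min retry parameters in UpdateJobWithRetries for the framework-wide Poll and JobTimeout constants, and the new wait helpers use wait.PollImmediate, which runs the condition once up front rather than sleeping through the first interval. A generic sketch of that primitive; waitForCondition and the 2s/1min values are illustrative, not taken from the commit.

package pollsketch

import (
    "time"

    "k8s.io/apimachinery/pkg/util/wait"
)

// waitForCondition polls check immediately and then every 2 seconds until it
// returns true, returns an error, or one minute elapses (wait.ErrWaitTimeout).
func waitForCondition(check func() (bool, error)) error {
    return wait.PollImmediate(2*time.Second, 1*time.Minute, check)
}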
@@ -268,3 +292,25 @@ func UpdateJobWithRetries(c clientset.Interface, namespace, name string, applyUp
     }
     return job, pollErr
 }
+
+// WaitForJobDeleting uses c to wait for the Job jobName in namespace ns to have
+// a non-nil deletionTimestamp (i.e. being deleted).
+func WaitForJobDeleting(c clientset.Interface, ns, jobName string) error {
+    return wait.PollImmediate(Poll, JobTimeout, func() (bool, error) {
+        curr, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{})
+        if err != nil {
+            return false, err
+        }
+        return curr.ObjectMeta.DeletionTimestamp != nil, nil
+    })
+}
+
+func JobFinishTime(finishedJob *batch.Job) metav1.Time {
+    var finishTime metav1.Time
+    for _, c := range finishedJob.Status.Conditions {
+        if (c.Type == batch.JobComplete || c.Type == batch.JobFailed) && c.Status == v1.ConditionTrue {
+            return c.LastTransitionTime
+        }
+    }
+    return finishTime
+}