/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e

import (
	"time"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/errors"
	"k8s.io/kubernetes/pkg/apis/batch"
	client "k8s.io/kubernetes/pkg/client/unversioned"
	"k8s.io/kubernetes/pkg/kubectl"
	"k8s.io/kubernetes/pkg/labels"
	"k8s.io/kubernetes/pkg/util/wait"
	"k8s.io/kubernetes/test/e2e/framework"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

const (
	// How long to wait for a job to finish.
	jobTimeout = 15 * time.Minute

	// Job selector name
	jobSelectorKey = "job"
)
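
// Note: the waitFor* helpers at the bottom of this file poll the API at
// framework.Poll intervals and give up once jobTimeout has elapsed.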

var _ = framework.KubeDescribe("Job", func() {
	f := framework.NewDefaultFramework("job")
	parallelism := int32(2)
	completions := int32(4)
	lotsOfFailures := int32(5) // more than completions

	// Simplest case: all pods succeed promptly
	It("should run a job to completion when tasks succeed", func() {
		By("Creating a job")
		job := newTestJob("succeed", "all-succeed", api.RestartPolicyNever, parallelism, completions)
		job, err := createJob(f.Client, f.Namespace.Name, job)
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring job reaches completions")
		err = waitForJobFinish(f.Client, f.Namespace.Name, job.Name, completions)
		Expect(err).NotTo(HaveOccurred())
	})

	// Pods sometimes fail, but eventually succeed.
	It("should run a job to completion when tasks sometimes fail and are locally restarted", func() {
		By("Creating a job")
		// One failure, then a success, local restarts.
		// We can't use the random failure approach used by the
		// non-local test below, because kubelet will throttle
		// frequently failing containers in a given pod, ramping
		// up to 5 minutes between restarts, making test timeouts
		// due to successive failures too likely with a reasonable
		// test timeout.
		job := newTestJob("failOnce", "fail-once-local", api.RestartPolicyOnFailure, parallelism, completions)
		job, err := createJob(f.Client, f.Namespace.Name, job)
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring job reaches completions")
		err = waitForJobFinish(f.Client, f.Namespace.Name, job.Name, completions)
		Expect(err).NotTo(HaveOccurred())
	})

	// Pods sometimes fail, but eventually succeed, after pod restarts
	It("should run a job to completion when tasks sometimes fail and are not locally restarted", func() {
		By("Creating a job")
		// 50% chance of container success, no local restarts.
		// Can't use the failOnce approach because that relies
		// on an emptyDir, which is not preserved across new pods.
		// Worst case analysis: 15 failures, each taking 1 minute to
		// run due to some slowness, 1 in 2^15 chance of happening,
		// causing test flake. Should be very rare.
		job := newTestJob("randomlySucceedOrFail", "rand-non-local", api.RestartPolicyNever, parallelism, completions)
		job, err := createJob(f.Client, f.Namespace.Name, job)
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring job reaches completions")
		err = waitForJobFinish(f.Client, f.Namespace.Name, job.Name, completions)
		Expect(err).NotTo(HaveOccurred())
	})

	It("should keep restarting failed pods", func() {
		By("Creating a job")
		job := newTestJob("fail", "all-fail", api.RestartPolicyNever, parallelism, completions)
		job, err := createJob(f.Client, f.Namespace.Name, job)
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring job shows many failures")
		err = wait.Poll(framework.Poll, jobTimeout, func() (bool, error) {
			curr, err := f.Client.Extensions().Jobs(f.Namespace.Name).Get(job.Name)
			if err != nil {
				return false, err
			}
			return curr.Status.Failed > lotsOfFailures, nil
		})
		Expect(err).NotTo(HaveOccurred())
	})

	It("should scale a job up", func() {
		startParallelism := int32(1)
		endParallelism := int32(2)
		By("Creating a job")
		job := newTestJob("notTerminate", "scale-up", api.RestartPolicyNever, startParallelism, completions)
		job, err := createJob(f.Client, f.Namespace.Name, job)
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring active pods == startParallelism")
		err = waitForAllPodsRunning(f.Client, f.Namespace.Name, job.Name, startParallelism)
		Expect(err).NotTo(HaveOccurred())

		By("scale job up")
		scaler, err := kubectl.ScalerFor(batch.Kind("Job"), f.Client)
		Expect(err).NotTo(HaveOccurred())
		waitForScale := kubectl.NewRetryParams(5*time.Second, 1*time.Minute)
		waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute)
		err = scaler.Scale(f.Namespace.Name, job.Name, uint(endParallelism), nil, waitForScale, waitForReplicas)
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring active pods == endParallelism")
		err = waitForAllPodsRunning(f.Client, f.Namespace.Name, job.Name, endParallelism)
		Expect(err).NotTo(HaveOccurred())
	})

	It("should scale a job down", func() {
		startParallelism := int32(2)
		endParallelism := int32(1)
		By("Creating a job")
		job := newTestJob("notTerminate", "scale-down", api.RestartPolicyNever, startParallelism, completions)
		job, err := createJob(f.Client, f.Namespace.Name, job)
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring active pods == startParallelism")
		err = waitForAllPodsRunning(f.Client, f.Namespace.Name, job.Name, startParallelism)
		Expect(err).NotTo(HaveOccurred())

		By("scale job down")
		scaler, err := kubectl.ScalerFor(batch.Kind("Job"), f.Client)
		Expect(err).NotTo(HaveOccurred())
		waitForScale := kubectl.NewRetryParams(5*time.Second, 1*time.Minute)
		waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute)
		err = scaler.Scale(f.Namespace.Name, job.Name, uint(endParallelism), nil, waitForScale, waitForReplicas)
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring active pods == endParallelism")
		err = waitForAllPodsRunning(f.Client, f.Namespace.Name, job.Name, endParallelism)
		Expect(err).NotTo(HaveOccurred())
	})

	It("should delete a job", func() {
		By("Creating a job")
		job := newTestJob("notTerminate", "foo", api.RestartPolicyNever, parallelism, completions)
		job, err := createJob(f.Client, f.Namespace.Name, job)
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring active pods == parallelism")
		err = waitForAllPodsRunning(f.Client, f.Namespace.Name, job.Name, parallelism)
		Expect(err).NotTo(HaveOccurred())

		By("delete a job")
		reaper, err := kubectl.ReaperFor(batch.Kind("Job"), f.Client)
		Expect(err).NotTo(HaveOccurred())
		timeout := 1 * time.Minute
		err = reaper.Stop(f.Namespace.Name, job.Name, timeout, api.NewDeleteOptions(0))
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring job was deleted")
		_, err = f.Client.Extensions().Jobs(f.Namespace.Name).Get(job.Name)
		Expect(err).To(HaveOccurred())
		Expect(errors.IsNotFound(err)).To(BeTrue())
	})

	It("should fail a job", func() {
		By("Creating a job")
		job := newTestJob("notTerminate", "foo", api.RestartPolicyNever, parallelism, completions)
		activeDeadlineSeconds := int64(10)
		job.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds
		job, err := createJob(f.Client, f.Namespace.Name, job)
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring job was failed")
		err = waitForJobFail(f.Client, f.Namespace.Name, job.Name)
		Expect(err).NotTo(HaveOccurred())
	})
})
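
// Helpers shared by the specs above. newTestJob builds a Job whose single
// busybox container runs one of a few canned shell behaviors ("succeed",
// "fail", "notTerminate", "randomlySucceedOrFail", "failOnce"); createJob and
// deleteJob wrap the extensions client; the waitFor* helpers poll job or pod
// status until the expected state is reached or jobTimeout expires.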

// newTestJob returns a job which does one of several testing behaviors.
func newTestJob(behavior, name string, rPol api.RestartPolicy, parallelism, completions int32) *batch.Job {
	job := &batch.Job{
		ObjectMeta: api.ObjectMeta{
			Name: name,
		},
		Spec: batch.JobSpec{
			Parallelism:    &parallelism,
			Completions:    &completions,
			ManualSelector: newBool(true),
			Template: api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{
					Labels: map[string]string{jobSelectorKey: name},
				},
				Spec: api.PodSpec{
					RestartPolicy: rPol,
					Volumes: []api.Volume{
						{
							Name: "data",
							VolumeSource: api.VolumeSource{
								EmptyDir: &api.EmptyDirVolumeSource{},
							},
						},
					},
					Containers: []api.Container{
						{
							Name:    "c",
							Image:   "gcr.io/google_containers/busybox:1.24",
							Command: []string{},
							VolumeMounts: []api.VolumeMount{
								{
									MountPath: "/data",
									Name:      "data",
								},
							},
						},
					},
				},
			},
		},
	}
	switch behavior {
	case "notTerminate":
		job.Spec.Template.Spec.Containers[0].Command = []string{"sleep", "1000000"}
	case "fail":
		job.Spec.Template.Spec.Containers[0].Command = []string{"/bin/sh", "-c", "exit 1"}
	case "succeed":
		job.Spec.Template.Spec.Containers[0].Command = []string{"/bin/sh", "-c", "exit 0"}
	case "randomlySucceedOrFail":
		// Bash's $RANDOM generates pseudorandom int in range 0 - 32767.
		// Dividing by 16384 gives roughly 50/50 chance of success.
		job.Spec.Template.Spec.Containers[0].Command = []string{"/bin/sh", "-c", "exit $(( $RANDOM / 16384 ))"}
	case "failOnce":
		// Fail the first time the container of the pod is run, and
		// succeed the second time. Checks for file on emptydir.
		// If present, succeed. If not, create but fail.
		// Note that this cannot be used with RestartPolicyNever because
		// it always fails the first time for a pod.
		job.Spec.Template.Spec.Containers[0].Command = []string{"/bin/sh", "-c", "if [[ -r /data/foo ]] ; then exit 0 ; else touch /data/foo ; exit 1 ; fi"}
	}
	return job
}

func createJob(c *client.Client, ns string, job *batch.Job) (*batch.Job, error) {
	return c.Extensions().Jobs(ns).Create(job)
}

func deleteJob(c *client.Client, ns, name string) error {
	return c.Extensions().Jobs(ns).Delete(name, nil)
}

// Wait for all pods to become Running. Only use when pods will run for a long time, or it will be racy.
func waitForAllPodsRunning(c *client.Client, ns, jobName string, parallelism int32) error {
	label := labels.SelectorFromSet(labels.Set(map[string]string{jobSelectorKey: jobName}))
	return wait.Poll(framework.Poll, jobTimeout, func() (bool, error) {
		options := api.ListOptions{LabelSelector: label}
		pods, err := c.Pods(ns).List(options)
		if err != nil {
			return false, err
		}
		count := int32(0)
		for _, p := range pods.Items {
			if p.Status.Phase == api.PodRunning {
				count++
			}
		}
		return count == parallelism, nil
	})
}

// Wait for job to reach completions.
func waitForJobFinish(c *client.Client, ns, jobName string, completions int32) error {
	return wait.Poll(framework.Poll, jobTimeout, func() (bool, error) {
		curr, err := c.Extensions().Jobs(ns).Get(jobName)
		if err != nil {
			return false, err
		}
		return curr.Status.Succeeded == completions, nil
	})
}

// Wait for a job to fail.
func waitForJobFail(c *client.Client, ns, jobName string) error {
	return wait.Poll(framework.Poll, jobTimeout, func() (bool, error) {
		curr, err := c.Extensions().Jobs(ns).Get(jobName)
		if err != nil {
			return false, err
		}
		for _, c := range curr.Status.Conditions {
			if c.Type == batch.JobFailed && c.Status == api.ConditionTrue {
				return true, nil
			}
		}
		return false, nil
	})
}

func newBool(val bool) *bool {
	p := new(bool)
	*p = val
	return p
}