k3s/pkg/controller/job/jobcontroller_test.go

/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package job
import (
"fmt"
"testing"
"time"
apiequality "k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
restclient "k8s.io/client-go/rest"
core "k8s.io/client-go/testing"
"k8s.io/client-go/tools/cache"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
batch "k8s.io/kubernetes/pkg/apis/batch/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions"
"k8s.io/kubernetes/pkg/controller"
)
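// alwaysReady reports the informer caches as synced so syncJob never blocks waiting for a sync in tests.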
var alwaysReady = func() bool { return true }
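// newJob returns a batch/v1 Job fixture whose pod template matches the job's label selector;
// pass -1 for parallelism or completions to leave that field nil.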
func newJob(parallelism, completions int32) *batch.Job {
j := &batch.Job{
ObjectMeta: metav1.ObjectMeta{
Name: "foobar",
Namespace: metav1.NamespaceDefault,
},
Spec: batch.JobSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"foo": "bar"},
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"foo": "bar",
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{Image: "foo/bar"},
},
},
},
},
}
// Special case: -1 for either completions or parallelism means leave the field nil
// (negative values are not allowed in practice by validation).
if completions >= 0 {
j.Spec.Completions = &completions
} else {
j.Spec.Completions = nil
}
if parallelism >= 0 {
j.Spec.Parallelism = &parallelism
} else {
j.Spec.Parallelism = nil
}
return j
}
func getKey(job *batch.Job, t *testing.T) string {
if key, err := controller.KeyFunc(job); err != nil {
t.Errorf("Unexpected error getting key for job %v: %v", job.Name, err)
return ""
} else {
return key
}
}
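// newJobControllerFromClient wires a JobController to shared informers backed by the given
// client so tests can populate the pod and job caches directly.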
func newJobControllerFromClient(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc) (*JobController, informers.SharedInformerFactory) {
sharedInformers := informers.NewSharedInformerFactory(kubeClient, resyncPeriod())
jm := NewJobController(sharedInformers.Core().V1().Pods(), sharedInformers.Batch().V1().Jobs(), kubeClient)
return jm, sharedInformers
}
// newPodList creates count pods with the given phase for the given job.
func newPodList(count int32, status v1.PodPhase, job *batch.Job) []v1.Pod {
pods := []v1.Pod{}
for i := int32(0); i < count; i++ {
newPod := v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("pod-%v", rand.String(10)),
Labels: job.Spec.Selector.MatchLabels,
Namespace: job.Namespace,
},
Status: v1.PodStatus{Phase: status},
}
pods = append(pods, newPod)
}
return pods
}
func TestControllerSyncJob(t *testing.T) {
testCases := map[string]struct {
// job setup
parallelism int32
completions int32
deleting bool
// pod setup
podControllerError error
pendingPods int32
activePods int32
succeededPods int32
failedPods int32
// expectations
expectedCreations int32
expectedDeletions int32
expectedActive int32
expectedSucceeded int32
expectedFailed int32
expectedComplete bool
}{
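// Each case lists its values positionally in the field order above:
// parallelism, completions, deleting; podControllerError, pendingPods, activePods,
// succeededPods, failedPods; expectedCreations, expectedDeletions, expectedActive,
// expectedSucceeded, expectedFailed, expectedComplete.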
"job start": {
2, 5, false,
nil, 0, 0, 0, 0,
2, 0, 2, 0, 0, false,
},
"WQ job start": {
2, -1, false,
nil, 0, 0, 0, 0,
2, 0, 2, 0, 0, false,
},
"pending pods": {
2, 5, false,
nil, 2, 0, 0, 0,
0, 0, 2, 0, 0, false,
},
"correct # of pods": {
2, 5, false,
nil, 0, 2, 0, 0,
0, 0, 2, 0, 0, false,
},
"WQ job: correct # of pods": {
2, -1, false,
nil, 0, 2, 0, 0,
0, 0, 2, 0, 0, false,
},
"too few active pods": {
2, 5, false,
nil, 0, 1, 1, 0,
1, 0, 2, 1, 0, false,
},
"too few active pods with a dynamic job": {
2, -1, false,
nil, 0, 1, 0, 0,
1, 0, 2, 0, 0, false,
},
"too few active pods, with controller error": {
2, 5, false,
fmt.Errorf("Fake error"), 0, 1, 1, 0,
1, 0, 1, 1, 0, false,
},
"too many active pods": {
2, 5, false,
nil, 0, 3, 0, 0,
0, 1, 2, 0, 0, false,
},
"too many active pods, with controller error": {
2, 5, false,
fmt.Errorf("Fake error"), 0, 3, 0, 0,
0, 1, 3, 0, 0, false,
},
"failed pod": {
2, 5, false,
nil, 0, 1, 1, 1,
1, 0, 2, 1, 1, false,
},
"job finish": {
2, 5, false,
nil, 0, 0, 5, 0,
0, 0, 0, 5, 0, true,
},
"WQ job finishing": {
2, -1, false,
nil, 0, 1, 1, 0,
0, 0, 1, 1, 0, false,
},
"WQ job all finished": {
2, -1, false,
nil, 0, 0, 2, 0,
0, 0, 0, 2, 0, true,
},
"WQ job all finished despite one failure": {
2, -1, false,
nil, 0, 0, 1, 1,
0, 0, 0, 1, 1, true,
},
"more active pods than completions": {
2, 5, false,
nil, 0, 10, 0, 0,
0, 8, 2, 0, 0, false,
},
"status change": {
2, 5, false,
nil, 0, 2, 2, 0,
0, 0, 2, 2, 0, false,
},
"deleting job": {
2, 5, true,
nil, 1, 1, 1, 0,
0, 0, 2, 1, 0, false,
},
}
for name, tc := range testCases {
// job manager setup
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
fakePodControl := controller.FakePodControl{Err: tc.podControllerError}
manager.podControl = &fakePodControl
manager.podStoreSynced = alwaysReady
manager.jobStoreSynced = alwaysReady
var actual *batch.Job
manager.updateHandler = func(job *batch.Job) error {
actual = job
return nil
}
// job & pods setup
job := newJob(tc.parallelism, tc.completions)
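// Simulate a job that is being torn down by setting its deletionTimestamp.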
if tc.deleting {
now := metav1.Now()
job.DeletionTimestamp = &now
}
sharedInformerFactory.Batch().V1().Jobs().Informer().GetIndexer().Add(job)
podIndexer := sharedInformerFactory.Core().V1().Pods().Informer().GetIndexer()
for _, pod := range newPodList(tc.pendingPods, v1.PodPending, job) {
podIndexer.Add(&pod)
}
for _, pod := range newPodList(tc.activePods, v1.PodRunning, job) {
podIndexer.Add(&pod)
}
for _, pod := range newPodList(tc.succeededPods, v1.PodSucceeded, job) {
podIndexer.Add(&pod)
}
for _, pod := range newPodList(tc.failedPods, v1.PodFailed, job) {
podIndexer.Add(&pod)
}
// run
err := manager.syncJob(getKey(job, t))
if err != nil {
t.Errorf("%s: unexpected error when syncing jobs %v", name, err)
}
// validate created/deleted pods
if int32(len(fakePodControl.Templates)) != tc.expectedCreations {
t.Errorf("%s: unexpected number of creates. Expected %d, saw %d\n", name, tc.expectedCreations, len(fakePodControl.Templates))
}
if int32(len(fakePodControl.DeletePodName)) != tc.expectedDeletions {
t.Errorf("%s: unexpected number of deletes. Expected %d, saw %d\n", name, tc.expectedDeletions, len(fakePodControl.DeletePodName))
}
// validate status
if actual.Status.Active != tc.expectedActive {
t.Errorf("%s: unexpected number of active pods. Expected %d, saw %d\n", name, tc.expectedActive, actual.Status.Active)
}
if actual.Status.Succeeded != tc.expectedSucceeded {
t.Errorf("%s: unexpected number of succeeded pods. Expected %d, saw %d\n", name, tc.expectedSucceeded, actual.Status.Succeeded)
}
if actual.Status.Failed != tc.expectedFailed {
t.Errorf("%s: unexpected number of failed pods. Expected %d, saw %d\n", name, tc.expectedFailed, actual.Status.Failed)
}
if actual.Status.StartTime == nil {
t.Errorf("%s: .status.startTime was not set", name)
}
// validate conditions
if tc.expectedComplete && !getCondition(actual, batch.JobComplete) {
t.Errorf("%s: expected completion condition. Got %#v", name, actual.Status.Conditions)
}
}
}
func TestSyncJobPastDeadline(t *testing.T) {
testCases := map[string]struct {
// job setup
parallelism int32
completions int32
activeDeadlineSeconds int64
startTime int64
// pod setup
activePods int32
succeededPods int32
failedPods int32
// expectations
expectedDeletions int32
expectedActive int32
expectedSucceeded int32
expectedFailed int32
}{
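// Values are positional: parallelism, completions, activeDeadlineSeconds, startTime;
// activePods, succeededPods, failedPods; expectedDeletions, expectedActive,
// expectedSucceeded, expectedFailed. startTime is how many seconds ago the job started,
// so a start older than the deadline should mark the job failed.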
"activeDeadlineSeconds less than single pod execution": {
1, 1, 10, 15,
1, 0, 0,
1, 0, 0, 1,
},
"activeDeadlineSeconds bigger than single pod execution": {
1, 2, 10, 15,
1, 1, 0,
1, 0, 1, 1,
},
"activeDeadlineSeconds times-out before any pod starts": {
1, 1, 10, 10,
0, 0, 0,
0, 0, 0, 0,
},
}
for name, tc := range testCases {
// job manager setup
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
fakePodControl := controller.FakePodControl{}
manager.podControl = &fakePodControl
manager.podStoreSynced = alwaysReady
manager.jobStoreSynced = alwaysReady
var actual *batch.Job
manager.updateHandler = func(job *batch.Job) error {
actual = job
return nil
}
// job & pods setup
job := newJob(tc.parallelism, tc.completions)
job.Spec.ActiveDeadlineSeconds = &tc.activeDeadlineSeconds
start := metav1.Unix(metav1.Now().Time.Unix()-tc.startTime, 0)
job.Status.StartTime = &start
sharedInformerFactory.Batch().V1().Jobs().Informer().GetIndexer().Add(job)
podIndexer := sharedInformerFactory.Core().V1().Pods().Informer().GetIndexer()
for _, pod := range newPodList(tc.activePods, v1.PodRunning, job) {
podIndexer.Add(&pod)
}
for _, pod := range newPodList(tc.succeededPods, v1.PodSucceeded, job) {
podIndexer.Add(&pod)
}
for _, pod := range newPodList(tc.failedPods, v1.PodFailed, job) {
podIndexer.Add(&pod)
}
// run
err := manager.syncJob(getKey(job, t))
if err != nil {
t.Errorf("%s: unexpected error when syncing jobs %v", name, err)
}
// validate created/deleted pods
if int32(len(fakePodControl.Templates)) != 0 {
t.Errorf("%s: unexpected number of creates. Expected 0, saw %d\n", name, len(fakePodControl.Templates))
}
if int32(len(fakePodControl.DeletePodName)) != tc.expectedDeletions {
t.Errorf("%s: unexpected number of deletes. Expected %d, saw %d\n", name, tc.expectedDeletions, len(fakePodControl.DeletePodName))
}
// validate status
if actual.Status.Active != tc.expectedActive {
t.Errorf("%s: unexpected number of active pods. Expected %d, saw %d\n", name, tc.expectedActive, actual.Status.Active)
}
if actual.Status.Succeeded != tc.expectedSucceeded {
t.Errorf("%s: unexpected number of succeeded pods. Expected %d, saw %d\n", name, tc.expectedSucceeded, actual.Status.Succeeded)
}
if actual.Status.Failed != tc.expectedFailed {
t.Errorf("%s: unexpected number of failed pods. Expected %d, saw %d\n", name, tc.expectedFailed, actual.Status.Failed)
}
if actual.Status.StartTime == nil {
t.Errorf("%s: .status.startTime was not set", name)
}
// validate conditions
if !getCondition(actual, batch.JobFailed) {
t.Errorf("%s: expected fail condition. Got %#v", name, actual.Status.Conditions)
}
}
}
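// getCondition reports whether the job carries the given condition type with status ConditionTrue.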
func getCondition(job *batch.Job, condition batch.JobConditionType) bool {
for _, v := range job.Status.Conditions {
if v.Type == condition && v.Status == v1.ConditionTrue {
return true
}
}
return false
}
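// TestSyncPastDeadlineJobFinished verifies that a job already marked JobFailed for exceeding
// its deadline is not modified again by a subsequent sync.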
func TestSyncPastDeadlineJobFinished(t *testing.T) {
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
fakePodControl := controller.FakePodControl{}
manager.podControl = &fakePodControl
manager.podStoreSynced = alwaysReady
manager.jobStoreSynced = alwaysReady
var actual *batch.Job
manager.updateHandler = func(job *batch.Job) error {
actual = job
return nil
}
job := newJob(1, 1)
activeDeadlineSeconds := int64(10)
job.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds
start := metav1.Unix(metav1.Now().Time.Unix()-15, 0)
job.Status.StartTime = &start
job.Status.Conditions = append(job.Status.Conditions, newCondition(batch.JobFailed, "DeadlineExceeded", "Job was active longer than specified deadline"))
sharedInformerFactory.Batch().V1().Jobs().Informer().GetIndexer().Add(job)
err := manager.syncJob(getKey(job, t))
if err != nil {
t.Errorf("Unexpected error when syncing jobs %v", err)
}
if len(fakePodControl.Templates) != 0 {
t.Errorf("Unexpected number of creates. Expected %d, saw %d\n", 0, len(fakePodControl.Templates))
}
if len(fakePodControl.DeletePodName) != 0 {
t.Errorf("Unexpected number of deletes. Expected %d, saw %d\n", 0, len(fakePodControl.DeletePodName))
}
if actual != nil {
t.Error("Unexpected job modification")
}
}
func TestSyncJobComplete(t *testing.T) {
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
fakePodControl := controller.FakePodControl{}
manager.podControl = &fakePodControl
manager.podStoreSynced = alwaysReady
manager.jobStoreSynced = alwaysReady
job := newJob(1, 1)
job.Status.Conditions = append(job.Status.Conditions, newCondition(batch.JobComplete, "", ""))
sharedInformerFactory.Batch().V1().Jobs().Informer().GetIndexer().Add(job)
err := manager.syncJob(getKey(job, t))
if err != nil {
t.Fatalf("Unexpected error when syncing jobs %v", err)
}
actual, err := manager.jobLister.Jobs(job.Namespace).Get(job.Name)
if err != nil {
t.Fatalf("Unexpected error when trying to get job from the store: %v", err)
}
// Verify that after syncing a complete job, the conditions are the same.
if got, expected := len(actual.Status.Conditions), 1; got != expected {
t.Fatalf("Unexpected job status conditions amount; expected %d, got %d", expected, got)
}
}
func TestSyncJobDeleted(t *testing.T) {
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
manager, _ := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
fakePodControl := controller.FakePodControl{}
manager.podControl = &fakePodControl
manager.podStoreSynced = alwaysReady
manager.jobStoreSynced = alwaysReady
manager.updateHandler = func(job *batch.Job) error { return nil }
job := newJob(2, 2)
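// The job is deliberately never added to the informer store, so syncJob treats it as
// deleted and should neither create nor delete pods.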
err := manager.syncJob(getKey(job, t))
if err != nil {
t.Errorf("Unexpected error when syncing jobs %v", err)
}
if len(fakePodControl.Templates) != 0 {
t.Errorf("Unexpected number of creates. Expected %d, saw %d\n", 0, len(fakePodControl.Templates))
}
if len(fakePodControl.DeletePodName) != 0 {
t.Errorf("Unexpected number of deletes. Expected %d, saw %d\n", 0, len(fakePodControl.DeletePodName))
}
}
func TestSyncJobUpdateRequeue(t *testing.T) {
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
fakePodControl := controller.FakePodControl{}
manager.podControl = &fakePodControl
manager.podStoreSynced = alwaysReady
manager.jobStoreSynced = alwaysReady
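// Simulate a failed status update: the handler re-enqueues the key and returns an error,
// so syncJob should surface updateError and the job should land back on the workqueue.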
updateError := fmt.Errorf("Update error")
manager.updateHandler = func(job *batch.Job) error {
manager.queue.AddRateLimited(getKey(job, t))
return updateError
}
job := newJob(2, 2)
sharedInformerFactory.Batch().V1().Jobs().Informer().GetIndexer().Add(job)
err := manager.syncJob(getKey(job, t))
if err == nil || err != updateError {
t.Errorf("Expected error %v when syncing jobs, got %v", updateError, err)
}
t.Log("Waiting for a job in the queue")
key, _ := manager.queue.Get()
expectedKey := getKey(job, t)
if key != expectedKey {
t.Errorf("Expected requeue of job with key %s got %s", expectedKey, key)
}
}
func TestJobPodLookup(t *testing.T) {
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
manager.podStoreSynced = alwaysReady
manager.jobStoreSynced = alwaysReady
testCases := []struct {
job *batch.Job
pod *v1.Pod
expectedName string
}{
// pods without labels don't match any job
{
job: &batch.Job{
ObjectMeta: metav1.ObjectMeta{Name: "basic"},
},
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "foo1", Namespace: metav1.NamespaceAll},
},
expectedName: "",
},
// matching labels, different namespace
{
job: &batch.Job{
ObjectMeta: metav1.ObjectMeta{Name: "foo"},
Spec: batch.JobSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"foo": "bar"},
},
},
},
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "foo2",
Namespace: "ns",
Labels: map[string]string{"foo": "bar"},
},
},
expectedName: "",
},
// matching ns and labels returns
{
job: &batch.Job{
ObjectMeta: metav1.ObjectMeta{Name: "bar", Namespace: "ns"},
Spec: batch.JobSpec{
Selector: &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: "foo",
Operator: metav1.LabelSelectorOpIn,
Values: []string{"bar"},
},
},
},
},
},
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "foo3",
Namespace: "ns",
Labels: map[string]string{"foo": "bar"},
},
},
expectedName: "bar",
},
}
for _, tc := range testCases {
sharedInformerFactory.Batch().V1().Jobs().Informer().GetIndexer().Add(tc.job)
if job := manager.getPodJob(tc.pod); job != nil {
if tc.expectedName != job.Name {
t.Errorf("Got job %+v expected %+v", job.Name, tc.expectedName)
}
} else if tc.expectedName != "" {
t.Errorf("Expected a job %v pod %v, found none", tc.expectedName, tc.pod.Name)
}
}
}
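// FakeJobExpectations wraps ControllerExpectations so tests can intercept the
// SatisfiedExpectations call, run a side effect, and force a fixed answer.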
type FakeJobExpectations struct {
*controller.ControllerExpectations
satisfied bool
expSatisfied func()
}
func (fe FakeJobExpectations) SatisfiedExpectations(controllerKey string) bool {
fe.expSatisfied()
return fe.satisfied
}
// TestSyncJobExpectations tests that a pod cannot sneak in between counting active pods
// and checking expectations.
func TestSyncJobExpectations(t *testing.T) {
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
fakePodControl := controller.FakePodControl{}
manager.podControl = &fakePodControl
manager.podStoreSynced = alwaysReady
manager.jobStoreSynced = alwaysReady
manager.updateHandler = func(job *batch.Job) error { return nil }
job := newJob(2, 2)
sharedInformerFactory.Batch().V1().Jobs().Informer().GetIndexer().Add(job)
pods := newPodList(2, v1.PodPending, job)
podIndexer := sharedInformerFactory.Core().V1().Pods().Informer().GetIndexer()
podIndexer.Add(&pods[0])
manager.expectations = FakeJobExpectations{
controller.NewControllerExpectations(), true, func() {
// If we check active pods before checking expectations, the job
// will create a new replica because it doesn't see this pod, but
// has fulfilled its expectations.
podIndexer.Add(&pods[1])
},
}
manager.syncJob(getKey(job, t))
if len(fakePodControl.Templates) != 0 {
t.Errorf("Unexpected number of creates. Expected %d, saw %d\n", 0, len(fakePodControl.Templates))
}
if len(fakePodControl.DeletePodName) != 0 {
t.Errorf("Unexpected number of deletes. Expected %d, saw %d\n", 0, len(fakePodControl.DeletePodName))
}
}
func TestWatchJobs(t *testing.T) {
clientset := fake.NewSimpleClientset()
fakeWatch := watch.NewFake()
clientset.PrependWatchReactor("jobs", core.DefaultWatchReactor(fakeWatch, nil))
manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
manager.podStoreSynced = alwaysReady
manager.jobStoreSynced = alwaysReady
var testJob batch.Job
received := make(chan struct{})
// The update sent through the fakeWatcher should make its way into the workqueue,
// and eventually into the syncHandler.
manager.syncHandler = func(key string) error {
defer close(received)
ns, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
t.Errorf("Error getting namespace/name from key %v: %v", key, err)
}
job, err := manager.jobLister.Jobs(ns).Get(name)
if err != nil || job == nil {
t.Errorf("Expected to find job under key %v: %v", key, err)
return nil
}
if !apiequality.Semantic.DeepDerivative(*job, testJob) {
t.Errorf("Expected %#v, but got %#v", testJob, *job)
}
return nil
}
// Start only the job watcher and the workqueue, send a watch event,
// and make sure it hits the sync method.
stopCh := make(chan struct{})
defer close(stopCh)
sharedInformerFactory.Start(stopCh)
go manager.Run(1, stopCh)
// We're sending a new job to see if it reaches syncHandler.
testJob.Namespace = "bar"
testJob.Name = "foo"
fakeWatch.Add(&testJob)
t.Log("Waiting for job to reach syncHandler")
<-received
}
func TestWatchPods(t *testing.T) {
testJob := newJob(2, 2)
clientset := fake.NewSimpleClientset(testJob)
fakeWatch := watch.NewFake()
clientset.PrependWatchReactor("pods", core.DefaultWatchReactor(fakeWatch, nil))
manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
manager.podStoreSynced = alwaysReady
manager.jobStoreSynced = alwaysReady
// Put one job and one pod into the store
sharedInformerFactory.Batch().V1().Jobs().Informer().GetIndexer().Add(testJob)
received := make(chan struct{})
// The pod update sent through the fakeWatcher should be mapped back to its managing job,
// which is then sent into the syncHandler.
manager.syncHandler = func(key string) error {
ns, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
t.Errorf("Error getting namespace/name from key %v: %v", key, err)
}
job, err := manager.jobLister.Jobs(ns).Get(name)
if err != nil {
t.Errorf("Expected to find job under key %v: %v", key, err)
}
if !apiequality.Semantic.DeepDerivative(job, testJob) {
t.Errorf("\nExpected %#v,\nbut got %#v", testJob, job)
close(received)
return nil
}
close(received)
return nil
}
// Start only the pod watcher and the workqueue, send a watch event,
// and make sure it hits the sync method for the right job.
stopCh := make(chan struct{})
defer close(stopCh)
go sharedInformerFactory.Core().V1().Pods().Informer().Run(stopCh)
go wait.Until(manager.worker, 10*time.Millisecond, stopCh)
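// Flip the pod to Failed before sending it, so the watch event carries a status change
// that must be mapped back to testJob and pushed through syncHandler.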
pods := newPodList(1, v1.PodRunning, testJob)
testPod := pods[0]
testPod.Status.Phase = v1.PodFailed
fakeWatch.Add(&testPod)
t.Log("Waiting for pod to reach syncHandler")
<-received
}