mirror of https://github.com/k3s-io/k3s
Remove extensions/v1beta1 Job
parent e0ecb09fda
commit 9f064c57ce
@@ -193,7 +193,6 @@ func Run(s *options.ServerRunOptions) error {
 	if err != nil {
 		return fmt.Errorf("error in initializing storage factory: %s", err)
 	}
-	storageFactory.AddCohabitatingResources(batch.Resource("jobs"), extensions.Resource("jobs"))
 	storageFactory.AddCohabitatingResources(autoscaling.Resource("horizontalpodautoscalers"), extensions.Resource("horizontalpodautoscalers"))
 	for _, override := range s.Etcd.EtcdServersOverrides {
 		tokens := strings.Split(override, "#")
@@ -24,9 +24,22 @@ import (
 	"k8s.io/kubernetes/pkg/apis/batch"
 	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
 	"k8s.io/kubernetes/pkg/controller/cronjob"
+	"k8s.io/kubernetes/pkg/controller/job"
 	"k8s.io/kubernetes/pkg/runtime/schema"
 )
 
+func startJobController(ctx ControllerContext) (bool, error) {
+	if !ctx.AvailableResources[schema.GroupVersionResource{Group: "batch", Version: "v1", Resource: "jobs"}] {
+		return false, nil
+	}
+	go job.NewJobController(
+		ctx.InformerFactory.Pods().Informer(),
+		ctx.InformerFactory.Jobs(),
+		ctx.ClientBuilder.ClientOrDie("job-controller"),
+	).Run(int(ctx.Options.ConcurrentJobSyncs), ctx.Stop)
+	return true, nil
+}
+
 func startCronJobController(ctx ControllerContext) (bool, error) {
 	if !ctx.AvailableResources[schema.GroupVersionResource{Group: "batch", Version: "v2alpha1", Resource: "cronjobs"}] {
 		return false, nil
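
The relocated startJobController keeps the same gating pattern: a controller starts only if API discovery reports that the resource's group/version is actually served. A minimal, self-contained sketch of that pattern, with a hypothetical gvr type standing in for schema.GroupVersionResource (this is an illustration, not the Kubernetes implementation):

package main

import "fmt"

// gvr is an illustrative stand-in for schema.GroupVersionResource.
type gvr struct{ Group, Version, Resource string }

// startIfAvailable mirrors the shape of the check above: consult a
// discovery-derived availability map and only start the controller when
// the resource is served. The real code launches a goroutine; this sketch
// calls start directly so the output is deterministic.
func startIfAvailable(available map[gvr]bool, want gvr, start func()) bool {
	if !available[want] {
		return false
	}
	start()
	return true
}

func main() {
	available := map[gvr]bool{
		{Group: "batch", Version: "v1", Resource: "jobs"}: true,
	}
	ok := startIfAvailable(available, gvr{"batch", "v1", "jobs"}, func() {
		fmt.Println("job controller started")
	})
	fmt.Println("started:", ok)
}
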
@@ -23,7 +23,6 @@ package app
 import (
 	"k8s.io/kubernetes/pkg/controller/daemon"
 	"k8s.io/kubernetes/pkg/controller/deployment"
-	"k8s.io/kubernetes/pkg/controller/job"
 	replicaset "k8s.io/kubernetes/pkg/controller/replicaset"
 	"k8s.io/kubernetes/pkg/runtime/schema"
 )
@@ -42,18 +41,6 @@ func startDaemonSetController(ctx ControllerContext) (bool, error) {
 	return true, nil
 }
 
-func startJobController(ctx ControllerContext) (bool, error) {
-	if !ctx.AvailableResources[schema.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "jobs"}] {
-		return false, nil
-	}
-	go job.NewJobController(
-		ctx.InformerFactory.Pods().Informer(),
-		ctx.InformerFactory.Jobs(),
-		ctx.ClientBuilder.ClientOrDie("job-controller"),
-	).Run(int(ctx.Options.ConcurrentJobSyncs), ctx.Stop)
-	return true, nil
-}
-
 func startDeploymentController(ctx ControllerContext) (bool, error) {
 	if !ctx.AvailableResources[schema.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "deployments"}] {
 		return false, nil
@@ -1198,12 +1198,6 @@ __EOF__
   # Pre-Condition: no Job exists
   kube::test::get_object_assert jobs "{{range.items}}{{$id_field}}:{{end}}" ''
   # Command
-  kubectl run pi --generator=job/v1beta1 "--image=$IMAGE_PERL" --restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(20)' "${kube_flags[@]}"
-  # Post-Condition: Job "pi" is created
-  kube::test::get_object_assert jobs "{{range.items}}{{$id_field}}:{{end}}" 'pi:'
-  # Clean up
-  kubectl delete jobs pi "${kube_flags[@]}"
-  # Command
   kubectl run pi --generator=job/v1 "--image=$IMAGE_PERL" --restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(20)' "${kube_flags[@]}"
   # Post-Condition: Job "pi" is created
   kube::test::get_object_assert jobs "{{range.items}}{{$id_field}}:{{end}}" 'pi:'
@@ -109,16 +109,14 @@ echo "${ETCD_VERSION}/${STORAGE_BACKEND_ETCD2}" > "${ETCD_DIR}/version.txt"
 
 # source_file,resource,namespace,name,old_version,new_version
 tests=(
-  test/fixtures/doc-yaml/user-guide/job.yaml,jobs,default,pi,extensions/v1beta1,batch/v1
   test/fixtures/doc-yaml/user-guide/horizontal-pod-autoscaling/hpa-php-apache.yaml,horizontalpodautoscalers,default,php-apache,extensions/v1beta1,autoscaling/v1
 )
 
-# need to include extensions/v1beta1 in new api version because its internal types are used by jobs
-# and hpas
+# need to include extensions/v1beta1 in new api version because its internal types are used by hpas
 KUBE_OLD_API_VERSION="v1,extensions/v1beta1"
-KUBE_NEW_API_VERSION="v1,extensions/v1beta1,batch/v1,autoscaling/v1"
-KUBE_OLD_STORAGE_VERSIONS="batch=extensions/v1beta1,autoscaling=extensions/v1beta1"
-KUBE_NEW_STORAGE_VERSIONS="batch/v1,autoscaling/v1"
+KUBE_NEW_API_VERSION="v1,extensions/v1beta1,autoscaling/v1"
+KUBE_OLD_STORAGE_VERSIONS="autoscaling=extensions/v1beta1"
+KUBE_NEW_STORAGE_VERSIONS="autoscaling/v1"
 
 ### END TEST DEFINITION CUSTOMIZATION ###
 
@@ -104,8 +104,6 @@ func TestDefaulting(t *testing.T) {
 		{Group: "extensions", Version: "v1beta1", Kind: "DeploymentList"}:              {},
 		{Group: "extensions", Version: "v1beta1", Kind: "HorizontalPodAutoscaler"}:     {},
 		{Group: "extensions", Version: "v1beta1", Kind: "HorizontalPodAutoscalerList"}: {},
-		{Group: "extensions", Version: "v1beta1", Kind: "Job"}:                         {},
-		{Group: "extensions", Version: "v1beta1", Kind: "JobList"}:                     {},
 		{Group: "extensions", Version: "v1beta1", Kind: "ReplicaSet"}:                  {},
 		{Group: "extensions", Version: "v1beta1", Kind: "ReplicaSetList"}:              {},
 		{Group: "rbac.authorization.k8s.io", Version: "v1alpha1", Kind: "ClusterRoleBinding"}: {},
@@ -19,7 +19,6 @@ package extensions
 import (
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/apis/autoscaling"
-	"k8s.io/kubernetes/pkg/apis/batch"
 	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 	"k8s.io/kubernetes/pkg/runtime"
 	"k8s.io/kubernetes/pkg/runtime/schema"
@@ -55,9 +54,6 @@ func addKnownTypes(scheme *runtime.Scheme) error {
 		&DeploymentRollback{},
 		&autoscaling.HorizontalPodAutoscaler{},
 		&autoscaling.HorizontalPodAutoscalerList{},
-		&batch.Job{},
-		&batch.JobList{},
-		&batch.JobTemplate{},
 		&ReplicationControllerDummy{},
 		&Scale{},
 		&ThirdPartyResource{},
@@ -22,7 +22,6 @@ import (
 	"k8s.io/kubernetes/pkg/api"
 	v1 "k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/apis/autoscaling"
-	"k8s.io/kubernetes/pkg/apis/batch"
 	"k8s.io/kubernetes/pkg/apis/extensions"
 	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 	"k8s.io/kubernetes/pkg/conversion"
@@ -48,9 +47,6 @@ func addConversionFuncs(scheme *runtime.Scheme) error {
 		Convert_v1beta1_SubresourceReference_To_autoscaling_CrossVersionObjectReference,
 		Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v1beta1_HorizontalPodAutoscalerSpec,
 		Convert_v1beta1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec,
-		// batch
-		Convert_batch_JobSpec_To_v1beta1_JobSpec,
-		Convert_v1beta1_JobSpec_To_batch_JobSpec,
 	)
 	if err != nil {
 		return err
@@ -74,16 +70,7 @@ func addConversionFuncs(scheme *runtime.Scheme) error {
 		}
 	}
 
-	return api.Scheme.AddFieldLabelConversionFunc("extensions/v1beta1", "Job",
-		func(label, value string) (string, string, error) {
-			switch label {
-			case "metadata.name", "metadata.namespace", "status.successful":
-				return label, value, nil
-			default:
-				return "", "", fmt.Errorf("field label not supported: %s", label)
-			}
-		},
-	)
+	return nil
 }
 
 func Convert_extensions_ScaleStatus_To_v1beta1_ScaleStatus(in *extensions.ScaleStatus, out *ScaleStatus, s conversion.Scope) error {
@@ -262,56 +249,6 @@ func Convert_v1beta1_ReplicaSetSpec_To_extensions_ReplicaSetSpec(in *ReplicaSetS
 	return nil
 }
 
-func Convert_batch_JobSpec_To_v1beta1_JobSpec(in *batch.JobSpec, out *JobSpec, s conversion.Scope) error {
-	out.Parallelism = in.Parallelism
-	out.Completions = in.Completions
-	out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds
-	out.Selector = in.Selector
-	// BEGIN non-standard conversion
-	// autoSelector has opposite meaning as manualSelector.
-	// in both cases, unset means false, and unset is always preferred to false.
-	// unset vs set-false distinction is not preserved.
-	manualSelector := in.ManualSelector != nil && *in.ManualSelector
-	autoSelector := !manualSelector
-	if autoSelector {
-		out.AutoSelector = new(bool)
-		*out.AutoSelector = true
-	} else {
-		out.AutoSelector = nil
-	}
-	// END non-standard conversion
-
-	if err := v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-func Convert_v1beta1_JobSpec_To_batch_JobSpec(in *JobSpec, out *batch.JobSpec, s conversion.Scope) error {
-	out.Parallelism = in.Parallelism
-	out.Completions = in.Completions
-	out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds
-	out.Selector = in.Selector
-	// BEGIN non-standard conversion
-	// autoSelector has opposite meaning as manualSelector.
-	// in both cases, unset means false, and unset is always preferred to false.
-	// unset vs set-false distinction is not preserved.
-	autoSelector := bool(in.AutoSelector != nil && *in.AutoSelector)
-	manualSelector := !autoSelector
-	if manualSelector {
-		out.ManualSelector = new(bool)
-		*out.ManualSelector = true
-	} else {
-		out.ManualSelector = nil
-	}
-	// END non-standard conversion
-
-	if err := v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
-		return err
-	}
-	return nil
-}
-
 func Convert_autoscaling_CrossVersionObjectReference_To_v1beta1_SubresourceReference(in *autoscaling.CrossVersionObjectReference, out *SubresourceReference, s conversion.Scope) error {
 	out.Kind = in.Kind
 	out.Name = in.Name
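
The deleted converters implement an unusual rule: autoSelector and manualSelector are opposites, unset means false, and the output prefers leaving the field unset over writing an explicit false, so the unset-vs-false distinction is deliberately dropped. A self-contained sketch of just that *bool inversion (invertFlag and fmtPtr are hypothetical helpers, not part of the real conversion code):

package main

import "fmt"

// invertFlag applies the non-standard rule: the opposite of true is
// expressed as nil ("unset"), and the opposite of false-or-unset is an
// explicit true.
func invertFlag(in *bool) *bool {
	set := in != nil && *in
	if set {
		return nil
	}
	out := true
	return &out
}

func fmtPtr(p *bool) string {
	if p == nil {
		return "unset"
	}
	return fmt.Sprint(*p)
}

func main() {
	t, f := true, false
	// Matches the table in the deleted conversion test:
	// unset -> true, false -> true, true -> unset.
	for _, in := range []*bool{nil, &f, &t} {
		fmt.Printf("%s -> %s\n", fmtPtr(in), fmtPtr(invertFlag(in)))
	}
}
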
@@ -1,83 +0,0 @@
-/*
-Copyright 2016 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1beta1_test
-
-import (
-	"reflect"
-	"testing"
-
-	"k8s.io/kubernetes/pkg/api"
-	"k8s.io/kubernetes/pkg/apis/batch"
-	versioned "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
-)
-
-// TestJobSpecConversion tests that ManualSelector and AutoSelector
-// are handled correctly.
-func TestJobSpecConversion(t *testing.T) {
-	pTrue := new(bool)
-	*pTrue = true
-	pFalse := new(bool)
-	*pFalse = false
-
-	// False or nil convert to true.
-	// True converts to nil.
-	tests := []struct {
-		in        *bool
-		expectOut *bool
-	}{
-		{
-			in:        nil,
-			expectOut: pTrue,
-		},
-		{
-			in:        pFalse,
-			expectOut: pTrue,
-		},
-		{
-			in:        pTrue,
-			expectOut: nil,
-		},
-	}
-
-	// Test internal -> v1beta1.
-	for _, test := range tests {
-		i := &batch.JobSpec{
-			ManualSelector: test.in,
-		}
-		v := versioned.JobSpec{}
-		if err := api.Scheme.Convert(i, &v, nil); err != nil {
-			t.Fatalf("unexpected error: %v", err)
-		}
-		if !reflect.DeepEqual(test.expectOut, v.AutoSelector) {
-			t.Fatalf("want v1beta1.AutoSelector %v, got %v", test.expectOut, v.AutoSelector)
-		}
-	}
-
-	// Test v1beta1 -> internal.
-	for _, test := range tests {
-		i := &versioned.JobSpec{
-			AutoSelector: test.in,
-		}
-		e := batch.JobSpec{}
-		if err := api.Scheme.Convert(i, &e, nil); err != nil {
-			t.Fatalf("unexpected error: %v", err)
-		}
-		if !reflect.DeepEqual(test.expectOut, e.ManualSelector) {
-			t.Fatalf("want extensions.ManualSelector %v, got %v", test.expectOut, e.ManualSelector)
-		}
-	}
-}
@@ -28,7 +28,6 @@ func addDefaultingFuncs(scheme *runtime.Scheme) error {
 	return scheme.AddDefaultingFuncs(
 		SetDefaults_DaemonSet,
 		SetDefaults_Deployment,
-		SetDefaults_Job,
 		SetDefaults_HorizontalPodAutoscaler,
 		SetDefaults_ReplicaSet,
 		SetDefaults_NetworkPolicy,
@@ -91,40 +90,6 @@ func SetDefaults_Deployment(obj *Deployment) {
 	}
 }
 
-func SetDefaults_Job(obj *Job) {
-	labels := obj.Spec.Template.Labels
-	// TODO: support templates defined elsewhere when we support them in the API
-	if labels != nil {
-		// if an autoselector is requested, we'll build the selector later with controller-uid and job-name
-		autoSelector := bool(obj.Spec.AutoSelector != nil && *obj.Spec.AutoSelector)
-
-		// otherwise, we are using a manual selector
-		manualSelector := !autoSelector
-
-		// and default behavior for an unspecified manual selector is to use the pod template labels
-		if manualSelector && obj.Spec.Selector == nil {
-			obj.Spec.Selector = &metav1.LabelSelector{
-				MatchLabels: labels,
-			}
-		}
-		if len(obj.Labels) == 0 {
-			obj.Labels = labels
-		}
-	}
-	// For a non-parallel job, you can leave both `.spec.completions` and
-	// `.spec.parallelism` unset. When both are unset, both are defaulted to 1.
-	if obj.Spec.Completions == nil && obj.Spec.Parallelism == nil {
-		obj.Spec.Completions = new(int32)
-		*obj.Spec.Completions = 1
-		obj.Spec.Parallelism = new(int32)
-		*obj.Spec.Parallelism = 1
-	}
-	if obj.Spec.Parallelism == nil {
-		obj.Spec.Parallelism = new(int32)
-		*obj.Spec.Parallelism = 1
-	}
-}
-
 func SetDefaults_HorizontalPodAutoscaler(obj *HorizontalPodAutoscaler) {
 	if obj.Spec.MinReplicas == nil {
 		minReplicas := int32(1)
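
The removed defaulter encodes two rules for parallelism and completions: when both are unset they both default to 1 (the non-parallel case), and when only parallelism is unset it defaults to 1; an explicit value, including an explicit 0, is left alone. A stripped-down sketch of just those rules (jobSpec and newInt32 are local stand-ins for the real types):

package main

import "fmt"

type jobSpec struct {
	Completions *int32
	Parallelism *int32
}

func newInt32(v int32) *int32 { return &v }

// defaultJobSpec applies the two rules described above.
func defaultJobSpec(s *jobSpec) {
	if s.Completions == nil && s.Parallelism == nil {
		s.Completions = newInt32(1)
		s.Parallelism = newInt32(1)
	}
	if s.Parallelism == nil {
		s.Parallelism = newInt32(1)
	}
}

func main() {
	wq := jobSpec{Parallelism: newInt32(0)} // work-queue job: completions unset
	defaultJobSpec(&wq)
	fmt.Println(wq.Completions == nil, *wq.Parallelism) // true 0 (unchanged)

	var plain jobSpec // non-parallel job: both unset
	defaultJobSpec(&plain)
	fmt.Println(*plain.Completions, *plain.Parallelism) // 1 1
}
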
@@ -286,278 +286,6 @@ func TestSetDefaultDeployment(t *testing.T) {
 	}
 }
 
-func TestSetDefaultJob(t *testing.T) {
-	defaultLabels := map[string]string{"default": "default"}
-	tests := map[string]struct {
-		original     *Job
-		expected     *Job
-		expectLabels bool
-	}{
-		"both unspecified -> sets both to 1": {
-			original: &Job{
-				Spec: JobSpec{
-					Template: v1.PodTemplateSpec{
-						ObjectMeta: v1.ObjectMeta{Labels: defaultLabels},
-					},
-				},
-			},
-			expected: &Job{
-				Spec: JobSpec{
-					Completions: newInt32(1),
-					Parallelism: newInt32(1),
-				},
-			},
-			expectLabels: true,
-		},
-		"both unspecified -> sets both to 1 and no default labels": {
-			original: &Job{
-				ObjectMeta: v1.ObjectMeta{
-					Labels: map[string]string{"mylabel": "myvalue"},
-				},
-				Spec: JobSpec{
-					Template: v1.PodTemplateSpec{
-						ObjectMeta: v1.ObjectMeta{Labels: defaultLabels},
-					},
-				},
-			},
-			expected: &Job{
-				Spec: JobSpec{
-					Completions: newInt32(1),
-					Parallelism: newInt32(1),
-				},
-			},
-		},
-		"WQ: Parallelism explicitly 0 and completions unset -> no change": {
-			original: &Job{
-				Spec: JobSpec{
-					Parallelism: newInt32(0),
-					Template: v1.PodTemplateSpec{
-						ObjectMeta: v1.ObjectMeta{Labels: defaultLabels},
-					},
-				},
-			},
-			expected: &Job{
-				Spec: JobSpec{
-					Parallelism: newInt32(0),
-				},
-			},
-			expectLabels: true,
-		},
-		"WQ: Parallelism explicitly 2 and completions unset -> no change": {
-			original: &Job{
-				Spec: JobSpec{
-					Parallelism: newInt32(2),
-					Template: v1.PodTemplateSpec{
-						ObjectMeta: v1.ObjectMeta{Labels: defaultLabels},
-					},
-				},
-			},
-			expected: &Job{
-				Spec: JobSpec{
-					Parallelism: newInt32(2),
-				},
-			},
-			expectLabels: true,
-		},
-		"Completions explicitly 2 and parallelism unset -> parallelism is defaulted": {
-			original: &Job{
-				Spec: JobSpec{
-					Completions: newInt32(2),
-					Template: v1.PodTemplateSpec{
-						ObjectMeta: v1.ObjectMeta{Labels: defaultLabels},
-					},
-				},
-			},
-			expected: &Job{
-				Spec: JobSpec{
-					Completions: newInt32(2),
-					Parallelism: newInt32(1),
-				},
-			},
-			expectLabels: true,
-		},
-		"Both set -> no change": {
-			original: &Job{
-				Spec: JobSpec{
-					Completions: newInt32(10),
-					Parallelism: newInt32(11),
-					Template: v1.PodTemplateSpec{
-						ObjectMeta: v1.ObjectMeta{Labels: defaultLabels},
-					},
-				},
-			},
-			expected: &Job{
-				Spec: JobSpec{
-					Completions: newInt32(10),
-					Parallelism: newInt32(11),
-					Template: v1.PodTemplateSpec{
-						ObjectMeta: v1.ObjectMeta{Labels: defaultLabels},
-					},
-				},
-			},
-			expectLabels: true,
-		},
-		"Both set, flipped -> no change": {
-			original: &Job{
-				Spec: JobSpec{
-					Completions: newInt32(11),
-					Parallelism: newInt32(10),
-					Template: v1.PodTemplateSpec{
-						ObjectMeta: v1.ObjectMeta{Labels: defaultLabels},
-					},
-				},
-			},
-			expected: &Job{
-				Spec: JobSpec{
-					Completions: newInt32(11),
-					Parallelism: newInt32(10),
-				},
-			},
-			expectLabels: true,
-		},
-	}
-
-	for name, test := range tests {
-		original := test.original
-		expected := test.expected
-		obj2 := roundTrip(t, runtime.Object(original))
-		actual, ok := obj2.(*Job)
-		if !ok {
-			t.Errorf("%s: unexpected object: %v", name, actual)
-			t.FailNow()
-		}
-		if (actual.Spec.Completions == nil) != (expected.Spec.Completions == nil) {
-			t.Errorf("%s: got different *completions than expected: %v %v", name, actual.Spec.Completions, expected.Spec.Completions)
-		}
-		if actual.Spec.Completions != nil && expected.Spec.Completions != nil {
-			if *actual.Spec.Completions != *expected.Spec.Completions {
-				t.Errorf("%s: got different completions than expected: %d %d", name, *actual.Spec.Completions, *expected.Spec.Completions)
-			}
-		}
-		if (actual.Spec.Parallelism == nil) != (expected.Spec.Parallelism == nil) {
-			t.Errorf("%s: got different *Parallelism than expected: %v %v", name, actual.Spec.Parallelism, expected.Spec.Parallelism)
-		}
-		if actual.Spec.Parallelism != nil && expected.Spec.Parallelism != nil {
-			if *actual.Spec.Parallelism != *expected.Spec.Parallelism {
-				t.Errorf("%s: got different parallelism than expected: %d %d", name, *actual.Spec.Parallelism, *expected.Spec.Parallelism)
-			}
-		}
-		if test.expectLabels != reflect.DeepEqual(actual.Labels, actual.Spec.Template.Labels) {
-			if test.expectLabels {
-				t.Errorf("%s: expected: %v, got: %v", name, actual.Spec.Template.Labels, actual.Labels)
-			} else {
-				t.Errorf("%s: unexpected equality: %v", name, actual.Labels)
-			}
-		}
-	}
-}
-
-func TestSetDefaultJobSelector(t *testing.T) {
-	tests := []struct {
-		original         *Job
-		expectedSelector *metav1.LabelSelector
-	}{
-		// selector set explicitly, nil autoSelector
-		{
-			original: &Job{
-				Spec: JobSpec{
-					Selector: &metav1.LabelSelector{
-						MatchLabels: map[string]string{"job": "selector"},
-					},
-				},
-			},
-			expectedSelector: &metav1.LabelSelector{
-				MatchLabels: map[string]string{"job": "selector"},
-			},
-		},
-		// selector set explicitly, autoSelector=true
-		{
-			original: &Job{
-				Spec: JobSpec{
-					Selector: &metav1.LabelSelector{
-						MatchLabels: map[string]string{"job": "selector"},
-					},
-					AutoSelector: newBool(true),
-				},
-			},
-			expectedSelector: &metav1.LabelSelector{
-				MatchLabels: map[string]string{"job": "selector"},
-			},
-		},
-		// selector set explicitly, autoSelector=false
-		{
-			original: &Job{
-				Spec: JobSpec{
-					Selector: &metav1.LabelSelector{
-						MatchLabels: map[string]string{"job": "selector"},
-					},
-					AutoSelector: newBool(false),
-				},
-			},
-			expectedSelector: &metav1.LabelSelector{
-				MatchLabels: map[string]string{"job": "selector"},
-			},
-		},
-		// selector from template labels
-		{
-			original: &Job{
-				Spec: JobSpec{
-					Template: v1.PodTemplateSpec{
-						ObjectMeta: v1.ObjectMeta{
-							Labels: map[string]string{"job": "selector"},
-						},
-					},
-				},
-			},
-			expectedSelector: &metav1.LabelSelector{
-				MatchLabels: map[string]string{"job": "selector"},
-			},
-		},
-		// selector from template labels, autoSelector=false
-		{
-			original: &Job{
-				Spec: JobSpec{
-					Template: v1.PodTemplateSpec{
-						ObjectMeta: v1.ObjectMeta{
-							Labels: map[string]string{"job": "selector"},
-						},
-					},
-					AutoSelector: newBool(false),
-				},
-			},
-			expectedSelector: &metav1.LabelSelector{
-				MatchLabels: map[string]string{"job": "selector"},
-			},
-		},
-		// selector not copied from template labels, autoSelector=true
-		{
-			original: &Job{
-				Spec: JobSpec{
-					Template: v1.PodTemplateSpec{
-						ObjectMeta: v1.ObjectMeta{
-							Labels: map[string]string{"job": "selector"},
-						},
-					},
-					AutoSelector: newBool(true),
-				},
-			},
-			expectedSelector: nil,
-		},
-	}
-
-	for i, testcase := range tests {
-		obj2 := roundTrip(t, runtime.Object(testcase.original))
-		got, ok := obj2.(*Job)
-		if !ok {
-			t.Errorf("%d: unexpected object: %v", i, got)
-			t.FailNow()
-		}
-		if !reflect.DeepEqual(got.Spec.Selector, testcase.expectedSelector) {
-			t.Errorf("%d: got different selectors %#v %#v", i, got.Spec.Selector, testcase.expectedSelector)
-		}
-	}
-}
-
 func TestSetDefaultReplicaSet(t *testing.T) {
 	tests := []struct {
 		rs *ReplicaSet
@@ -48,8 +48,6 @@ func addKnownTypes(scheme *runtime.Scheme) error {
 		&DeploymentRollback{},
 		&HorizontalPodAutoscaler{},
 		&HorizontalPodAutoscalerList{},
-		&Job{},
-		&JobList{},
 		&ReplicationControllerDummy{},
 		&Scale{},
 		&ThirdPartyResource{},
@@ -616,149 +616,6 @@ type ThirdPartyResourceDataList struct {
 
 // +genclient=true
 
-// Job represents the configuration of a single job.
-// DEPRECATED: extensions/v1beta1.Job is deprecated, use batch/v1.Job instead.
-type Job struct {
-	metav1.TypeMeta `json:",inline"`
-	// Standard object's metadata.
-	// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
-	// +optional
-	v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
-
-	// Spec is a structure defining the expected behavior of a job.
-	// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
-	// +optional
-	Spec JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
-
-	// Status is a structure describing current status of a job.
-	// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
-	// +optional
-	Status JobStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
-}
-
-// JobList is a collection of jobs.
-// DEPRECATED: extensions/v1beta1.JobList is deprecated, use batch/v1.JobList instead.
-type JobList struct {
-	metav1.TypeMeta `json:",inline"`
-	// Standard list metadata
-	// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
-	// +optional
-	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
-
-	// Items is the list of Job.
-	Items []Job `json:"items" protobuf:"bytes,2,rep,name=items"`
-}
-
-// JobSpec describes how the job execution will look like.
-type JobSpec struct {
-
-	// Parallelism specifies the maximum desired number of pods the job should
-	// run at any given time. The actual number of pods running in steady state will
-	// be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism),
-	// i.e. when the work left to do is less than max parallelism.
-	// More info: http://kubernetes.io/docs/user-guide/jobs
-	// +optional
-	Parallelism *int32 `json:"parallelism,omitempty" protobuf:"varint,1,opt,name=parallelism"`
-
-	// Completions specifies the desired number of successfully finished pods the
-	// job should be run with. Setting to nil means that the success of any
-	// pod signals the success of all pods, and allows parallelism to have any positive
-	// value. Setting to 1 means that parallelism is limited to 1 and the success of that
-	// pod signals the success of the job.
-	// More info: http://kubernetes.io/docs/user-guide/jobs
-	// +optional
-	Completions *int32 `json:"completions,omitempty" protobuf:"varint,2,opt,name=completions"`
-
-	// Optional duration in seconds relative to the startTime that the job may be active
-	// before the system tries to terminate it; value must be positive integer
-	// +optional
-	ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,3,opt,name=activeDeadlineSeconds"`
-
-	// Selector is a label query over pods that should match the pod count.
-	// Normally, the system sets this field for you.
-	// More info: http://kubernetes.io/docs/user-guide/labels#label-selectors
-	// +optional
-	Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,opt,name=selector"`
-
-	// AutoSelector controls generation of pod labels and pod selectors.
-	// It was not present in the original extensions/v1beta1 Job definition, but exists
-	// to allow conversion from batch/v1 Jobs, where it corresponds to, but has the opposite
-	// meaning as, ManualSelector.
-	// More info: http://releases.k8s.io/HEAD/docs/design/selector-generation.md
-	// +optional
-	AutoSelector *bool `json:"autoSelector,omitempty" protobuf:"varint,5,opt,name=autoSelector"`
-
-	// Template is the object that describes the pod that will be created when
-	// executing a job.
-	// More info: http://kubernetes.io/docs/user-guide/jobs
-	Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,6,opt,name=template"`
-}
-
-// JobStatus represents the current state of a Job.
-type JobStatus struct {
-
-	// Conditions represent the latest available observations of an object's current state.
-	// More info: http://kubernetes.io/docs/user-guide/jobs
-	// +optional
-	Conditions []JobCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
-
-	// StartTime represents time when the job was acknowledged by the Job Manager.
-	// It is not guaranteed to be set in happens-before order across separate operations.
-	// It is represented in RFC3339 form and is in UTC.
-	// +optional
-	StartTime *metav1.Time `json:"startTime,omitempty" protobuf:"bytes,2,opt,name=startTime"`
-
-	// CompletionTime represents time when the job was completed. It is not guaranteed to
-	// be set in happens-before order across separate operations.
-	// It is represented in RFC3339 form and is in UTC.
-	// +optional
-	CompletionTime *metav1.Time `json:"completionTime,omitempty" protobuf:"bytes,3,opt,name=completionTime"`
-
-	// Active is the number of actively running pods.
-	// +optional
-	Active int32 `json:"active,omitempty" protobuf:"varint,4,opt,name=active"`
-
-	// Succeeded is the number of pods which reached Phase Succeeded.
-	// +optional
-	Succeeded int32 `json:"succeeded,omitempty" protobuf:"varint,5,opt,name=succeeded"`
-
-	// Failed is the number of pods which reached Phase Failed.
-	// +optional
-	Failed int32 `json:"failed,omitempty" protobuf:"varint,6,opt,name=failed"`
-}
-
-type JobConditionType string
-
-// These are valid conditions of a job.
-const (
-	// JobComplete means the job has completed its execution.
-	JobComplete JobConditionType = "Complete"
-	// JobFailed means the job has failed its execution.
-	JobFailed JobConditionType = "Failed"
-)
-
-// JobCondition describes current state of a job.
-type JobCondition struct {
-	// Type of job condition, Complete or Failed.
-	Type JobConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=JobConditionType"`
-	// Status of the condition, one of True, False, Unknown.
-	Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/kubernetes/pkg/api/v1.ConditionStatus"`
-	// Last time the condition was checked.
-	// +optional
-	LastProbeTime metav1.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"`
-	// Last time the condition transit from one status to another.
-	// +optional
-	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"`
-	// (brief) reason for the condition's last transition.
-	// +optional
-	Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"`
-	// Human readable message indicating details about last transition.
-	// +optional
-	Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"`
-}
-
-// +genclient=true
-
 // Ingress is a collection of rules that allow inbound connections to reach the
 // endpoints defined by a backend. An Ingress can be configured to give services
 // externally-reachable urls, load balance traffic, terminate SSL, offer name
@@ -24,7 +24,7 @@ import (
 	"testing"
 
 	"k8s.io/kubernetes/pkg/api"
-	"k8s.io/kubernetes/pkg/apis/extensions"
+	"k8s.io/kubernetes/pkg/apis/batch"
 	"k8s.io/kubernetes/pkg/auth/authorizer"
 )
 
@@ -88,13 +88,13 @@ func TestGetAuthorizerAttributes(t *testing.T) {
 		},
 		"API group resource": {
 			Verb: "GET",
-			Path: "/apis/extensions/v1beta1/namespaces/myns/jobs",
+			Path: "/apis/batch/v1/namespaces/myns/jobs",
 			ExpectedAttributes: &authorizer.AttributesRecord{
 				Verb:            "list",
-				Path:            "/apis/extensions/v1beta1/namespaces/myns/jobs",
+				Path:            "/apis/batch/v1/namespaces/myns/jobs",
 				ResourceRequest: true,
-				APIGroup:        extensions.GroupName,
-				APIVersion:      "v1beta1",
+				APIGroup:        batch.GroupName,
+				APIVersion:      "v1",
 				Namespace:       "myns",
 				Resource:        "jobs",
 			},
@@ -169,9 +169,9 @@ func TestGetNonAPIRequestInfo(t *testing.T) {
 		"simple groupless":  {"/api/version/resource", true},
 		"simple group":      {"/apis/group/version/resource/name/subresource", true},
 		"more steps":        {"/api/version/resource/name/subresource", true},
-		"group list":        {"/apis/extensions/v1beta1/job", true},
-		"group get":         {"/apis/extensions/v1beta1/job/foo", true},
-		"group subresource": {"/apis/extensions/v1beta1/job/foo/scale", true},
+		"group list":        {"/apis/batch/v1/job", true},
+		"group get":         {"/apis/batch/v1/job/foo", true},
+		"group subresource": {"/apis/batch/v1/job/foo/scale", true},
 
 		"bad root":                    {"/not-api/version/resource", false},
 		"group without enough steps":  {"/apis/extensions/v1beta1", false},
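
The updated expectations follow the standard /apis/&lt;group&gt;/&lt;version&gt;/namespaces/&lt;namespace&gt;/&lt;resource&gt; layout. A rough sketch of how such a path maps to authorizer attributes — a deliberate simplification with hypothetical names; the real RequestInfo resolver handles many more URL shapes:

package main

import (
	"fmt"
	"strings"
)

type attributes struct {
	APIGroup, APIVersion, Namespace, Resource string
}

// parseAPIPath handles only the namespaced collection form:
// /apis/<group>/<version>/namespaces/<ns>/<resource>
func parseAPIPath(path string) (attributes, bool) {
	parts := strings.Split(strings.Trim(path, "/"), "/")
	if len(parts) != 6 || parts[0] != "apis" || parts[3] != "namespaces" {
		return attributes{}, false
	}
	return attributes{
		APIGroup:   parts[1],
		APIVersion: parts[2],
		Namespace:  parts[4],
		Resource:   parts[5],
	}, true
}

func main() {
	a, ok := parseAPIPath("/apis/batch/v1/namespaces/myns/jobs")
	fmt.Println(ok, a) // true {batch v1 myns jobs}
}
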
@@ -1,129 +0,0 @@
-/*
-Copyright 2016 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package fake
-
-import (
-	api "k8s.io/kubernetes/pkg/api"
-	v1 "k8s.io/kubernetes/pkg/api/v1"
-	v1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
-	meta_v1 "k8s.io/kubernetes/pkg/apis/meta/v1"
-	core "k8s.io/kubernetes/pkg/client/testing/core"
-	labels "k8s.io/kubernetes/pkg/labels"
-	schema "k8s.io/kubernetes/pkg/runtime/schema"
-	watch "k8s.io/kubernetes/pkg/watch"
-)
-
-// FakeJobs implements JobInterface
-type FakeJobs struct {
-	Fake *FakeExtensionsV1beta1
-	ns   string
-}
-
-var jobsResource = schema.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "jobs"}
-
-func (c *FakeJobs) Create(job *v1beta1.Job) (result *v1beta1.Job, err error) {
-	obj, err := c.Fake.
-		Invokes(core.NewCreateAction(jobsResource, c.ns, job), &v1beta1.Job{})
-
-	if obj == nil {
-		return nil, err
-	}
-	return obj.(*v1beta1.Job), err
-}
-
-func (c *FakeJobs) Update(job *v1beta1.Job) (result *v1beta1.Job, err error) {
-	obj, err := c.Fake.
-		Invokes(core.NewUpdateAction(jobsResource, c.ns, job), &v1beta1.Job{})
-
-	if obj == nil {
-		return nil, err
-	}
-	return obj.(*v1beta1.Job), err
-}
-
-func (c *FakeJobs) UpdateStatus(job *v1beta1.Job) (*v1beta1.Job, error) {
-	obj, err := c.Fake.
-		Invokes(core.NewUpdateSubresourceAction(jobsResource, "status", c.ns, job), &v1beta1.Job{})
-
-	if obj == nil {
-		return nil, err
-	}
-	return obj.(*v1beta1.Job), err
-}
-
-func (c *FakeJobs) Delete(name string, options *v1.DeleteOptions) error {
-	_, err := c.Fake.
-		Invokes(core.NewDeleteAction(jobsResource, c.ns, name), &v1beta1.Job{})
-
-	return err
-}
-
-func (c *FakeJobs) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
-	action := core.NewDeleteCollectionAction(jobsResource, c.ns, listOptions)
-
-	_, err := c.Fake.Invokes(action, &v1beta1.JobList{})
-	return err
-}
-
-func (c *FakeJobs) Get(name string, options meta_v1.GetOptions) (result *v1beta1.Job, err error) {
-	obj, err := c.Fake.
-		Invokes(core.NewGetAction(jobsResource, c.ns, name), &v1beta1.Job{})
-
-	if obj == nil {
-		return nil, err
-	}
-	return obj.(*v1beta1.Job), err
-}
-
-func (c *FakeJobs) List(opts v1.ListOptions) (result *v1beta1.JobList, err error) {
-	obj, err := c.Fake.
-		Invokes(core.NewListAction(jobsResource, c.ns, opts), &v1beta1.JobList{})
-
-	if obj == nil {
-		return nil, err
-	}
-
-	label, _, _ := core.ExtractFromListOptions(opts)
-	if label == nil {
-		label = labels.Everything()
-	}
-	list := &v1beta1.JobList{}
-	for _, item := range obj.(*v1beta1.JobList).Items {
-		if label.Matches(labels.Set(item.Labels)) {
-			list.Items = append(list.Items, item)
-		}
-	}
-	return list, err
-}
-
-// Watch returns a watch.Interface that watches the requested jobs.
-func (c *FakeJobs) Watch(opts v1.ListOptions) (watch.Interface, error) {
-	return c.Fake.
-		InvokesWatch(core.NewWatchAction(jobsResource, c.ns, opts))
-
-}
-
-// Patch applies the patch and returns the patched job.
-func (c *FakeJobs) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1beta1.Job, err error) {
-	obj, err := c.Fake.
-		Invokes(core.NewPatchSubresourceAction(jobsResource, c.ns, name, data, subresources...), &v1beta1.Job{})
-
-	if obj == nil {
-		return nil, err
-	}
-	return obj.(*v1beta1.Job), err
-}
@@ -1,172 +0,0 @@
-/*
-Copyright 2016 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1beta1
-
-import (
-	api "k8s.io/kubernetes/pkg/api"
-	v1 "k8s.io/kubernetes/pkg/api/v1"
-	v1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
-	meta_v1 "k8s.io/kubernetes/pkg/apis/meta/v1"
-	restclient "k8s.io/kubernetes/pkg/client/restclient"
-	watch "k8s.io/kubernetes/pkg/watch"
-)
-
-// JobsGetter has a method to return a JobInterface.
-// A group's client should implement this interface.
-type JobsGetter interface {
-	Jobs(namespace string) JobInterface
-}
-
-// JobInterface has methods to work with Job resources.
-type JobInterface interface {
-	Create(*v1beta1.Job) (*v1beta1.Job, error)
-	Update(*v1beta1.Job) (*v1beta1.Job, error)
-	UpdateStatus(*v1beta1.Job) (*v1beta1.Job, error)
-	Delete(name string, options *v1.DeleteOptions) error
-	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(name string, options meta_v1.GetOptions) (*v1beta1.Job, error)
-	List(opts v1.ListOptions) (*v1beta1.JobList, error)
-	Watch(opts v1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1beta1.Job, err error)
-	JobExpansion
-}
-
-// jobs implements JobInterface
-type jobs struct {
-	client restclient.Interface
-	ns     string
-}
-
-// newJobs returns a Jobs
-func newJobs(c *ExtensionsV1beta1Client, namespace string) *jobs {
-	return &jobs{
-		client: c.RESTClient(),
-		ns:     namespace,
-	}
-}
-
-// Create takes the representation of a job and creates it. Returns the server's representation of the job, and an error, if there is any.
-func (c *jobs) Create(job *v1beta1.Job) (result *v1beta1.Job, err error) {
-	result = &v1beta1.Job{}
-	err = c.client.Post().
-		Namespace(c.ns).
-		Resource("jobs").
-		Body(job).
-		Do().
-		Into(result)
-	return
-}
-
-// Update takes the representation of a job and updates it. Returns the server's representation of the job, and an error, if there is any.
-func (c *jobs) Update(job *v1beta1.Job) (result *v1beta1.Job, err error) {
-	result = &v1beta1.Job{}
-	err = c.client.Put().
-		Namespace(c.ns).
-		Resource("jobs").
-		Name(job.Name).
-		Body(job).
-		Do().
-		Into(result)
-	return
-}
-
-// UpdateStatus was generated because the type contains a Status member.
-// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus().
-
-func (c *jobs) UpdateStatus(job *v1beta1.Job) (result *v1beta1.Job, err error) {
-	result = &v1beta1.Job{}
-	err = c.client.Put().
-		Namespace(c.ns).
-		Resource("jobs").
-		Name(job.Name).
-		SubResource("status").
-		Body(job).
-		Do().
-		Into(result)
-	return
-}
-
-// Delete takes name of the job and deletes it. Returns an error if one occurs.
-func (c *jobs) Delete(name string, options *v1.DeleteOptions) error {
-	return c.client.Delete().
-		Namespace(c.ns).
-		Resource("jobs").
-		Name(name).
-		Body(options).
-		Do().
-		Error()
-}
-
-// DeleteCollection deletes a collection of objects.
-func (c *jobs) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
-	return c.client.Delete().
-		Namespace(c.ns).
-		Resource("jobs").
-		VersionedParams(&listOptions, api.ParameterCodec).
-		Body(options).
-		Do().
-		Error()
-}
-
-// Get takes name of the job, and returns the corresponding job object, and an error if there is any.
-func (c *jobs) Get(name string, options meta_v1.GetOptions) (result *v1beta1.Job, err error) {
-	result = &v1beta1.Job{}
-	err = c.client.Get().
-		Namespace(c.ns).
-		Resource("jobs").
-		Name(name).
-		VersionedParams(&options, api.ParameterCodec).
-		Do().
-		Into(result)
-	return
-}
-
-// List takes label and field selectors, and returns the list of Jobs that match those selectors.
-func (c *jobs) List(opts v1.ListOptions) (result *v1beta1.JobList, err error) {
-	result = &v1beta1.JobList{}
-	err = c.client.Get().
-		Namespace(c.ns).
-		Resource("jobs").
-		VersionedParams(&opts, api.ParameterCodec).
-		Do().
-		Into(result)
-	return
-}
-
-// Watch returns a watch.Interface that watches the requested jobs.
-func (c *jobs) Watch(opts v1.ListOptions) (watch.Interface, error) {
-	return c.client.Get().
-		Prefix("watch").
-		Namespace(c.ns).
-		Resource("jobs").
-		VersionedParams(&opts, api.ParameterCodec).
-		Watch()
-}
-
-// Patch applies the patch and returns the patched job.
-func (c *jobs) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1beta1.Job, err error) {
-	result = &v1beta1.Job{}
-	err = c.client.Patch(pt).
-		Namespace(c.ns).
-		Resource("jobs").
-		SubResource(subresources...).
-		Name(name).
-		Body(data).
-		Do().
-		Into(result)
-	return
-}
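
The deleted typed client is built around a chained request builder (Post().Namespace(...).Resource(...).Body(...).Do().Into(...)). A toy sketch of that fluent style, reduced to assembling the request path; the request type and its methods are hypothetical stand-ins, and the batch/v1 endpoint reflects where Jobs now live:

package main

import "fmt"

// request records one piece of the call per method and returns the
// receiver so calls can be chained.
type request struct {
	verb, ns, resource, name string
}

func newRequest(verb string) *request { return &request{verb: verb} }

func (r *request) Namespace(ns string) *request { r.ns = ns; return r }
func (r *request) Resource(res string) *request { r.resource = res; return r }
func (r *request) Name(n string) *request       { r.name = n; return r }

// URL assembles the path such a client would hit.
func (r *request) URL() string {
	u := "/apis/batch/v1/namespaces/" + r.ns + "/" + r.resource
	if r.name != "" {
		u += "/" + r.name
	}
	return u
}

func main() {
	r := newRequest("GET").Namespace("default").Resource("jobs").Name("pi")
	fmt.Println(r.verb, r.URL()) // GET /apis/batch/v1/namespaces/default/jobs/pi
}
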
@@ -1,68 +0,0 @@
-/*
-Copyright 2016 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// This file was automatically generated by informer-gen with arguments: --input-dirs=[k8s.io/kubernetes/pkg/api,k8s.io/kubernetes/pkg/api/v1,k8s.io/kubernetes/pkg/apis/abac,k8s.io/kubernetes/pkg/apis/abac/v0,k8s.io/kubernetes/pkg/apis/abac/v1beta1,k8s.io/kubernetes/pkg/apis/apps,k8s.io/kubernetes/pkg/apis/apps/v1beta1,k8s.io/kubernetes/pkg/apis/authentication,k8s.io/kubernetes/pkg/apis/authentication/v1beta1,k8s.io/kubernetes/pkg/apis/authorization,k8s.io/kubernetes/pkg/apis/authorization/v1beta1,k8s.io/kubernetes/pkg/apis/autoscaling,k8s.io/kubernetes/pkg/apis/autoscaling/v1,k8s.io/kubernetes/pkg/apis/batch,k8s.io/kubernetes/pkg/apis/batch/v1,k8s.io/kubernetes/pkg/apis/batch/v2alpha1,k8s.io/kubernetes/pkg/apis/certificates,k8s.io/kubernetes/pkg/apis/certificates/v1alpha1,k8s.io/kubernetes/pkg/apis/componentconfig,k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1,k8s.io/kubernetes/pkg/apis/extensions,k8s.io/kubernetes/pkg/apis/extensions/v1beta1,k8s.io/kubernetes/pkg/apis/imagepolicy,k8s.io/kubernetes/pkg/apis/imagepolicy/v1alpha1,k8s.io/kubernetes/pkg/apis/meta/v1,k8s.io/kubernetes/pkg/apis/policy,k8s.io/kubernetes/pkg/apis/policy/v1beta1,k8s.io/kubernetes/pkg/apis/rbac,k8s.io/kubernetes/pkg/apis/rbac/v1alpha1,k8s.io/kubernetes/pkg/apis/storage,k8s.io/kubernetes/pkg/apis/storage/v1beta1] --internal-clientset-package=k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset --listers-package=k8s.io/kubernetes/pkg/client/listers --versioned-clientset-package=k8s.io/kubernetes/pkg/client/clientset_generated/clientset
-
-package v1beta1
-
-import (
-	v1 "k8s.io/kubernetes/pkg/api/v1"
-	extensions_v1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
-	cache "k8s.io/kubernetes/pkg/client/cache"
-	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
-	internalinterfaces "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalinterfaces"
-	v1beta1 "k8s.io/kubernetes/pkg/client/listers/extensions/v1beta1"
-	runtime "k8s.io/kubernetes/pkg/runtime"
-	watch "k8s.io/kubernetes/pkg/watch"
-	time "time"
-)
-
-// JobInformer provides access to a shared informer and lister for
-// Jobs.
-type JobInformer interface {
-	Informer() cache.SharedIndexInformer
-	Lister() v1beta1.JobLister
-}
-
-type jobInformer struct {
-	factory internalinterfaces.SharedInformerFactory
-}
-
-func newJobInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
-	sharedIndexInformer := cache.NewSharedIndexInformer(
-		&cache.ListWatch{
-			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
-				return client.ExtensionsV1beta1().Jobs(v1.NamespaceAll).List(options)
-			},
-			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
-				return client.ExtensionsV1beta1().Jobs(v1.NamespaceAll).Watch(options)
-			},
-		},
-		&extensions_v1beta1.Job{},
-		resyncPeriod,
-		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
-	)
-
-	return sharedIndexInformer
-}
-
-func (f *jobInformer) Informer() cache.SharedIndexInformer {
-	return f.factory.VersionedInformerFor(&extensions_v1beta1.Job{}, newJobInformer)
-}
-
-func (f *jobInformer) Lister() v1beta1.JobLister {
-	return v1beta1.NewJobLister(f.Informer().GetIndexer())
-}
@@ -200,9 +200,8 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource
 		return &genericInformer{resource: resource.GroupResource(), informer: f.Extensions().V1beta1().Deployments().Informer()}, nil
 	case extensions_v1beta1.SchemeGroupVersion.WithResource("ingresses"):
 		return &genericInformer{resource: resource.GroupResource(), informer: f.Extensions().V1beta1().Ingresses().Informer()}, nil
-	case extensions_v1beta1.SchemeGroupVersion.WithResource("jobs"):
-		return &genericInformer{resource: resource.GroupResource(), informer: f.Extensions().V1beta1().Jobs().Informer()}, nil
 	case extensions_v1beta1.SchemeGroupVersion.WithResource("podsecuritypolicies"):
 		return &genericInformer{resource: resource.GroupResource(), informer: f.Extensions().V1beta1().PodSecurityPolicies().Informer()}, nil
 	case extensions_v1beta1.SchemeGroupVersion.WithResource("replicasets"):
 		return &genericInformer{resource: resource.GroupResource(), informer: f.Extensions().V1beta1().ReplicaSets().Informer()}, nil
@@ -1,95 +0,0 @@
-/*
-Copyright 2016 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// This file was automatically generated by lister-gen with arguments: --input-dirs=[k8s.io/kubernetes/pkg/api,k8s.io/kubernetes/pkg/api/v1,k8s.io/kubernetes/pkg/apis/abac,k8s.io/kubernetes/pkg/apis/abac/v0,k8s.io/kubernetes/pkg/apis/abac/v1beta1,k8s.io/kubernetes/pkg/apis/apps,k8s.io/kubernetes/pkg/apis/apps/v1beta1,k8s.io/kubernetes/pkg/apis/authentication,k8s.io/kubernetes/pkg/apis/authentication/v1beta1,k8s.io/kubernetes/pkg/apis/authorization,k8s.io/kubernetes/pkg/apis/authorization/v1beta1,k8s.io/kubernetes/pkg/apis/autoscaling,k8s.io/kubernetes/pkg/apis/autoscaling/v1,k8s.io/kubernetes/pkg/apis/batch,k8s.io/kubernetes/pkg/apis/batch/v1,k8s.io/kubernetes/pkg/apis/batch/v2alpha1,k8s.io/kubernetes/pkg/apis/certificates,k8s.io/kubernetes/pkg/apis/certificates/v1alpha1,k8s.io/kubernetes/pkg/apis/componentconfig,k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1,k8s.io/kubernetes/pkg/apis/extensions,k8s.io/kubernetes/pkg/apis/extensions/v1beta1,k8s.io/kubernetes/pkg/apis/imagepolicy,k8s.io/kubernetes/pkg/apis/imagepolicy/v1alpha1,k8s.io/kubernetes/pkg/apis/meta/v1,k8s.io/kubernetes/pkg/apis/policy,k8s.io/kubernetes/pkg/apis/policy/v1alpha1,k8s.io/kubernetes/pkg/apis/policy/v1beta1,k8s.io/kubernetes/pkg/apis/rbac,k8s.io/kubernetes/pkg/apis/rbac/v1alpha1,k8s.io/kubernetes/pkg/apis/storage,k8s.io/kubernetes/pkg/apis/storage/v1beta1]
-
-package v1beta1
-
-import (
-	"k8s.io/kubernetes/pkg/api/errors"
-	extensions "k8s.io/kubernetes/pkg/apis/extensions"
-	v1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
-	"k8s.io/kubernetes/pkg/client/cache"
-	"k8s.io/kubernetes/pkg/labels"
-)
-
-// JobLister helps list Jobs.
-type JobLister interface {
-	// List lists all Jobs in the indexer.
-	List(selector labels.Selector) (ret []*v1beta1.Job, err error)
-	// Jobs returns an object that can list and get Jobs.
-	Jobs(namespace string) JobNamespaceLister
-	JobListerExpansion
-}
-
-// jobLister implements the JobLister interface.
-type jobLister struct {
-	indexer cache.Indexer
-}
-
-// NewJobLister returns a new JobLister.
-func NewJobLister(indexer cache.Indexer) JobLister {
-	return &jobLister{indexer: indexer}
-}
-
-// List lists all Jobs in the indexer.
-func (s *jobLister) List(selector labels.Selector) (ret []*v1beta1.Job, err error) {
-	err = cache.ListAll(s.indexer, selector, func(m interface{}) {
-		ret = append(ret, m.(*v1beta1.Job))
-	})
-	return ret, err
-}
-
-// Jobs returns an object that can list and get Jobs.
-func (s *jobLister) Jobs(namespace string) JobNamespaceLister {
-	return jobNamespaceLister{indexer: s.indexer, namespace: namespace}
-}
-
-// JobNamespaceLister helps list and get Jobs.
-type JobNamespaceLister interface {
-	// List lists all Jobs in the indexer for a given namespace.
-	List(selector labels.Selector) (ret []*v1beta1.Job, err error)
-	// Get retrieves the Job from the indexer for a given namespace and name.
-	Get(name string) (*v1beta1.Job, error)
-	JobNamespaceListerExpansion
-}
-
-// jobNamespaceLister implements the JobNamespaceLister
-// interface.
-type jobNamespaceLister struct {
-	indexer   cache.Indexer
-	namespace string
-}
-
-// List lists all Jobs in the indexer for a given namespace.
-func (s jobNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Job, err error) {
-	err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
-		ret = append(ret, m.(*v1beta1.Job))
-	})
-	return ret, err
-}
-
-// Get retrieves the Job from the indexer for a given namespace and name.
-func (s jobNamespaceLister) Get(name string) (*v1beta1.Job, error) {
-	obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
-	if err != nil {
-		return nil, err
-	}
-	if !exists {
-		return nil, errors.NewNotFound(extensions.Resource("job"), name)
-	}
-	return obj.(*v1beta1.Job), nil
-}
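
The deleted generated lister resolves namespaced objects through an indexer keyed by "namespace/name". A toy sketch of that keying and its not-found behavior (store is a hypothetical map-backed stand-in for the cache.Indexer):

package main

import "fmt"

// store maps "namespace/name" keys to objects (plain strings here).
type store map[string]string

// Get mirrors the lister's lookup: build the composite key and report
// not-found when the key is absent.
func (s store) Get(namespace, name string) (string, error) {
	obj, ok := s[namespace+"/"+name]
	if !ok {
		return "", fmt.Errorf("job %q not found", name)
	}
	return obj, nil
}

func main() {
	s := store{"default/pi": "job object"}
	fmt.Println(s.Get("default", "pi"))      // job object <nil>
	fmt.Println(s.Get("default", "missing")) // error: job "missing" not found
}
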
@@ -42,7 +42,7 @@ func TestGetJobFromTemplate(t *testing.T) {
 			Name:      "mycronjob",
 			Namespace: "snazzycats",
 			UID:       types.UID("1a2b3c"),
-			SelfLink:  "/apis/extensions/v1beta1/namespaces/snazzycats/jobs/mycronjob",
+			SelfLink:  "/apis/batch/v1/namespaces/snazzycats/jobs/mycronjob",
 		},
 		Spec: batch.CronJobSpec{
 			Schedule: "* * * * ?",
@@ -90,7 +90,7 @@
 	if !ok {
 		t.Errorf("Missing created-by annotation")
 	}
-	expectedCreatedBy := `{"kind":"SerializedReference","apiVersion":"v1","reference":{"kind":"CronJob","namespace":"snazzycats","name":"mycronjob","uid":"1a2b3c","apiVersion":"extensions"}}
+	expectedCreatedBy := `{"kind":"SerializedReference","apiVersion":"v1","reference":{"kind":"CronJob","namespace":"snazzycats","name":"mycronjob","uid":"1a2b3c","apiVersion":"batch"}}
 `
 	if len(v) != len(expectedCreatedBy) {
 		t.Errorf("Wrong length for created-by annotation, expected %v got %v", len(expectedCreatedBy), len(v))
@@ -112,10 +112,10 @@ func TestParseRuntimeConfig(t *testing.T) {
 			err: false,
 		},
 		{
-			// Enable deployments and disable jobs.
+			// Enable deployments and disable daemonsets.
 			runtimeConfig: map[string]string{
-				"extensions/v1beta1/anything": "true",
-				"extensions/v1beta1/jobs":     "false",
+				"extensions/v1beta1/anything":   "true",
+				"extensions/v1beta1/daemonsets": "false",
 			},
 			defaultResourceConfig: func() *ResourceConfig {
 				config := NewResourceConfig()
@@ -126,7 +126,7 @@ func TestParseRuntimeConfig(t *testing.T) {
 			expectedAPIConfig: func() *ResourceConfig {
 				config := NewResourceConfig()
 				config.EnableVersions(extensionsGroupVersion)
-				config.DisableResources(extensionsGroupVersion.WithResource("jobs"))
+				config.DisableResources(extensionsGroupVersion.WithResource("daemonsets"))
 				config.EnableResources(extensionsGroupVersion.WithResource("anything"))
 				return config
 			},
@@ -282,7 +282,7 @@ func TestDrain(t *testing.T) {
 			Name:              "job",
 			Namespace:         "default",
 			CreationTimestamp: metav1.Time{Time: time.Now()},
-			SelfLink:          "/apis/extensions/v1beta1/namespaces/default/jobs/job",
+			SelfLink:          "/apis/batch/v1/namespaces/default/jobs/job",
 		},
 		Spec: batch.JobSpec{
 			Selector: &metav1.LabelSelector{MatchLabels: labels},
@@ -525,7 +525,7 @@ func TestDrain(t *testing.T) {
 			case m.isFor("GET", "/namespaces/default/daemonsets/ds"):
 				return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(testapi.Extensions.Codec(), &ds)}, nil
 			case m.isFor("GET", "/namespaces/default/jobs/job"):
-				return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(testapi.Extensions.Codec(), &job)}, nil
+				return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(testapi.Batch.Codec(), &job)}, nil
 			case m.isFor("GET", "/namespaces/default/replicasets/rs"):
 				return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(testapi.Extensions.Codec(), &test.replicaSets[0])}, nil
 			case m.isFor("GET", "/namespaces/default/pods/bar"):

@ -220,8 +220,6 @@ func Run(f cmdutil.Factory, cmdIn io.Reader, cmdOut, cmdErr io.Writer, cmd *cobr
case api.RestartPolicyOnFailure:
if contains(resourcesList, batchv1.SchemeGroupVersion.WithResource("jobs")) {
generatorName = "job/v1"
} else if contains(resourcesList, v1beta1.SchemeGroupVersion.WithResource("jobs")) {
generatorName = "job/v1beta1"
} else {
generatorName = "run-pod/v1"
}

@ -229,9 +227,6 @@ func Run(f cmdutil.Factory, cmdIn io.Reader, cmdOut, cmdErr io.Writer, cmd *cobr
generatorName = "run-pod/v1"
}
}
if generatorName == "job/v1beta1" {
fmt.Fprintf(cmdErr, "DEPRECATED: --generator=job/v1beta1 is deprecated, use job/v1 instead.\n")
}
generators := f.Generators("run")
generator, found := generators[generatorName]
if !found {
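
With the job/v1beta1 branch and its deprecation warning removed, the restart-policy fallback in `kubectl run` reduces to two outcomes: a batch/v1 Job if the server advertises it, otherwise a plain pod. A self-contained sketch of that surviving decision; the resource struct is a simplified stand-in for schema.GroupVersionResource and the discovered resource list:

package main

import "fmt"

// resource is a toy stand-in for schema.GroupVersionResource.
type resource struct{ group, version, name string }

func contains(list []resource, r resource) bool {
	for _, item := range list {
		if item == r {
			return true
		}
	}
	return false
}

// pickGenerator mirrors the simplified OnFailure fallback after this
// commit: batch/v1 Job when available, plain pod otherwise.
func pickGenerator(resources []resource) string {
	if contains(resources, resource{"batch", "v1", "jobs"}) {
		return "job/v1"
	}
	return "run-pod/v1"
}

func main() {
	withBatch := []resource{{"batch", "v1", "jobs"}}
	fmt.Println(pickGenerator(withBatch)) // job/v1
	fmt.Println(pickGenerator(nil))       // run-pod/v1
}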

@ -442,7 +442,6 @@ const (
HorizontalPodAutoscalerV1GeneratorName = "horizontalpodautoscaler/v1"
DeploymentV1Beta1GeneratorName = "deployment/v1beta1"
DeploymentBasicV1Beta1GeneratorName = "deployment-basic/v1beta1"
JobV1Beta1GeneratorName = "job/v1beta1"
JobV1GeneratorName = "job/v1"
CronJobV2Alpha1GeneratorName = "cronjob/v2alpha1"
ScheduledJobV2Alpha1GeneratorName = "scheduledjob/v2alpha1"

@ -487,7 +486,6 @@ func DefaultGenerators(cmdName string) map[string]kubectl.Generator {
RunV1GeneratorName: kubectl.BasicReplicationController{},
RunPodV1GeneratorName: kubectl.BasicPod{},
DeploymentV1Beta1GeneratorName: kubectl.DeploymentV1Beta1{},
JobV1Beta1GeneratorName: kubectl.JobV1Beta1{},
JobV1GeneratorName: kubectl.JobV1{},
ScheduledJobV2Alpha1GeneratorName: kubectl.CronJobV2Alpha1{},
CronJobV2Alpha1GeneratorName: kubectl.CronJobV2Alpha1{},

@ -106,7 +106,7 @@ var userResources = []schema.GroupResource{
{Group: "", Resource: "services"},
{Group: "apps", Resource: "statefulsets"},
{Group: "autoscaling", Resource: "horizontalpodautoscalers"},
{Group: "extensions", Resource: "jobs"},
{Group: "batch", Resource: "jobs"},
{Group: "extensions", Resource: "deployments"},
{Group: "extensions", Resource: "replicasets"},
}

@ -142,7 +142,6 @@ func describerMap(c clientset.Interface) map[schema.GroupKind]Describer {
autoscaling.Kind("HorizontalPodAutoscaler"): &HorizontalPodAutoscalerDescriber{c},
extensions.Kind("DaemonSet"): &DaemonSetDescriber{c},
extensions.Kind("Deployment"): &DeploymentDescriber{c, versionedClientsetForDeployment(c)},
extensions.Kind("Job"): &JobDescriber{c},
extensions.Kind("Ingress"): &IngressDescriber{c},
batch.Kind("Job"): &JobDescriber{c},
batch.Kind("CronJob"): &CronJobDescriber{c},
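
Dropping the extensions.Kind("Job") entry leaves jobs registered in the describer map only under the batch group. A toy version of that group/kind dispatch, using illustrative types rather than the real Describer interface:

package main

import "fmt"

type groupKind struct{ group, kind string }

// describers maps a group/kind to a describe function, mirroring how
// describerMap now registers Job only under the batch group.
var describers = map[groupKind]func(name string) string{
	{group: "batch", kind: "Job"}:     func(name string) string { return "describing job " + name },
	{group: "batch", kind: "CronJob"}: func(name string) string { return "describing cronjob " + name },
}

func main() {
	if describe, ok := describers[groupKind{"batch", "Job"}]; ok {
		fmt.Println(describe("pi"))
	}
	_, ok := describers[groupKind{"extensions", "Job"}]
	fmt.Println("extensions Job registered:", ok) // false after this commit
}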

@ -23,10 +23,7 @@ import (

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apis/batch"
batchv1 "k8s.io/kubernetes/pkg/apis/batch/v1"
batchv2alpha1 "k8s.io/kubernetes/pkg/apis/batch/v2alpha1"
batch "k8s.io/kubernetes/pkg/apis/batch"
"k8s.io/kubernetes/pkg/apis/extensions"
metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/runtime"

@ -193,27 +190,9 @@ func getEnvs(genericParams map[string]interface{}) ([]api.EnvVar, error) {
return envs, nil
}

func getV1Envs(genericParams map[string]interface{}) ([]v1.EnvVar, error) {
var envs []v1.EnvVar
envStrings, found := genericParams["env"]
if found {
if envStringArray, isArray := envStrings.([]string); isArray {
var err error
envs, err = parseV1Envs(envStringArray)
if err != nil {
return nil, err
}
delete(genericParams, "env")
} else {
return nil, fmt.Errorf("expected []string, found: %v", envStrings)
}
}
return envs, nil
}
type JobV1 struct{}

type JobV1Beta1 struct{}

func (JobV1Beta1) ParamNames() []GeneratorParam {
func (JobV1) ParamNames() []GeneratorParam {
return []GeneratorParam{
{"labels", false},
{"default-name", false},

@ -234,7 +213,7 @@ func (JobV1Beta1) ParamNames() []GeneratorParam {
}
}

func (JobV1Beta1) Generate(genericParams map[string]interface{}) (runtime.Object, error) {
func (JobV1) Generate(genericParams map[string]interface{}) (runtime.Object, error) {
args, err := getArgs(genericParams)
if err != nil {
return nil, err

@ -292,10 +271,6 @@ func (JobV1Beta1) Generate(genericParams map[string]interface{}) (runtime.Object
Labels: labels,
},
Spec: batch.JobSpec{
Selector: &metav1.LabelSelector{
MatchLabels: labels,
},
ManualSelector: newBool(true),
Template: api.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{
Labels: labels,

@ -308,99 +283,6 @@ func (JobV1Beta1) Generate(genericParams map[string]interface{}) (runtime.Object
return &job, nil
}

type JobV1 struct{}

func (JobV1) ParamNames() []GeneratorParam {
return []GeneratorParam{
{"labels", false},
{"default-name", false},
{"name", true},
{"image", true},
{"image-pull-policy", false},
{"port", false},
{"hostport", false},
{"stdin", false},
{"leave-stdin-open", false},
{"tty", false},
{"command", false},
{"args", false},
{"env", false},
{"requests", false},
{"limits", false},
{"restart", false},
}
}

func (JobV1) Generate(genericParams map[string]interface{}) (runtime.Object, error) {
args, err := getArgs(genericParams)
if err != nil {
return nil, err
}

envs, err := getV1Envs(genericParams)
if err != nil {
return nil, err
}

params, err := getParams(genericParams)
if err != nil {
return nil, err
}

name, err := getName(params)
if err != nil {
return nil, err
}

labels, err := getLabels(params, true, name)
if err != nil {
return nil, err
}

podSpec, err := makeV1PodSpec(params, name)
if err != nil {
return nil, err
}

imagePullPolicy := v1.PullPolicy(params["image-pull-policy"])
if err = updateV1PodContainers(params, args, envs, imagePullPolicy, podSpec); err != nil {
return nil, err
}

leaveStdinOpen, err := GetBool(params, "leave-stdin-open", false)
if err != nil {
return nil, err
}
podSpec.Containers[0].StdinOnce = !leaveStdinOpen && podSpec.Containers[0].Stdin

if err := updateV1PodPorts(params, podSpec); err != nil {
return nil, err
}

restartPolicy := v1.RestartPolicy(params["restart"])
if len(restartPolicy) == 0 {
restartPolicy = v1.RestartPolicyNever
}
podSpec.RestartPolicy = restartPolicy

job := batchv1.Job{
ObjectMeta: v1.ObjectMeta{
Name: name,
Labels: labels,
},
Spec: batchv1.JobSpec{
Template: v1.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{
Labels: labels,
},
Spec: *podSpec,
},
},
}

return &job, nil
}

type CronJobV2Alpha1 struct{}

func (CronJobV2Alpha1) ParamNames() []GeneratorParam {

@ -431,7 +313,7 @@ func (CronJobV2Alpha1) Generate(genericParams map[string]interface{}) (runtime.O
return nil, err
}

envs, err := getV1Envs(genericParams)
envs, err := getEnvs(genericParams)
if err != nil {
return nil, err
}

@ -451,13 +333,13 @@ func (CronJobV2Alpha1) Generate(genericParams map[string]interface{}) (runtime.O
return nil, err
}

podSpec, err := makeV1PodSpec(params, name)
podSpec, err := makePodSpec(params, name)
if err != nil {
return nil, err
}

imagePullPolicy := v1.PullPolicy(params["image-pull-policy"])
if err = updateV1PodContainers(params, args, envs, imagePullPolicy, podSpec); err != nil {
imagePullPolicy := api.PullPolicy(params["image-pull-policy"])
if err = updatePodContainers(params, args, envs, imagePullPolicy, podSpec); err != nil {
return nil, err
}


@ -467,28 +349,28 @@ func (CronJobV2Alpha1) Generate(genericParams map[string]interface{}) (runtime.O
}
podSpec.Containers[0].StdinOnce = !leaveStdinOpen && podSpec.Containers[0].Stdin

if err := updateV1PodPorts(params, podSpec); err != nil {
if err := updatePodPorts(params, podSpec); err != nil {
return nil, err
}

restartPolicy := v1.RestartPolicy(params["restart"])
restartPolicy := api.RestartPolicy(params["restart"])
if len(restartPolicy) == 0 {
restartPolicy = v1.RestartPolicyNever
restartPolicy = api.RestartPolicyNever
}
podSpec.RestartPolicy = restartPolicy

cronJob := batchv2alpha1.CronJob{
ObjectMeta: v1.ObjectMeta{
cronJob := batch.CronJob{
ObjectMeta: api.ObjectMeta{
Name: name,
Labels: labels,
},
Spec: batchv2alpha1.CronJobSpec{
Spec: batch.CronJobSpec{
Schedule: params["schedule"],
ConcurrencyPolicy: batchv2alpha1.AllowConcurrent,
JobTemplate: batchv2alpha1.JobTemplateSpec{
Spec: batchv2alpha1.JobSpec{
Template: v1.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{
ConcurrencyPolicy: batch.AllowConcurrent,
JobTemplate: batch.JobTemplateSpec{
Spec: batch.JobSpec{
Template: api.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{
Labels: labels,
},
Spec: *podSpec,

@ -547,30 +429,6 @@ func populateResourceList(spec string) (api.ResourceList, error) {
return result, nil
}

// populateV1ResourceList takes strings of form <resourceName1>=<value1>,<resourceName2>=<value2>
func populateV1ResourceList(spec string) (v1.ResourceList, error) {
// empty input gets a nil response to preserve generator test expected behaviors
if spec == "" {
return nil, nil
}

result := v1.ResourceList{}
resourceStatements := strings.Split(spec, ",")
for _, resourceStatement := range resourceStatements {
parts := strings.Split(resourceStatement, "=")
if len(parts) != 2 {
return nil, fmt.Errorf("Invalid argument syntax %v, expected <resource>=<value>", resourceStatement)
}
resourceName := v1.ResourceName(parts[0])
resourceQuantity, err := resource.ParseQuantity(parts[1])
if err != nil {
return nil, err
}
result[resourceName] = resourceQuantity
}
return result, nil
}

// HandleResourceRequirements parses the limits and requests parameters if specified
func HandleResourceRequirements(params map[string]string) (api.ResourceRequirements, error) {
result := api.ResourceRequirements{}
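
The surviving populateResourceList keeps the same comma-separated <resource>=<value> format that the deleted v1 twin parsed. A dependency-free sketch of that parse loop; quantities stay plain strings here, whereas the real helper runs them through resource.ParseQuantity:

package main

import (
	"fmt"
	"strings"
)

// parseResourceList splits "cpu=100m,memory=256Mi" style specs into a
// name-to-quantity map.
func parseResourceList(spec string) (map[string]string, error) {
	if spec == "" {
		return nil, nil // empty input stays nil, as the original comment notes
	}
	result := map[string]string{}
	for _, statement := range strings.Split(spec, ",") {
		parts := strings.Split(statement, "=")
		if len(parts) != 2 {
			return nil, fmt.Errorf("invalid argument syntax %v, expected <resource>=<value>", statement)
		}
		result[parts[0]] = parts[1]
	}
	return result, nil
}

func main() {
	limits, err := parseResourceList("cpu=100m,memory=256Mi")
	fmt.Println(limits, err)
}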

@ -587,22 +445,6 @@ func HandleResourceRequirements(params map[string]string) (api.ResourceRequireme
return result, nil
}

// handleV1ResourceRequirements parses the limits and requests parameters if specified
func handleV1ResourceRequirements(params map[string]string) (v1.ResourceRequirements, error) {
result := v1.ResourceRequirements{}
limits, err := populateV1ResourceList(params["limits"])
if err != nil {
return result, err
}
result.Limits = limits
requests, err := populateV1ResourceList(params["requests"])
if err != nil {
return result, err
}
result.Requests = requests
return result, nil
}

func makePodSpec(params map[string]string, name string) (*api.PodSpec, error) {
stdin, err := GetBool(params, "stdin", false)
if err != nil {

@ -633,36 +475,6 @@ func makePodSpec(params map[string]string, name string) (*api.PodSpec, error) {
return &spec, nil
}

func makeV1PodSpec(params map[string]string, name string) (*v1.PodSpec, error) {
stdin, err := GetBool(params, "stdin", false)
if err != nil {
return nil, err
}

tty, err := GetBool(params, "tty", false)
if err != nil {
return nil, err
}

resourceRequirements, err := handleV1ResourceRequirements(params)
if err != nil {
return nil, err
}

spec := v1.PodSpec{
Containers: []v1.Container{
{
Name: name,
Image: params["image"],
Stdin: stdin,
TTY: tty,
Resources: resourceRequirements,
},
},
}
return &spec, nil
}

func (BasicReplicationController) Generate(genericParams map[string]interface{}) (runtime.Object, error) {
args, err := getArgs(genericParams)
if err != nil {

@ -751,30 +563,6 @@ func updatePodContainers(params map[string]string, args []string, envs []api.Env
return nil
}

func updateV1PodContainers(params map[string]string, args []string, envs []v1.EnvVar, imagePullPolicy v1.PullPolicy, podSpec *v1.PodSpec) error {
if len(args) > 0 {
command, err := GetBool(params, "command", false)
if err != nil {
return err
}
if command {
podSpec.Containers[0].Command = args
} else {
podSpec.Containers[0].Args = args
}
}

if len(envs) > 0 {
podSpec.Containers[0].Env = envs
}

if len(imagePullPolicy) > 0 {
// imagePullPolicy should be valid here since we have verified it before.
podSpec.Containers[0].ImagePullPolicy = imagePullPolicy
}
return nil
}

func updatePodPorts(params map[string]string, podSpec *api.PodSpec) (err error) {
port := -1
hostPort := -1

@ -809,40 +597,6 @@ func updatePodPorts(params map[string]string, podSpec *api.PodSpec) (err error)
return nil
}

func updateV1PodPorts(params map[string]string, podSpec *v1.PodSpec) (err error) {
port := -1
hostPort := -1
if len(params["port"]) > 0 {
port, err = strconv.Atoi(params["port"])
if err != nil {
return err
}
}

if len(params["hostport"]) > 0 {
hostPort, err = strconv.Atoi(params["hostport"])
if err != nil {
return err
}
if hostPort > 0 && port < 0 {
return fmt.Errorf("--hostport requires --port to be specified")
}
}

// Don't include the port if it was not specified.
if len(params["port"]) > 0 {
podSpec.Containers[0].Ports = []v1.ContainerPort{
{
ContainerPort: int32(port),
},
}
if hostPort > 0 {
podSpec.Containers[0].Ports[0].HostPort = int32(hostPort)
}
}
return nil
}

type BasicPod struct{}

func (BasicPod) ParamNames() []GeneratorParam {

@ -970,24 +724,6 @@ func parseEnvs(envArray []string) ([]api.EnvVar, error) {
return envs, nil
}

func parseV1Envs(envArray []string) ([]v1.EnvVar, error) {
envs := []v1.EnvVar{}
for _, env := range envArray {
pos := strings.Index(env, "=")
if pos == -1 {
return nil, fmt.Errorf("invalid env: %v", env)
}
name := env[:pos]
value := env[pos+1:]
if len(name) == 0 || len(validation.IsCIdentifier(name)) != 0 {
return nil, fmt.Errorf("invalid env: %v", env)
}
envVar := v1.EnvVar{Name: name, Value: value}
envs = append(envs, envVar)
}
return envs, nil
}

func newBool(val bool) *bool {
p := new(bool)
*p = val
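
The parseEnvs flavor that remains splits each NAME=value pair on the first "=" and rejects names that are not C identifiers. A rough standalone sketch of that loop, with a regular expression approximating the real validation.IsCIdentifier check:

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// cIdentifier approximates validation.IsCIdentifier from the real code.
var cIdentifier = regexp.MustCompile(`^[A-Za-z_][A-Za-z0-9_]*$`)

type envVar struct{ Name, Value string }

func parseEnvs(envArray []string) ([]envVar, error) {
	envs := []envVar{}
	for _, env := range envArray {
		pos := strings.Index(env, "=")
		if pos == -1 {
			return nil, fmt.Errorf("invalid env: %v", env)
		}
		name, value := env[:pos], env[pos+1:]
		if !cIdentifier.MatchString(name) {
			return nil, fmt.Errorf("invalid env: %v", env)
		}
		envs = append(envs, envVar{Name: name, Value: value})
	}
	return envs, nil
}

func main() {
	fmt.Println(parseEnvs([]string{"FOO=bar", "PI_DIGITS=20"}))
	fmt.Println(parseEnvs([]string{"1BAD=x"})) // rejected: not a C identifier
}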

@ -758,10 +758,6 @@ func TestGenerateJob(t *testing.T) {
Labels: map[string]string{"foo": "bar", "baz": "blah"},
},
Spec: batch.JobSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"foo": "bar", "baz": "blah"},
},
ManualSelector: newBool(true),
Template: api.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{
Labels: map[string]string{"foo": "bar", "baz": "blah"},

@ -810,7 +806,7 @@ func TestGenerateJob(t *testing.T) {
},
}

generator := JobV1Beta1{}
generator := JobV1{}
for _, test := range tests {
obj, err := generator.Generate(test.params)
if !test.expectErr && err != nil {

@ -56,7 +56,7 @@ func ScalerFor(kind schema.GroupKind, c internalclientset.Interface) (Scaler, er
return &ReplicationControllerScaler{c.Core()}, nil
case extensions.Kind("ReplicaSet"):
return &ReplicaSetScaler{c.Extensions()}, nil
case extensions.Kind("Job"), batch.Kind("Job"):
case batch.Kind("Job"):
return &JobScaler{c.Batch()}, nil // Either kind of job can be scaled with Batch interface.
case apps.Kind("StatefulSet"):
return &StatefulSetScaler{c.Apps()}, nil

@ -85,7 +85,7 @@ func ReaperFor(kind schema.GroupKind, c internalclientset.Interface) (Reaper, er
case api.Kind("Service"):
return &ServiceReaper{c.Core()}, nil

case extensions.Kind("Job"), batch.Kind("Job"):
case batch.Kind("Job"):
return &JobReaper{c.Batch(), c.Core(), Interval, Timeout}, nil

case apps.Kind("StatefulSet"):
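
Both factories now dispatch on the single batch Job kind instead of matching either group. A compact sketch of that kind-keyed switch, with toy types standing in for the real Scaler and clientset interfaces:

package main

import (
	"errors"
	"fmt"
)

type groupKind struct{ group, kind string }

type scaler interface{ Scale(name string, replicas uint) }

type jobScaler struct{}

func (jobScaler) Scale(name string, replicas uint) {
	fmt.Printf("scaling job %s to %d\n", name, replicas)
}

// scalerFor mirrors the trimmed ScalerFor: only the batch Job resolves.
func scalerFor(kind groupKind) (scaler, error) {
	switch kind {
	case groupKind{"batch", "Job"}:
		return jobScaler{}, nil
	}
	return nil, errors.New("no scaler for " + kind.group + "/" + kind.kind)
}

func main() {
	s, _ := scalerFor(groupKind{"batch", "Job"})
	s.Scale("pi", 2)
	_, err := scalerFor(groupKind{"extensions", "Job"}) // no longer supported
	fmt.Println(err)
}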

@ -405,7 +405,6 @@ func DefaultAPIResourceConfigSource() *genericapiserver.ResourceConfig {
extensionsapiv1beta1.SchemeGroupVersion.WithResource("deployments"),
extensionsapiv1beta1.SchemeGroupVersion.WithResource("horizontalpodautoscalers"),
extensionsapiv1beta1.SchemeGroupVersion.WithResource("ingresses"),
extensionsapiv1beta1.SchemeGroupVersion.WithResource("jobs"),
extensionsapiv1beta1.SchemeGroupVersion.WithResource("networkpolicies"),
extensionsapiv1beta1.SchemeGroupVersion.WithResource("replicasets"),
extensionsapiv1beta1.SchemeGroupVersion.WithResource("thirdpartyresources"),

@ -21,7 +21,6 @@ import (

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/apis/batch"
"k8s.io/kubernetes/pkg/apis/extensions"
metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/labels"

@ -32,7 +31,7 @@ import (
)

func newStorage(t *testing.T) (*REST, *StatusREST, *etcdtesting.EtcdTestServer) {
etcdStorage, server := registrytest.NewEtcdStorage(t, extensions.GroupName)
etcdStorage, server := registrytest.NewEtcdStorage(t, batch.GroupName)
restOptions := generic.RESTOptions{
StorageConfig: etcdStorage,
Decorator: generic.UndecoratedStorage,

@ -223,7 +223,7 @@ func TestJobStatusStrategy(t *testing.T) {

func TestSelectableFieldLabelConversions(t *testing.T) {
apitesting.TestSelectableFieldLabelConversionsOfKind(t,
testapi.Extensions.GroupVersion().String(),
testapi.Batch.GroupVersion().String(),
"Job",
JobToSelectableFields(&batch.Job{}),
nil,

@ -148,8 +148,8 @@ func init() {
addControllerRole(rbac.ClusterRole{
ObjectMeta: api.ObjectMeta{Name: saRolePrefix + "job-controller"},
Rules: []rbac.PolicyRule{
rbac.NewRule("get", "list", "watch", "update").Groups(batchGroup, extensionsGroup).Resources("jobs").RuleOrDie(),
rbac.NewRule("update").Groups(batchGroup, extensionsGroup).Resources("jobs/status").RuleOrDie(),
rbac.NewRule("get", "list", "watch", "update").Groups(batchGroup).Resources("jobs").RuleOrDie(),
rbac.NewRule("update").Groups(batchGroup).Resources("jobs/status").RuleOrDie(),
rbac.NewRule("list", "watch", "create", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(),
eventsRule(),
},
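
After the edit, the job controller's role grants job access only through the batch group. A minimal sketch of what the three surviving rules express, using a hypothetical policyRule struct rather than the real rbac package:

package main

import "fmt"

// policyRule is a hypothetical stand-in for rbac.PolicyRule.
type policyRule struct {
	Verbs, APIGroups, Resources []string
}

var jobControllerRules = []policyRule{
	{[]string{"get", "list", "watch", "update"}, []string{"batch"}, []string{"jobs"}},
	{[]string{"update"}, []string{"batch"}, []string{"jobs/status"}},
	{[]string{"list", "watch", "create", "delete"}, []string{""}, []string{"pods"}},
}

func main() {
	for _, r := range jobControllerRules {
		fmt.Printf("verbs=%v groups=%v resources=%v\n", r.Verbs, r.APIGroups, r.Resources)
	}
}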

@ -114,7 +114,7 @@ func ClusterRoles() []rbac.ClusterRole {

rbac.NewRule(ReadWrite...).Groups(batchGroup).Resources("jobs", "cronjobs", "scheduledjobs").RuleOrDie(),

rbac.NewRule(ReadWrite...).Groups(extensionsGroup).Resources("jobs", "daemonsets", "horizontalpodautoscalers",
rbac.NewRule(ReadWrite...).Groups(extensionsGroup).Resources("daemonsets", "horizontalpodautoscalers",
"replicationcontrollers/scale", "replicasets", "replicasets/scale", "deployments", "deployments/scale").RuleOrDie(),

// additional admin powers

@ -144,7 +144,7 @@ func ClusterRoles() []rbac.ClusterRole {

rbac.NewRule(ReadWrite...).Groups(batchGroup).Resources("jobs", "cronjobs", "scheduledjobs").RuleOrDie(),

rbac.NewRule(ReadWrite...).Groups(extensionsGroup).Resources("jobs", "daemonsets", "horizontalpodautoscalers",
rbac.NewRule(ReadWrite...).Groups(extensionsGroup).Resources("daemonsets", "horizontalpodautoscalers",
"replicationcontrollers/scale", "replicasets", "replicasets/scale", "deployments", "deployments/scale").RuleOrDie(),
},
},

@ -167,7 +167,7 @@ func ClusterRoles() []rbac.ClusterRole {

rbac.NewRule(Read...).Groups(batchGroup).Resources("jobs", "cronjobs", "scheduledjobs").RuleOrDie(),

rbac.NewRule(Read...).Groups(extensionsGroup).Resources("jobs", "daemonsets", "horizontalpodautoscalers",
rbac.NewRule(Read...).Groups(extensionsGroup).Resources("daemonsets", "horizontalpodautoscalers",
"replicationcontrollers/scale", "replicasets", "replicasets/scale", "deployments", "deployments/scale").RuleOrDie(),
},
},

@ -133,7 +133,6 @@ items:
- deployments
- deployments/scale
- horizontalpodautoscalers
- jobs
- replicasets
- replicasets/scale
- replicationcontrollers/scale

@ -321,7 +320,6 @@ items:
- deployments
- deployments/scale
- horizontalpodautoscalers
- jobs
- replicasets
- replicasets/scale
- replicationcontrollers/scale

@ -689,7 +687,6 @@ items:
- deployments
- deployments/scale
- horizontalpodautoscalers
- jobs
- replicasets
- replicasets/scale
- replicationcontrollers/scale

@ -440,7 +440,6 @@ items:
rules:
- apiGroups:
- batch
- extensions
attributeRestrictions: null
resources:
- jobs

@ -451,7 +450,6 @@ items:
- watch
- apiGroups:
- batch
- extensions
attributeRestrictions: null
resources:
- jobs/status

@ -1,342 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// This file is very similar to ./job.go. That one uses extensions/v1beta1, this one
// uses batch/v1. That one uses ManualSelectors, this one does not. Keep them in sync.
// Delete that one when Job removed from extensions/v1beta1.

package e2e

import (
"time"

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/v1"
batchinternal "k8s.io/kubernetes/pkg/apis/batch"
batch "k8s.io/kubernetes/pkg/apis/batch/v1"
metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/kubectl"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)

const (
// How long to wait for a job to finish.
v1JobTimeout = 15 * time.Minute

// Job selector name
v1JobSelectorKey = "job-name"
)

var _ = framework.KubeDescribe("V1Job", func() {
f := framework.NewDefaultFramework("v1job")
parallelism := int32(2)
completions := int32(4)
lotsOfFailures := int32(5) // more than completions

// Simplest case: all pods succeed promptly
It("should run a job to completion when tasks succeed", func() {
By("Creating a job")
job := newTestV1Job("succeed", "all-succeed", v1.RestartPolicyNever, parallelism, completions)
job, err := createV1Job(f.ClientSet, f.Namespace.Name, job)
Expect(err).NotTo(HaveOccurred())

By("Ensuring job reaches completions")
err = waitForV1JobFinish(f.ClientSet, f.Namespace.Name, job.Name, completions)
Expect(err).NotTo(HaveOccurred())
})

// Pods sometimes fail, but eventually succeed.
It("should run a job to completion when tasks sometimes fail and are locally restarted", func() {
By("Creating a job")
// One failure, then a success, local restarts.
// We can't use the random failure approach used by the
// non-local test below, because kubelet will throttle
// frequently failing containers in a given pod, ramping
// up to 5 minutes between restarts, making test timeouts
// due to successive failures too likely with a reasonable
// test timeout.
job := newTestV1Job("failOnce", "fail-once-local", v1.RestartPolicyOnFailure, parallelism, completions)
job, err := createV1Job(f.ClientSet, f.Namespace.Name, job)
Expect(err).NotTo(HaveOccurred())

By("Ensuring job reaches completions")
err = waitForV1JobFinish(f.ClientSet, f.Namespace.Name, job.Name, completions)
Expect(err).NotTo(HaveOccurred())
})

// Pods sometimes fail, but eventually succeed, after pod restarts
It("should run a job to completion when tasks sometimes fail and are not locally restarted", func() {
By("Creating a job")
// 50% chance of container success, local restarts.
// Can't use the failOnce approach because that relies
// on an emptyDir, which is not preserved across new pods.
// Worst case analysis: 15 failures, each taking 1 minute to
// run due to some slowness, 1 in 2^15 chance of happening,
// causing test flake. Should be very rare.
job := newTestV1Job("randomlySucceedOrFail", "rand-non-local", v1.RestartPolicyNever, parallelism, completions)
job, err := createV1Job(f.ClientSet, f.Namespace.Name, job)
Expect(err).NotTo(HaveOccurred())

By("Ensuring job reaches completions")
err = waitForV1JobFinish(f.ClientSet, f.Namespace.Name, job.Name, completions)
Expect(err).NotTo(HaveOccurred())
})

It("should keep restarting failed pods", func() {
By("Creating a job")
job := newTestV1Job("fail", "all-fail", v1.RestartPolicyNever, parallelism, completions)
job, err := createV1Job(f.ClientSet, f.Namespace.Name, job)
Expect(err).NotTo(HaveOccurred())

By("Ensuring job shows many failures")
err = wait.Poll(framework.Poll, v1JobTimeout, func() (bool, error) {
curr, err := getV1Job(f.ClientSet, f.Namespace.Name, job.Name)
if err != nil {
return false, err
}
return curr.Status.Failed > lotsOfFailures, nil
})
})

It("should scale a job up", func() {
startParallelism := int32(1)
endParallelism := int32(2)
By("Creating a job")
job := newTestV1Job("notTerminate", "scale-up", v1.RestartPolicyNever, startParallelism, completions)
job, err := createV1Job(f.ClientSet, f.Namespace.Name, job)
Expect(err).NotTo(HaveOccurred())

By("Ensuring active pods == startParallelism")
err = waitForAllPodsRunningV1(f.ClientSet, f.Namespace.Name, job.Name, startParallelism)
Expect(err).NotTo(HaveOccurred())

By("scale job up")
scaler, err := kubectl.ScalerFor(batchinternal.Kind("Job"), f.InternalClientset)
Expect(err).NotTo(HaveOccurred())
waitForScale := kubectl.NewRetryParams(5*time.Second, 1*time.Minute)
waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute)
scaler.Scale(f.Namespace.Name, job.Name, uint(endParallelism), nil, waitForScale, waitForReplicas)
Expect(err).NotTo(HaveOccurred())

By("Ensuring active pods == endParallelism")
err = waitForAllPodsRunningV1(f.ClientSet, f.Namespace.Name, job.Name, endParallelism)
Expect(err).NotTo(HaveOccurred())
})

It("should scale a job down", func() {
startParallelism := int32(2)
endParallelism := int32(1)
By("Creating a job")
job := newTestV1Job("notTerminate", "scale-down", v1.RestartPolicyNever, startParallelism, completions)
job, err := createV1Job(f.ClientSet, f.Namespace.Name, job)
Expect(err).NotTo(HaveOccurred())

By("Ensuring active pods == startParallelism")
err = waitForAllPodsRunningV1(f.ClientSet, f.Namespace.Name, job.Name, startParallelism)
Expect(err).NotTo(HaveOccurred())

By("scale job down")
scaler, err := kubectl.ScalerFor(batchinternal.Kind("Job"), f.InternalClientset)
Expect(err).NotTo(HaveOccurred())
waitForScale := kubectl.NewRetryParams(5*time.Second, 1*time.Minute)
waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute)
err = scaler.Scale(f.Namespace.Name, job.Name, uint(endParallelism), nil, waitForScale, waitForReplicas)
Expect(err).NotTo(HaveOccurred())

By("Ensuring active pods == endParallelism")
err = waitForAllPodsRunningV1(f.ClientSet, f.Namespace.Name, job.Name, endParallelism)
Expect(err).NotTo(HaveOccurred())
})

It("should delete a job", func() {
By("Creating a job")
job := newTestV1Job("notTerminate", "foo", v1.RestartPolicyNever, parallelism, completions)
job, err := createV1Job(f.ClientSet, f.Namespace.Name, job)
Expect(err).NotTo(HaveOccurred())

By("Ensuring active pods == parallelism")
err = waitForAllPodsRunningV1(f.ClientSet, f.Namespace.Name, job.Name, parallelism)
Expect(err).NotTo(HaveOccurred())

By("delete a job")
reaper, err := kubectl.ReaperFor(batchinternal.Kind("Job"), f.InternalClientset)
Expect(err).NotTo(HaveOccurred())
timeout := 1 * time.Minute
err = reaper.Stop(f.Namespace.Name, job.Name, timeout, api.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())

By("Ensuring job was deleted")
_, err = getV1Job(f.ClientSet, f.Namespace.Name, job.Name)
Expect(err).To(HaveOccurred())
Expect(errors.IsNotFound(err)).To(BeTrue())
})

It("should fail a job", func() {
By("Creating a job")
job := newTestV1Job("notTerminate", "foo", v1.RestartPolicyNever, parallelism, completions)
activeDeadlineSeconds := int64(10)
job.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds
job, err := createV1Job(f.ClientSet, f.Namespace.Name, job)
Expect(err).NotTo(HaveOccurred())

By("Ensuring job was failed")
err = waitForV1JobFail(f.ClientSet, f.Namespace.Name, job.Name, 20*time.Second)
if err == wait.ErrWaitTimeout {
job, err = getV1Job(f.ClientSet, f.Namespace.Name, job.Name)
Expect(err).NotTo(HaveOccurred())
// the job stabilized and won't be synced until modification or full
// resync happens, we don't want to wait for the latter so we force
// sync modifying it
_, err = framework.UpdateJobWithRetries(f.ClientSet, f.Namespace.Name, job.Name, func(update *batch.Job) {
update.Spec.Parallelism = &completions
})
Expect(err).NotTo(HaveOccurred())
err = waitForV1JobFail(f.ClientSet, f.Namespace.Name, job.Name, v1JobTimeout)
}
Expect(err).NotTo(HaveOccurred())
})
})

// newTestV1Job returns a job which does one of several testing behaviors.
func newTestV1Job(behavior, name string, rPol v1.RestartPolicy, parallelism, completions int32) *batch.Job {
job := &batch.Job{
ObjectMeta: v1.ObjectMeta{
Name: name,
},
Spec: batch.JobSpec{
Parallelism: &parallelism,
Completions: &completions,
Template: v1.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{"somekey": "somevalue"},
},
Spec: v1.PodSpec{
RestartPolicy: rPol,
Volumes: []v1.Volume{
{
Name: "data",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
},
Containers: []v1.Container{
{
Name: "c",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{},
VolumeMounts: []v1.VolumeMount{
{
MountPath: "/data",
Name: "data",
},
},
},
},
},
},
},
}
switch behavior {
case "notTerminate":
job.Spec.Template.Spec.Containers[0].Command = []string{"sleep", "1000000"}
case "fail":
job.Spec.Template.Spec.Containers[0].Command = []string{"/bin/sh", "-c", "exit 1"}
case "succeed":
job.Spec.Template.Spec.Containers[0].Command = []string{"/bin/sh", "-c", "exit 0"}
case "randomlySucceedOrFail":
// Bash's $RANDOM generates pseudorandom int in range 0 - 32767.
// Dividing by 16384 gives roughly 50/50 chance of success.
job.Spec.Template.Spec.Containers[0].Command = []string{"/bin/sh", "-c", "exit $(( $RANDOM / 16384 ))"}
case "failOnce":
// Fail the first time the container of the pod is run, and
// succeed the second time. Checks for file on emptydir.
// If present, succeed. If not, create but fail.
// Note that this cannot be used with RestartNever because
// it always fails the first time for a pod.
job.Spec.Template.Spec.Containers[0].Command = []string{"/bin/sh", "-c", "if [[ -r /data/foo ]] ; then exit 0 ; else touch /data/foo ; exit 1 ; fi"}
}
return job
}

func getV1Job(c clientset.Interface, ns, name string) (*batch.Job, error) {
return c.Batch().Jobs(ns).Get(name, metav1.GetOptions{})
}

func createV1Job(c clientset.Interface, ns string, job *batch.Job) (*batch.Job, error) {
return c.Batch().Jobs(ns).Create(job)
}

func updateV1Job(c clientset.Interface, ns string, job *batch.Job) (*batch.Job, error) {
return c.Batch().Jobs(ns).Update(job)
}

func deleteV1Job(c clientset.Interface, ns, name string) error {
return c.Batch().Jobs(ns).Delete(name, v1.NewDeleteOptions(0))
}

// Wait for all pods to become Running. Only use when pods will run for a long time, or it will be racy.
func waitForAllPodsRunningV1(c clientset.Interface, ns, jobName string, parallelism int32) error {
label := labels.SelectorFromSet(labels.Set(map[string]string{v1JobSelectorKey: jobName}))
return wait.Poll(framework.Poll, v1JobTimeout, func() (bool, error) {
options := v1.ListOptions{LabelSelector: label.String()}
pods, err := c.Core().Pods(ns).List(options)
if err != nil {
return false, err
}
count := int32(0)
for _, p := range pods.Items {
if p.Status.Phase == v1.PodRunning {
count++
}
}
return count == parallelism, nil
})
}

// Wait for job to reach completions.
func waitForV1JobFinish(c clientset.Interface, ns, jobName string, completions int32) error {
return wait.Poll(framework.Poll, v1JobTimeout, func() (bool, error) {
curr, err := c.Batch().Jobs(ns).Get(jobName, metav1.GetOptions{})
if err != nil {
return false, err
}
return curr.Status.Succeeded == completions, nil
})
}

// Wait for job fail.
func waitForV1JobFail(c clientset.Interface, ns, jobName string, timeout time.Duration) error {
return wait.Poll(framework.Poll, timeout, func() (bool, error) {
curr, err := c.Batch().Jobs(ns).Get(jobName, metav1.GetOptions{})
if err != nil {
return false, err
}
for _, c := range curr.Status.Conditions {
if c.Type == batch.JobFailed && c.Status == v1.ConditionTrue {
return true, nil
}
}
return false, nil
})
}
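
The deleted helpers are all built on the same poll-until pattern: re-check a condition on a fixed interval until it reports done, errors, or the timeout lapses. A stripped-down, client-free sketch of that loop, with a generic condition function in place of the Kubernetes client calls:

package main

import (
	"errors"
	"fmt"
	"time"
)

// poll is a reduced stand-in for wait.Poll as used by the deleted
// waitForV1Job* helpers.
func poll(interval, timeout time.Duration, condition func() (bool, error)) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		done, err := condition()
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		time.Sleep(interval)
	}
	return errors.New("timed out waiting for the condition")
}

func main() {
	succeeded, completions := int32(0), int32(4)
	err := poll(10*time.Millisecond, time.Second, func() (bool, error) {
		succeeded++ // stands in for reading job.Status.Succeeded
		return succeeded == completions, nil
	})
	fmt.Println("done:", err)
}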

@ -40,8 +40,8 @@ func TestKubectlValidation(t *testing.T) {
// The following test the experimental api.
// TODO: Replace with something more robust. These may move.
{`{"apiVersion": "extensions/v1beta1", "kind": "Ingress"}`, false},
{`{"apiVersion": "extensions/v1beta1", "kind": "Job"}`, false},
{`{"apiVersion": "vNotAVersion", "kind": "Job"}`, false},
{`{"apiVersion": "extensions/v1beta1", "kind": "DaemonSet"}`, false},
{`{"apiVersion": "vNotAVersion", "kind": "DaemonSet"}`, false},
}
components := framework.NewMasterComponents(&framework.Config{})
defer components.Stop(true, true)

@ -38,7 +38,6 @@ import (
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered"
"k8s.io/kubernetes/pkg/apis/batch/v2alpha1"
metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
clienttypedv1 "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"

@ -170,146 +169,6 @@ func TestAutoscalingGroupBackwardCompatibility(t *testing.T) {
}
}

var jobV1beta1 string = `
{
"kind": "Job",
"apiVersion": "extensions/v1beta1",
"metadata": {
"name": "pi",
"labels": {
"app": "pi"
}
},
"spec": {
"parallelism": 1,
"completions": 1,
"selector": {
"matchLabels": {
"app": "pi"
}
},
"template": {
"metadata": {
"name": "pi",
"creationTimestamp": null,
"labels": {
"app": "pi"
}
},
"spec": {
"containers": [
{
"name": "pi",
"image": "perl",
"command": [
"perl",
"-Mbignum=bpi",
"-wle",
"print bpi(2000)"
]
}
],
"restartPolicy": "Never"
}
}
}
}
`

var jobV1 string = `
{
"kind": "Job",
"apiVersion": "batch/v1",
"metadata": {
"name": "pi"
},
"spec": {
"parallelism": 1,
"completions": 1,
"template": {
"metadata": {
"name": "pi",
"creationTimestamp": null
},
"spec": {
"containers": [
{
"name": "pi",
"image": "perl",
"command": [
"perl",
"-Mbignum=bpi",
"-wle",
"print bpi(2000)"
]
}
],
"restartPolicy": "Never"
}
}
}
}
`

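The two fixtures above are plain JSON, so their shape can be checked outside the apiserver. A small sketch that decodes the batch/v1 fixture's key fields with encoding/json; the struct mirrors only the fields exercised here, not the full Job type:

package main

import (
	"encoding/json"
	"fmt"
)

// jobFixture is a minimal mirror of the fields the fixture sets.
type jobFixture struct {
	Kind       string `json:"kind"`
	APIVersion string `json:"apiVersion"`
	Spec       struct {
		Parallelism int `json:"parallelism"`
		Completions int `json:"completions"`
	} `json:"spec"`
}

func main() {
	const jobV1 = `{"kind":"Job","apiVersion":"batch/v1","spec":{"parallelism":1,"completions":1}}`
	var j jobFixture
	if err := json.Unmarshal([]byte(jobV1), &j); err != nil {
		panic(err)
	}
	fmt.Printf("%s %s parallelism=%d\n", j.Kind, j.APIVersion, j.Spec.Parallelism)
}
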
// TestBatchGroupBackwardCompatibility is testing that batch/v1 and ext/v1beta1
// Job share storage. This test can be deleted when Jobs is removed from ext/v1beta1,
// (expected to happen in 1.4).
func TestBatchGroupBackwardCompatibility(t *testing.T) {
if *testapi.Batch.GroupVersion() == v2alpha1.SchemeGroupVersion {
t.Skip("Shared job storage is not required for batch/v2alpha1.")
}
_, s := framework.RunAMaster(nil)
defer s.Close()
transport := http.DefaultTransport

requests := []struct {
verb string
URL string
body string
expectedStatusCodes map[int]bool
expectedVersion string
}{
// Post a v1 and get back both as v1beta1 and as v1.
{"POST", batchPath("jobs", api.NamespaceDefault, ""), jobV1, integration.Code201, ""},
{"GET", batchPath("jobs", api.NamespaceDefault, "pi"), "", integration.Code200, testapi.Batch.GroupVersion().String()},
{"GET", extensionsPath("jobs", api.NamespaceDefault, "pi"), "", integration.Code200, testapi.Extensions.GroupVersion().String()},
{"DELETE", batchPath("jobs", api.NamespaceDefault, "pi"), "", integration.Code200, registered.GroupOrDie(api.GroupName).GroupVersion.String()}, // status response
// Post a v1beta1 and get back both as v1beta1 and as v1.
{"POST", extensionsPath("jobs", api.NamespaceDefault, ""), jobV1beta1, integration.Code201, ""},
{"GET", batchPath("jobs", api.NamespaceDefault, "pi"), "", integration.Code200, testapi.Batch.GroupVersion().String()},
{"GET", extensionsPath("jobs", api.NamespaceDefault, "pi"), "", integration.Code200, testapi.Extensions.GroupVersion().String()},
{"DELETE", extensionsPath("jobs", api.NamespaceDefault, "pi"), "", integration.Code200, registered.GroupOrDie(api.GroupName).GroupVersion.String()}, // status response
}

for _, r := range requests {
bodyBytes := bytes.NewReader([]byte(r.body))
req, err := http.NewRequest(r.verb, s.URL+r.URL, bodyBytes)
if err != nil {
t.Logf("case %v", r)
t.Fatalf("unexpected error: %v", err)
}
func() {
resp, err := transport.RoundTrip(req)
if err != nil {
t.Logf("case %v", r)
t.Fatalf("unexpected error: %v", err)
}
defer resp.Body.Close()
b, _ := ioutil.ReadAll(resp.Body)
body := string(b)
if _, ok := r.expectedStatusCodes[resp.StatusCode]; !ok {
t.Logf("case %v", r)
t.Errorf("Expected status one of %v, but got %v", r.expectedStatusCodes, resp.StatusCode)
t.Errorf("Body: %v", body)
}
if !strings.Contains(body, "\"apiVersion\":\""+r.expectedVersion) {
t.Logf("case %v", r)
t.Errorf("Expected version %v, got body %v", r.expectedVersion, body)
}
}()
}
}

func TestAccept(t *testing.T) {
_, s := framework.RunAMaster(nil)
defer s.Close()