mirror of https://github.com/k3s-io/k3s

Remove kubectl scale job

parent b49d429f64
commit 85d8b0ce3f
@@ -1,11 +1,8 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = [
        "scale.go",
        "scalejob.go",
    ],
    srcs = ["scale.go"],
    importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/scale",
    visibility = ["//visibility:public"],
    deps = [
@@ -13,38 +10,17 @@ go_library(
        "//pkg/kubectl/cmd/util:go_default_library",
        "//pkg/kubectl/util/i18n:go_default_library",
        "//pkg/kubectl/util/templates:go_default_library",
        "//staging/src/k8s.io/api/batch/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//staging/src/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library",
        "//staging/src/k8s.io/cli-runtime/pkg/printers:go_default_library",
        "//staging/src/k8s.io/cli-runtime/pkg/resource:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes/typed/batch/v1:go_default_library",
        "//vendor/github.com/spf13/cobra:go_default_library",
        "//vendor/k8s.io/klog:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["scalejob_test.go"],
    embed = [":go_default_library"],
    deps = [
        "//staging/src/k8s.io/api/batch/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes/typed/batch/v1:go_default_library",
        "//staging/src/k8s.io/client-go/testing:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
@@ -24,13 +24,11 @@ import (
    "k8s.io/klog"

    "k8s.io/apimachinery/pkg/api/meta"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/cli-runtime/pkg/genericclioptions"
    "k8s.io/cli-runtime/pkg/printers"
    "k8s.io/cli-runtime/pkg/resource"
    "k8s.io/client-go/kubernetes"
    batchclient "k8s.io/client-go/kubernetes/typed/batch/v1"
    "k8s.io/kubernetes/pkg/kubectl"
    cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
    "k8s.io/kubernetes/pkg/kubectl/util/i18n"
@@ -229,18 +227,8 @@ func (o *ScaleOptions) RunScale() error {
    }

    mapping := info.ResourceMapping()
    if mapping.Resource.GroupResource() == (schema.GroupResource{Group: "batch", Resource: "jobs"}) {
        // go down the legacy jobs path. This can be removed in 3.14 For now, contain it.
        fmt.Fprintf(o.ErrOut, "%s scale job is DEPRECATED and will be removed in a future version.\n", o.parent)

        if err := ScaleJob(info, o.clientSet.BatchV1(), uint(o.Replicas), precondition, retry, waitForReplicas); err != nil {
            return err
        }

    } else {
        if err := o.scaler.Scale(info.Namespace, info.Name, uint(o.Replicas), precondition, retry, waitForReplicas, mapping.Resource.GroupResource()); err != nil {
            return err
        }
    }
    if err := o.scaler.Scale(info.Namespace, info.Name, uint(o.Replicas), precondition, retry, waitForReplicas, mapping.Resource.GroupResource()); err != nil {
        return err
    }

    // if the recorder makes a change, compute and create another patch
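The branch removed above special-cased batch/v1 Jobs because they predate the scale subresource; every other workload already goes through the generic scale client that o.scaler wraps. Below is a minimal sketch of that generic path — an illustration, not the kubectl implementation — assuming a ScalesGetter built elsewhere (kubectl obtains one via cmdutil.ScaleClientFn) and the context-free client-go signatures this tree vendors:

package scale

import (
    "k8s.io/apimachinery/pkg/runtime/schema"
    scaleclient "k8s.io/client-go/scale"
)

// setReplicas drives any scale-subresource-aware resource to the desired
// count: read the scale subresource, mutate only Spec.Replicas, write it back.
// The name setReplicas is hypothetical; kubectl's Scaler adds conflict retries
// on top of the same two calls.
func setReplicas(scales scaleclient.ScalesGetter, namespace, name string, gr schema.GroupResource, replicas int32) error {
    s, err := scales.Scales(namespace).Get(gr, name)
    if err != nil {
        return err
    }
    s.Spec.Replicas = replicas
    _, err = scales.Scales(namespace).Update(gr, s)
    return err
}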
@@ -269,26 +257,6 @@ func (o *ScaleOptions) RunScale() error {
    return nil
}

func ScaleJob(info *resource.Info, jobsClient batchclient.JobsGetter, count uint, preconditions *kubectl.ScalePrecondition, retry, waitForReplicas *kubectl.RetryParams) error {
    scaler := JobPsuedoScaler{
        JobsClient: jobsClient,
    }
    var jobPreconditions *ScalePrecondition
    if preconditions != nil {
        jobPreconditions = &ScalePrecondition{Size: preconditions.Size, ResourceVersion: preconditions.ResourceVersion}
    }
    var jobRetry *RetryParams
    if retry != nil {
        jobRetry = &RetryParams{Interval: retry.Interval, Timeout: retry.Timeout}
    }
    var jobWaitForReplicas *RetryParams
    if waitForReplicas != nil {
        jobWaitForReplicas = &RetryParams{Interval: waitForReplicas.Interval, Timeout: waitForReplicas.Timeout}
    }

    return scaler.Scale(info.Namespace, info.Name, count, jobPreconditions, jobRetry, jobWaitForReplicas)
}

func scaler(f cmdutil.Factory) (kubectl.Scaler, error) {
    scalesGetter, err := cmdutil.ScaleClientFn(f)
    if err != nil {
@@ -1,161 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package scale

import (
    "fmt"
    "strconv"
    "time"

    batch "k8s.io/api/batch/v1"
    "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/util/wait"
    batchclient "k8s.io/client-go/kubernetes/typed/batch/v1"
)

// ScalePrecondition is a deprecated precondition
type ScalePrecondition struct {
    Size            int
    ResourceVersion string
}

// RetryParams is a deprecated retry struct
type RetryParams struct {
    Interval, Timeout time.Duration
}

// PreconditionError is a deprecated error
type PreconditionError struct {
    Precondition  string
    ExpectedValue string
    ActualValue   string
}

func (pe PreconditionError) Error() string {
    return fmt.Sprintf("Expected %s to be %s, was %s", pe.Precondition, pe.ExpectedValue, pe.ActualValue)
}

// ScaleCondition is a closure around Scale that facilitates retries via util.wait
func scaleCondition(r *JobPsuedoScaler, precondition *ScalePrecondition, namespace, name string, count uint, updatedResourceVersion *string) wait.ConditionFunc {
    return func() (bool, error) {
        rv, err := r.ScaleSimple(namespace, name, precondition, count)
        if updatedResourceVersion != nil {
            *updatedResourceVersion = rv
        }
        // Retry only on update conflicts.
        if errors.IsConflict(err) {
            return false, nil
        }
        if err != nil {
            return false, err
        }
        return true, nil
    }
}

// JobPsuedoScaler is a deprecated scale-similar thing that doesn't obey scale semantics
type JobPsuedoScaler struct {
    JobsClient batchclient.JobsGetter
}

// ScaleSimple is responsible for updating job's parallelism. It returns the
// resourceVersion of the job if the update is successful.
func (scaler *JobPsuedoScaler) ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint) (string, error) {
    job, err := scaler.JobsClient.Jobs(namespace).Get(name, metav1.GetOptions{})
    if err != nil {
        return "", err
    }
    if preconditions != nil {
        if err := validateJob(job, preconditions); err != nil {
            return "", err
        }
    }
    parallelism := int32(newSize)
    job.Spec.Parallelism = &parallelism
    updatedJob, err := scaler.JobsClient.Jobs(namespace).Update(job)
    if err != nil {
        return "", err
    }
    return updatedJob.ObjectMeta.ResourceVersion, nil
}

// Scale updates a Job to a new size, with optional precondition check (if preconditions is not nil),
// optional retries (if retry is not nil), and then optionally waits for parallelism to reach desired
// number, which can be less than requested based on job's current progress.
func (scaler *JobPsuedoScaler) Scale(namespace, name string, newSize uint, preconditions *ScalePrecondition, retry, waitForReplicas *RetryParams) error {
    if preconditions == nil {
        preconditions = &ScalePrecondition{-1, ""}
    }
    if retry == nil {
        // Make it try only once, immediately
        retry = &RetryParams{Interval: time.Millisecond, Timeout: time.Millisecond}
    }
    cond := scaleCondition(scaler, preconditions, namespace, name, newSize, nil)
    if err := wait.PollImmediate(retry.Interval, retry.Timeout, cond); err != nil {
        return err
    }
    if waitForReplicas != nil {
        job, err := scaler.JobsClient.Jobs(namespace).Get(name, metav1.GetOptions{})
        if err != nil {
            return err
        }
        err = wait.PollImmediate(waitForReplicas.Interval, waitForReplicas.Timeout, jobHasDesiredParallelism(scaler.JobsClient, job))
        if err == wait.ErrWaitTimeout {
            return fmt.Errorf("timed out waiting for %q to be synced", name)
        }
        return err
    }
    return nil
}

// JobHasDesiredParallelism returns a condition that will be true if the desired parallelism count
// for a job equals the current active counts or is less by an appropriate successful/unsuccessful count.
func jobHasDesiredParallelism(jobClient batchclient.JobsGetter, job *batch.Job) wait.ConditionFunc {
    return func() (bool, error) {
        job, err := jobClient.Jobs(job.Namespace).Get(job.Name, metav1.GetOptions{})
        if err != nil {
            return false, err
        }

        // desired parallelism can be either the exact number, in which case return immediately
        if job.Status.Active == *job.Spec.Parallelism {
            return true, nil
        }
        if job.Spec.Completions == nil {
            // A job without specified completions needs to wait for Active to reach Parallelism.
            return false, nil
        }

        // otherwise count successful
        progress := *job.Spec.Completions - job.Status.Active - job.Status.Succeeded
        return progress <= 0, nil
    }
}

func validateJob(job *batch.Job, precondition *ScalePrecondition) error {
    if precondition.Size != -1 && job.Spec.Parallelism == nil {
        return PreconditionError{"parallelism", strconv.Itoa(precondition.Size), "nil"}
    }
    if precondition.Size != -1 && int(*job.Spec.Parallelism) != precondition.Size {
        return PreconditionError{"parallelism", strconv.Itoa(precondition.Size), strconv.Itoa(int(*job.Spec.Parallelism))}
    }
    if len(precondition.ResourceVersion) != 0 && job.ResourceVersion != precondition.ResourceVersion {
        return PreconditionError{"resource version", precondition.ResourceVersion, job.ResourceVersion}
    }
    return nil
}
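The one non-obvious piece of the deleted scaler is the completion arithmetic in jobHasDesiredParallelism: a finishing Job may never have Active equal to Parallelism, so the condition also accepts "close enough, given how many pods already finished". A worked example with made-up numbers:

package scale

import "fmt"

// ExampleJobProgress: with *job.Spec.Completions = 10, job.Status.Active = 3
// and job.Status.Succeeded = 7, progress = 10 - 3 - 7 = 0, so the condition
// reports done even though Active (3) never reached the requested parallelism.
func ExampleJobProgress() {
    completions, active, succeeded := int32(10), int32(3), int32(7)
    progress := completions - active - succeeded
    fmt.Println(progress <= 0)
    // Output: true
}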
@@ -1,292 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package scale

import (
    "errors"
    "testing"

    batch "k8s.io/api/batch/v1"
    kerrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    api "k8s.io/apimachinery/pkg/apis/testapigroup/v1"
    "k8s.io/client-go/kubernetes/fake"
    batchclient "k8s.io/client-go/kubernetes/typed/batch/v1"
    testcore "k8s.io/client-go/testing"
)

type errorJobs struct {
    batchclient.JobInterface
    conflict bool
    invalid  bool
}

func (c *errorJobs) Update(job *batch.Job) (*batch.Job, error) {
    switch {
    case c.invalid:
        return nil, kerrors.NewInvalid(api.Kind(job.Kind), job.Name, nil)
    case c.conflict:
        return nil, kerrors.NewConflict(api.Resource(job.Kind), job.Name, nil)
    }
    return nil, errors.New("Job update failure")
}

func (c *errorJobs) Get(name string, options metav1.GetOptions) (*batch.Job, error) {
    zero := int32(0)
    return &batch.Job{
        Spec: batch.JobSpec{
            Parallelism: &zero,
        },
    }, nil
}

type errorJobClient struct {
    batchclient.JobsGetter
    conflict bool
    invalid  bool
}

func (c *errorJobClient) Jobs(namespace string) batchclient.JobInterface {
    return &errorJobs{
        JobInterface: c.JobsGetter.Jobs(namespace),
        conflict:     c.conflict,
        invalid:      c.invalid,
    }
}

func TestJobScaleRetry(t *testing.T) {
    fake := &errorJobClient{JobsGetter: fake.NewSimpleClientset().BatchV1(), conflict: true}
    scaler := &JobPsuedoScaler{JobsClient: fake}
    preconditions := ScalePrecondition{-1, ""}
    count := uint(3)
    name := "foo"
    namespace := "default"

    scaleFunc := scaleCondition(scaler, &preconditions, namespace, name, count, nil)
    pass, err := scaleFunc()
    if pass != false {
        t.Errorf("Expected an update failure to return pass = false, got pass = %v", pass)
    }
    if err != nil {
        t.Errorf("Did not expect an error on update failure, got %v", err)
    }
    preconditions = ScalePrecondition{3, ""}
    scaleFunc = scaleCondition(scaler, &preconditions, namespace, name, count, nil)
    pass, err = scaleFunc()
    if err == nil {
        t.Error("Expected error on precondition failure")
    }
}

func job() *batch.Job {
    return &batch.Job{
        ObjectMeta: metav1.ObjectMeta{
            Namespace: metav1.NamespaceDefault,
            Name:      "foo",
        },
    }
}

func TestJobScale(t *testing.T) {
    fakeClientset := fake.NewSimpleClientset(job())
    scaler := &JobPsuedoScaler{JobsClient: fakeClientset.BatchV1()}
    preconditions := ScalePrecondition{-1, ""}
    count := uint(3)
    name := "foo"
    scaler.Scale("default", name, count, &preconditions, nil, nil)

    actions := fakeClientset.Actions()
    if len(actions) != 2 {
        t.Errorf("unexpected actions: %v, expected 2 actions (get, update)", actions)
    }
    if action, ok := actions[0].(testcore.GetAction); !ok || action.GetResource().GroupResource() != batch.Resource("jobs") || action.GetName() != name {
        t.Errorf("unexpected action: %v, expected get-job %s", actions[0], name)
    }
    if action, ok := actions[1].(testcore.UpdateAction); !ok || action.GetResource().GroupResource() != batch.Resource("jobs") || *action.GetObject().(*batch.Job).Spec.Parallelism != int32(count) {
        t.Errorf("unexpected action %v, expected update-job with parallelism = %d", actions[1], count)
    }
}

func TestJobScaleInvalid(t *testing.T) {
    fake := &errorJobClient{JobsGetter: fake.NewSimpleClientset().BatchV1(), invalid: true}
    scaler := &JobPsuedoScaler{JobsClient: fake}
    preconditions := ScalePrecondition{-1, ""}
    count := uint(3)
    name := "foo"
    namespace := "default"

    scaleFunc := scaleCondition(scaler, &preconditions, namespace, name, count, nil)
    pass, err := scaleFunc()
    if pass {
        t.Errorf("Expected an update failure to return pass = false, got pass = %v", pass)
    }
    if err == nil {
        t.Errorf("Expected error on invalid update failure, got %v", err)
    }
}

func TestJobScaleFailsPreconditions(t *testing.T) {
    ten := int32(10)
    fake := fake.NewSimpleClientset(&batch.Job{
        ObjectMeta: metav1.ObjectMeta{
            Namespace: metav1.NamespaceDefault,
            Name:      "foo",
        },
        Spec: batch.JobSpec{
            Parallelism: &ten,
        },
    })
    scaler := &JobPsuedoScaler{JobsClient: fake.BatchV1()}
    preconditions := ScalePrecondition{2, ""}
    count := uint(3)
    name := "foo"
    scaler.Scale("default", name, count, &preconditions, nil, nil)

    actions := fake.Actions()
    if len(actions) != 1 {
        t.Errorf("unexpected actions: %v, expected 1 actions (get)", actions)
    }
    if action, ok := actions[0].(testcore.GetAction); !ok || action.GetResource().GroupResource() != batch.Resource("jobs") || action.GetName() != name {
        t.Errorf("unexpected action: %v, expected get-job %s", actions[0], name)
    }
}

func TestValidateJob(t *testing.T) {
    zero, ten, twenty := int32(0), int32(10), int32(20)
    tests := []struct {
        preconditions ScalePrecondition
        job           batch.Job
        expectError   bool
        test          string
    }{
        {
            preconditions: ScalePrecondition{-1, ""},
            expectError:   false,
            test:          "defaults",
        },
        {
            preconditions: ScalePrecondition{-1, ""},
            job: batch.Job{
                ObjectMeta: metav1.ObjectMeta{
                    ResourceVersion: "foo",
                },
                Spec: batch.JobSpec{
                    Parallelism: &ten,
                },
            },
            expectError: false,
            test:        "defaults 2",
        },
        {
            preconditions: ScalePrecondition{0, ""},
            job: batch.Job{
                ObjectMeta: metav1.ObjectMeta{
                    ResourceVersion: "foo",
                },
                Spec: batch.JobSpec{
                    Parallelism: &zero,
                },
            },
            expectError: false,
            test:        "size matches",
        },
        {
            preconditions: ScalePrecondition{-1, "foo"},
            job: batch.Job{
                ObjectMeta: metav1.ObjectMeta{
                    ResourceVersion: "foo",
                },
                Spec: batch.JobSpec{
                    Parallelism: &ten,
                },
            },
            expectError: false,
            test:        "resource version matches",
        },
        {
            preconditions: ScalePrecondition{10, "foo"},
            job: batch.Job{
                ObjectMeta: metav1.ObjectMeta{
                    ResourceVersion: "foo",
                },
                Spec: batch.JobSpec{
                    Parallelism: &ten,
                },
            },
            expectError: false,
            test:        "both match",
        },
        {
            preconditions: ScalePrecondition{10, "foo"},
            job: batch.Job{
                ObjectMeta: metav1.ObjectMeta{
                    ResourceVersion: "foo",
                },
                Spec: batch.JobSpec{
                    Parallelism: &twenty,
                },
            },
            expectError: true,
            test:        "size different",
        },
        {
            preconditions: ScalePrecondition{10, "foo"},
            job: batch.Job{
                ObjectMeta: metav1.ObjectMeta{
                    ResourceVersion: "foo",
                },
            },
            expectError: true,
            test:        "parallelism nil",
        },
        {
            preconditions: ScalePrecondition{10, "foo"},
            job: batch.Job{
                ObjectMeta: metav1.ObjectMeta{
                    ResourceVersion: "bar",
                },
                Spec: batch.JobSpec{
                    Parallelism: &ten,
                },
            },
            expectError: true,
            test:        "version different",
        },
        {
            preconditions: ScalePrecondition{10, "foo"},
            job: batch.Job{
                ObjectMeta: metav1.ObjectMeta{
                    ResourceVersion: "bar",
                },
                Spec: batch.JobSpec{
                    Parallelism: &twenty,
                },
            },
            expectError: true,
            test:        "both different",
        },
    }
    for _, test := range tests {
        err := validateJob(&test.job, &test.preconditions)
        if err != nil && !test.expectError {
            t.Errorf("unexpected error: %v (%s)", err, test.test)
        }
        if err == nil && test.expectError {
            t.Errorf("expected an error: %v (%s)", err, test.test)
        }
    }
}
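The deleted tests stub exactly one method by embedding the interface: errorJobs embeds batchclient.JobInterface, so Update is intercepted while every other call falls through to the fake clientset. A self-contained sketch of the same pattern, using a hypothetical two-method interface rather than the real client-go types:

package main

import (
    "errors"
    "fmt"
)

// Jobs is a hypothetical stand-in for batchclient.JobInterface.
type Jobs interface {
    Get(name string) (string, error)
    Update(name string) (string, error)
}

type realJobs struct{}

func (realJobs) Get(name string) (string, error)    { return name, nil }
func (realJobs) Update(name string) (string, error) { return name, nil }

// errorJobs overrides only Update; Get delegates to the embedded value,
// mirroring how the tests above simulate conflict and invalid errors.
type errorJobs struct {
    Jobs
}

func (errorJobs) Update(name string) (string, error) {
    return "", errors.New("simulated conflict")
}

func main() {
    j := errorJobs{Jobs: realJobs{}}
    got, _ := j.Get("pi")    // falls through to realJobs
    _, err := j.Update("pi") // hits the override
    fmt.Println(got, err)
}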
@@ -1107,15 +1107,6 @@ run_rc_tests() {
  # Clean-up
  kubectl delete rc redis-{master,slave} "${kube_flags[@]}"

  ### Scale a job
  kubectl create -f test/fixtures/doc-yaml/user-guide/job.yaml "${kube_flags[@]}"
  # Command
  kubectl scale --replicas=2 job/pi
  # Post-condition: 2 replicas for pi
  kube::test::get_object_assert 'job pi' "{{$job_parallelism_field}}" '2'
  # Clean-up
  kubectl delete job/pi "${kube_flags[@]}"

  ### Scale a deployment
  kubectl create -f test/fixtures/doc-yaml/user-guide/deployment.yaml "${kube_flags[@]}"
  # Command
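With the pseudo-scaler gone there is nothing left for this fixture to exercise: batch/v1 Jobs do not expose a scale subresource, so the generic scale client cannot drive them, and kubectl scale job/pi should now fail outright instead of silently bumping parallelism — which is presumably why the test case is deleted rather than rewritten.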