diff --git a/pkg/api/defaulting_test.go b/pkg/api/defaulting_test.go
index 562f1fda8b..1e9823a97f 100644
--- a/pkg/api/defaulting_test.go
+++ b/pkg/api/defaulting_test.go
@@ -95,8 +95,6 @@ func TestDefaulting(t *testing.T) {
 		{Group: "batch", Version: "v2alpha1", Kind: "Job"}: {},
 		{Group: "batch", Version: "v2alpha1", Kind: "JobList"}: {},
 		{Group: "batch", Version: "v2alpha1", Kind: "JobTemplate"}: {},
-		{Group: "batch", Version: "v2alpha1", Kind: "ScheduledJob"}: {},
-		{Group: "batch", Version: "v2alpha1", Kind: "ScheduledJobList"}: {},
 		{Group: "certificates.k8s.io", Version: "v1beta1", Kind: "CertificateSigningRequest"}: {},
 		{Group: "certificates.k8s.io", Version: "v1beta1", Kind: "CertificateSigningRequestList"}: {},
 		{Group: "componentconfig", Version: "v1alpha1", Kind: "KubeProxyConfiguration"}: {},
diff --git a/pkg/apis/batch/register.go b/pkg/apis/batch/register.go
index 4601ca4ec1..49414bd3fc 100644
--- a/pkg/apis/batch/register.go
+++ b/pkg/apis/batch/register.go
@@ -51,7 +51,5 @@ func addKnownTypes(scheme *runtime.Scheme) error {
 		&CronJob{},
 		&CronJobList{},
 	)
-	scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("ScheduledJob"), &CronJob{})
-	scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("ScheduledJobList"), &CronJobList{})
 	return nil
 }
diff --git a/pkg/kubectl/cmd/util/factory_client_access.go b/pkg/kubectl/cmd/util/factory_client_access.go
index 7e04fb8e80..ebcc2a8e16 100644
--- a/pkg/kubectl/cmd/util/factory_client_access.go
+++ b/pkg/kubectl/cmd/util/factory_client_access.go
@@ -474,7 +474,6 @@ const (
 	DeploymentBasicAppsV1Beta1GeneratorName = "deployment-basic/apps.v1beta1"
 	JobV1GeneratorName = "job/v1"
 	CronJobV2Alpha1GeneratorName = "cronjob/v2alpha1"
-	ScheduledJobV2Alpha1GeneratorName = "scheduledjob/v2alpha1"
 	NamespaceV1GeneratorName = "namespace/v1"
 	ResourceQuotaV1GeneratorName = "resourcequotas/v1"
 	SecretV1GeneratorName = "secret/v1"
@@ -521,7 +520,6 @@ func DefaultGenerators(cmdName string) map[string]kubectl.Generator {
 			DeploymentV1Beta1GeneratorName: kubectl.DeploymentV1Beta1{},
 			DeploymentAppsV1Beta1GeneratorName: kubectl.DeploymentAppsV1Beta1{},
 			JobV1GeneratorName: kubectl.JobV1{},
-			ScheduledJobV2Alpha1GeneratorName: kubectl.CronJobV2Alpha1{},
 			CronJobV2Alpha1GeneratorName: kubectl.CronJobV2Alpha1{},
 		}
 	case "autoscale":
diff --git a/pkg/registry/batch/rest/storage_batch.go b/pkg/registry/batch/rest/storage_batch.go
index bc16427c30..c11d1ddc15 100644
--- a/pkg/registry/batch/rest/storage_batch.go
+++ b/pkg/registry/batch/rest/storage_batch.go
@@ -19,7 +19,6 @@ package rest
 import (
 	batchapiv1 "k8s.io/api/batch/v1"
 	batchapiv2alpha1 "k8s.io/api/batch/v2alpha1"
-	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apiserver/pkg/registry/generic"
 	"k8s.io/apiserver/pkg/registry/rest"
 	genericapiserver "k8s.io/apiserver/pkg/server"
@@ -37,18 +36,14 @@ func (p RESTStorageProvider) NewRESTStorage(apiResourceConfigSource serverstorag
 	// If you add a version here, be sure to add an entry in `k8s.io/kubernetes/cmd/kube-apiserver/app/aggregator.go with specific priorities.
 	// TODO refactor the plumbing to provide the information in the APIGroupInfo

-	if apiResourceConfigSource.AnyResourcesForVersionEnabled(batchapiv2alpha1.SchemeGroupVersion) {
-		apiGroupInfo.VersionedResourcesStorageMap[batchapiv2alpha1.SchemeGroupVersion.Version] = p.v2alpha1Storage(apiResourceConfigSource, restOptionsGetter)
-		apiGroupInfo.GroupMeta.GroupVersion = batchapiv2alpha1.SchemeGroupVersion
-		apiGroupInfo.SubresourceGroupVersionKind = map[string]schema.GroupVersionKind{
-			"scheduledjobs": batchapiv2alpha1.SchemeGroupVersion.WithKind("ScheduledJob"),
-			"scheduledjobs/status": batchapiv2alpha1.SchemeGroupVersion.WithKind("ScheduledJob"),
-		}
-	}
 	if apiResourceConfigSource.AnyResourcesForVersionEnabled(batchapiv1.SchemeGroupVersion) {
 		apiGroupInfo.VersionedResourcesStorageMap[batchapiv1.SchemeGroupVersion.Version] = p.v1Storage(apiResourceConfigSource, restOptionsGetter)
 		apiGroupInfo.GroupMeta.GroupVersion = batchapiv1.SchemeGroupVersion
 	}
+	if apiResourceConfigSource.AnyResourcesForVersionEnabled(batchapiv2alpha1.SchemeGroupVersion) {
+		apiGroupInfo.VersionedResourcesStorageMap[batchapiv2alpha1.SchemeGroupVersion.Version] = p.v2alpha1Storage(apiResourceConfigSource, restOptionsGetter)
+		apiGroupInfo.GroupMeta.GroupVersion = batchapiv2alpha1.SchemeGroupVersion
+	}

 	return apiGroupInfo, true
 }
@@ -73,8 +68,6 @@ func (p RESTStorageProvider) v2alpha1Storage(apiResourceConfigSource serverstora
 		cronJobsStorage, cronJobsStatusStorage := cronjobstore.NewREST(restOptionsGetter)
 		storage["cronjobs"] = cronJobsStorage
 		storage["cronjobs/status"] = cronJobsStatusStorage
-		storage["scheduledjobs"] = cronJobsStorage
-		storage["scheduledjobs/status"] = cronJobsStatusStorage
 	}
 	return storage
 }
diff --git a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go
index 9ae9620241..f69dfabd6e 100644
--- a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go
+++ b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go
@@ -181,7 +181,7 @@ func ClusterRoles() []rbac.ClusterRole {

 			rbac.NewRule(ReadWrite...).Groups(autoscalingGroup).Resources("horizontalpodautoscalers").RuleOrDie(),

-			rbac.NewRule(ReadWrite...).Groups(batchGroup).Resources("jobs", "cronjobs", "scheduledjobs").RuleOrDie(),
+			rbac.NewRule(ReadWrite...).Groups(batchGroup).Resources("jobs", "cronjobs").RuleOrDie(),

 			rbac.NewRule(ReadWrite...).Groups(extensionsGroup).Resources("daemonsets",
 				"deployments", "deployments/scale", "deployments/rollback", "ingresses",
@@ -213,7 +213,7 @@ func ClusterRoles() []rbac.ClusterRole {

 			rbac.NewRule(ReadWrite...).Groups(autoscalingGroup).Resources("horizontalpodautoscalers").RuleOrDie(),

-			rbac.NewRule(ReadWrite...).Groups(batchGroup).Resources("jobs", "cronjobs", "scheduledjobs").RuleOrDie(),
+			rbac.NewRule(ReadWrite...).Groups(batchGroup).Resources("jobs", "cronjobs").RuleOrDie(),

 			rbac.NewRule(ReadWrite...).Groups(extensionsGroup).Resources("daemonsets",
 				"deployments", "deployments/scale", "deployments/rollback", "ingresses",
@@ -237,7 +237,7 @@ func ClusterRoles() []rbac.ClusterRole {

 			rbac.NewRule(Read...).Groups(autoscalingGroup).Resources("horizontalpodautoscalers").RuleOrDie(),

-			rbac.NewRule(Read...).Groups(batchGroup).Resources("jobs", "cronjobs", "scheduledjobs").RuleOrDie(),
+			rbac.NewRule(Read...).Groups(batchGroup).Resources("jobs", "cronjobs").RuleOrDie(),

 			rbac.NewRule(Read...).Groups(extensionsGroup).Resources("daemonsets", "deployments", "deployments/scale",
 				"ingresses", "replicasets", "replicasets/scale", "replicationcontrollers/scale").RuleOrDie(),
diff --git a/staging/src/k8s.io/api/batch/v2alpha1/register.go b/staging/src/k8s.io/api/batch/v2alpha1/register.go
index b1c512f3ae..4b02c0f481 100644
--- a/staging/src/k8s.io/api/batch/v2alpha1/register.go
+++ b/staging/src/k8s.io/api/batch/v2alpha1/register.go
@@ -48,8 +48,6 @@ func addKnownTypes(scheme *runtime.Scheme) error {
 		&CronJob{},
 		&CronJobList{},
 	)
-	scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("ScheduledJob"), &CronJob{})
-	scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("ScheduledJobList"), &CronJobList{})
 	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
 	return nil
 }
diff --git a/test/e2e/apps/types.go b/test/e2e/apps/types.go
index 6e54767e6b..2f875e947a 100644
--- a/test/e2e/apps/types.go
+++ b/test/e2e/apps/types.go
@@ -32,6 +32,5 @@ const (
 )

 var (
-	CronJobGroupVersionResource      = schema.GroupVersionResource{Group: batchv2alpha1.GroupName, Version: "v2alpha1", Resource: "cronjobs"}
-	ScheduledJobGroupVersionResource = schema.GroupVersionResource{Group: batchv2alpha1.GroupName, Version: "v2alpha1", Resource: "scheduledjobs"}
+	CronJobGroupVersionResource = schema.GroupVersionResource{Group: batchv2alpha1.GroupName, Version: "v2alpha1", Resource: "cronjobs"}
 )
diff --git a/test/e2e/kubectl/kubectl.go b/test/e2e/kubectl/kubectl.go
index 8f15cfdf7b..9910c97ea6 100644
--- a/test/e2e/kubectl/kubectl.go
+++ b/test/e2e/kubectl/kubectl.go
@@ -146,8 +146,6 @@ var (
 	kubectlContainerExitCodeVersion = utilversion.MustParseSemantic("v1.4.0-alpha.3")

 	CronJobGroupVersionResource = schema.GroupVersionResource{Group: batchv2alpha1.GroupName, Version: "v2alpha1", Resource: "cronjobs"}
-
-	ScheduledJobGroupVersionResource = schema.GroupVersionResource{Group: batchv2alpha1.GroupName, Version: "v2alpha1", Resource: "scheduledjobs"}
 )

 // Stops everything from filePath from namespace ns and checks if everything matching selectors from the given namespace is correctly stopped.
@@ -199,44 +197,6 @@ var _ = SIGDescribe("Kubectl alpha client", func() {

 	// Customized Wait / ForEach wrapper for this test. These demonstrate the

-	framework.KubeDescribe("Kubectl run ScheduledJob", func() {
-		var nsFlag string
-		var sjName string
-
-		BeforeEach(func() {
-			nsFlag = fmt.Sprintf("--namespace=%v", ns)
-			sjName = "e2e-test-echo-scheduledjob"
-		})
-
-		AfterEach(func() {
-			framework.RunKubectlOrDie("delete", "cronjobs", sjName, nsFlag)
-		})
-
-		It("should create a ScheduledJob", func() {
-			framework.SkipIfMissingResource(f.ClientPool, ScheduledJobGroupVersionResource, f.Namespace.Name)
-
-			schedule := "*/5 * * * ?"
-			framework.RunKubectlOrDie("run", sjName, "--restart=OnFailure", "--generator=scheduledjob/v2alpha1",
-				"--schedule="+schedule, "--image="+busyboxImage, nsFlag)
-			By("verifying the ScheduledJob " + sjName + " was created")
-			sj, err := c.BatchV2alpha1().CronJobs(ns).Get(sjName, metav1.GetOptions{})
-			if err != nil {
-				framework.Failf("Failed getting ScheduledJob %s: %v", sjName, err)
-			}
-			if sj.Spec.Schedule != schedule {
-				framework.Failf("Failed creating a ScheduledJob with correct schedule %s, but got %s", schedule, sj.Spec.Schedule)
-			}
-			containers := sj.Spec.JobTemplate.Spec.Template.Spec.Containers
-			if containers == nil || len(containers) != 1 || containers[0].Image != busyboxImage {
-				framework.Failf("Failed creating ScheduledJob %s for 1 pod with expected image %s: %#v", sjName, busyboxImage, containers)
-			}
-			restartPolicy := sj.Spec.JobTemplate.Spec.Template.Spec.RestartPolicy
-			if sj.Spec.JobTemplate.Spec.Template.Spec.RestartPolicy != v1.RestartPolicyOnFailure {
-				framework.Failf("Failed creating a ScheduledJob with correct restart policy %s, but got %s", v1.RestartPolicyOnFailure, restartPolicy)
-			}
-		})
-	})
-
 	framework.KubeDescribe("Kubectl run CronJob", func() {
 		var nsFlag string
 		var cjName string
diff --git a/test/integration/etcd/etcd_storage_path_test.go b/test/integration/etcd/etcd_storage_path_test.go
index 8ccfa5bdc6..a05d4694b6 100644
--- a/test/integration/etcd/etcd_storage_path_test.go
+++ b/test/integration/etcd/etcd_storage_path_test.go
@@ -197,11 +197,6 @@ var etcdStorageData = map[schema.GroupVersionResource]struct {
 		stub:             `{"metadata": {"name": "cj1"}, "spec": {"jobTemplate": {"spec": {"template": {"metadata": {"labels": {"controller-uid": "uid0"}}, "spec": {"containers": [{"image": "fedora:latest", "name": "container0"}], "dnsPolicy": "ClusterFirst", "restartPolicy": "Never"}}}}, "schedule": "* * * * *"}}`,
 		expectedEtcdPath: "/registry/cronjobs/etcdstoragepathtestnamespace/cj1",
 	},
-	gvr("batch", "v2alpha1", "scheduledjobs"): {
-		stub:             `{"metadata": {"name": "cj2"}, "spec": {"jobTemplate": {"spec": {"template": {"metadata": {"labels": {"controller-uid": "uid0"}}, "spec": {"containers": [{"image": "fedora:latest", "name": "container0"}], "dnsPolicy": "ClusterFirst", "restartPolicy": "Never"}}}}, "schedule": "* * * * *"}}`,
-		expectedEtcdPath: "/registry/cronjobs/etcdstoragepathtestnamespace/cj2",
-		expectedGVK:      gvkP("batch", "v2alpha1", "CronJob"), // scheduledjobs were deprecated by cronjobs
-	},
 	// --

 	// k8s.io/kubernetes/pkg/apis/certificates/v1beta1