Remove ScheduledJobs support

pull/6/head
Maciej Szulik 2017-07-31 15:37:32 +02:00
parent 654de23cbf
commit e465962bf7
GPG Key ID: F15E55D276FA84C4
9 changed files with 8 additions and 69 deletions

View File

@@ -95,8 +95,6 @@ func TestDefaulting(t *testing.T) {
 {Group: "batch", Version: "v2alpha1", Kind: "Job"}: {},
 {Group: "batch", Version: "v2alpha1", Kind: "JobList"}: {},
 {Group: "batch", Version: "v2alpha1", Kind: "JobTemplate"}: {},
-{Group: "batch", Version: "v2alpha1", Kind: "ScheduledJob"}: {},
-{Group: "batch", Version: "v2alpha1", Kind: "ScheduledJobList"}: {},
 {Group: "certificates.k8s.io", Version: "v1beta1", Kind: "CertificateSigningRequest"}: {},
 {Group: "certificates.k8s.io", Version: "v1beta1", Kind: "CertificateSigningRequestList"}: {},
 {Group: "componentconfig", Version: "v1alpha1", Kind: "KubeProxyConfiguration"}: {},

View File

@@ -51,7 +51,5 @@ func addKnownTypes(scheme *runtime.Scheme) error {
 &CronJob{},
 &CronJobList{},
 )
-scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("ScheduledJob"), &CronJob{})
-scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("ScheduledJobList"), &CronJobList{})
 return nil
 }
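
Note: the two AddKnownTypeWithName calls dropped in this hunk are what made "ScheduledJob" a serialization alias for the CronJob struct. A minimal sketch of that mechanism, assuming a k8s.io/api release that still ships the batch/v2alpha1 package (this is illustrative code, not part of the commit):

```go
package main

import (
	"fmt"

	batchv2alpha1 "k8s.io/api/batch/v2alpha1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	gv := schema.GroupVersion{Group: "batch", Version: "v2alpha1"}
	scheme := runtime.NewScheme()

	// Regular registration, as kept by the hunk above: the CronJob structs
	// are registered under their own kinds (inferred from the struct names).
	scheme.AddKnownTypes(gv,
		&batchv2alpha1.CronJob{},
		&batchv2alpha1.CronJobList{},
	)

	// What the removed lines did: register the same Go structs under a second
	// kind, so payloads labelled "ScheduledJob" decoded into CronJob values.
	scheme.AddKnownTypeWithName(gv.WithKind("ScheduledJob"), &batchv2alpha1.CronJob{})
	scheme.AddKnownTypeWithName(gv.WithKind("ScheduledJobList"), &batchv2alpha1.CronJobList{})

	// Both kinds now resolve to the same reflect.Type in this scheme.
	for kind, t := range scheme.KnownTypes(gv) {
		fmt.Println(kind, "->", t)
	}
}
```

With the alias calls removed, a manifest whose kind is ScheduledJob no longer resolves to any registered type in this scheme.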

View File

@@ -474,7 +474,6 @@ const (
 DeploymentBasicAppsV1Beta1GeneratorName = "deployment-basic/apps.v1beta1"
 JobV1GeneratorName = "job/v1"
 CronJobV2Alpha1GeneratorName = "cronjob/v2alpha1"
-ScheduledJobV2Alpha1GeneratorName = "scheduledjob/v2alpha1"
 NamespaceV1GeneratorName = "namespace/v1"
 ResourceQuotaV1GeneratorName = "resourcequotas/v1"
 SecretV1GeneratorName = "secret/v1"
@@ -521,7 +520,6 @@ func DefaultGenerators(cmdName string) map[string]kubectl.Generator {
 DeploymentV1Beta1GeneratorName: kubectl.DeploymentV1Beta1{},
 DeploymentAppsV1Beta1GeneratorName: kubectl.DeploymentAppsV1Beta1{},
 JobV1GeneratorName: kubectl.JobV1{},
-ScheduledJobV2Alpha1GeneratorName: kubectl.CronJobV2Alpha1{},
 CronJobV2Alpha1GeneratorName: kubectl.CronJobV2Alpha1{},
 }
 case "autoscale":

View File

@@ -19,7 +19,6 @@ package rest
 import (
 batchapiv1 "k8s.io/api/batch/v1"
 batchapiv2alpha1 "k8s.io/api/batch/v2alpha1"
-"k8s.io/apimachinery/pkg/runtime/schema"
 "k8s.io/apiserver/pkg/registry/generic"
 "k8s.io/apiserver/pkg/registry/rest"
 genericapiserver "k8s.io/apiserver/pkg/server"
@@ -37,18 +36,14 @@ func (p RESTStorageProvider) NewRESTStorage(apiResourceConfigSource serverstorag
 // If you add a version here, be sure to add an entry in `k8s.io/kubernetes/cmd/kube-apiserver/app/aggregator.go with specific priorities.
 // TODO refactor the plumbing to provide the information in the APIGroupInfo
-if apiResourceConfigSource.AnyResourcesForVersionEnabled(batchapiv2alpha1.SchemeGroupVersion) {
-apiGroupInfo.VersionedResourcesStorageMap[batchapiv2alpha1.SchemeGroupVersion.Version] = p.v2alpha1Storage(apiResourceConfigSource, restOptionsGetter)
-apiGroupInfo.GroupMeta.GroupVersion = batchapiv2alpha1.SchemeGroupVersion
-apiGroupInfo.SubresourceGroupVersionKind = map[string]schema.GroupVersionKind{
-"scheduledjobs": batchapiv2alpha1.SchemeGroupVersion.WithKind("ScheduledJob"),
-"scheduledjobs/status": batchapiv2alpha1.SchemeGroupVersion.WithKind("ScheduledJob"),
-}
-}
 if apiResourceConfigSource.AnyResourcesForVersionEnabled(batchapiv1.SchemeGroupVersion) {
 apiGroupInfo.VersionedResourcesStorageMap[batchapiv1.SchemeGroupVersion.Version] = p.v1Storage(apiResourceConfigSource, restOptionsGetter)
 apiGroupInfo.GroupMeta.GroupVersion = batchapiv1.SchemeGroupVersion
 }
+if apiResourceConfigSource.AnyResourcesForVersionEnabled(batchapiv2alpha1.SchemeGroupVersion) {
+apiGroupInfo.VersionedResourcesStorageMap[batchapiv2alpha1.SchemeGroupVersion.Version] = p.v2alpha1Storage(apiResourceConfigSource, restOptionsGetter)
+apiGroupInfo.GroupMeta.GroupVersion = batchapiv2alpha1.SchemeGroupVersion
+}
 return apiGroupInfo, true
 }
@@ -73,8 +68,6 @@ func (p RESTStorageProvider) v2alpha1Storage(apiResourceConfigSource serverstora
 cronJobsStorage, cronJobsStatusStorage := cronjobstore.NewREST(restOptionsGetter)
 storage["cronjobs"] = cronJobsStorage
 storage["cronjobs/status"] = cronJobsStatusStorage
-storage["scheduledjobs"] = cronJobsStorage
-storage["scheduledjobs/status"] = cronJobsStatusStorage
 }
 return storage
 }
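
The user-visible effect of the storage hunks above is that batch/v2alpha1 discovery stops listing the scheduledjobs resource and its status subresource. A hedged sketch of how one might verify that against a live cluster with client-go's discovery client (kubeconfig handling and error treatment here are illustrative assumptions, not part of this change):

```go
package main

import (
	"fmt"

	"k8s.io/client-go/discovery"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Load a client config from the default kubeconfig location.
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	dc, err := discovery.NewDiscoveryClientForConfig(config)
	if err != nil {
		panic(err)
	}

	// List the resources served under batch/v2alpha1.
	resources, err := dc.ServerResourcesForGroupVersion("batch/v2alpha1")
	if err != nil {
		panic(err)
	}
	for _, r := range resources.APIResources {
		// After this commit, scheduledjobs and scheduledjobs/status should
		// no longer appear in this list; cronjobs remains.
		fmt.Println(r.Name)
	}
}
```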

View File

@@ -181,7 +181,7 @@ func ClusterRoles() []rbac.ClusterRole {
 rbac.NewRule(ReadWrite...).Groups(autoscalingGroup).Resources("horizontalpodautoscalers").RuleOrDie(),
-rbac.NewRule(ReadWrite...).Groups(batchGroup).Resources("jobs", "cronjobs", "scheduledjobs").RuleOrDie(),
+rbac.NewRule(ReadWrite...).Groups(batchGroup).Resources("jobs", "cronjobs").RuleOrDie(),
 rbac.NewRule(ReadWrite...).Groups(extensionsGroup).Resources("daemonsets",
 "deployments", "deployments/scale", "deployments/rollback", "ingresses",
@@ -213,7 +213,7 @@ func ClusterRoles() []rbac.ClusterRole {
 rbac.NewRule(ReadWrite...).Groups(autoscalingGroup).Resources("horizontalpodautoscalers").RuleOrDie(),
-rbac.NewRule(ReadWrite...).Groups(batchGroup).Resources("jobs", "cronjobs", "scheduledjobs").RuleOrDie(),
+rbac.NewRule(ReadWrite...).Groups(batchGroup).Resources("jobs", "cronjobs").RuleOrDie(),
 rbac.NewRule(ReadWrite...).Groups(extensionsGroup).Resources("daemonsets",
 "deployments", "deployments/scale", "deployments/rollback", "ingresses",
@@ -237,7 +237,7 @@ func ClusterRoles() []rbac.ClusterRole {
 rbac.NewRule(Read...).Groups(autoscalingGroup).Resources("horizontalpodautoscalers").RuleOrDie(),
-rbac.NewRule(Read...).Groups(batchGroup).Resources("jobs", "cronjobs", "scheduledjobs").RuleOrDie(),
+rbac.NewRule(Read...).Groups(batchGroup).Resources("jobs", "cronjobs").RuleOrDie(),
 rbac.NewRule(Read...).Groups(extensionsGroup).Resources("daemonsets", "deployments", "deployments/scale",
 "ingresses", "replicasets", "replicasets/scale", "replicationcontrollers/scale").RuleOrDie(),

View File

@@ -48,8 +48,6 @@ func addKnownTypes(scheme *runtime.Scheme) error {
 &CronJob{},
 &CronJobList{},
 )
-scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("ScheduledJob"), &CronJob{})
-scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("ScheduledJobList"), &CronJobList{})
 metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
 return nil
 }

View File

@@ -32,6 +32,5 @@ const (
 )
 var (
-CronJobGroupVersionResource      = schema.GroupVersionResource{Group: batchv2alpha1.GroupName, Version: "v2alpha1", Resource: "cronjobs"}
-ScheduledJobGroupVersionResource = schema.GroupVersionResource{Group: batchv2alpha1.GroupName, Version: "v2alpha1", Resource: "scheduledjobs"}
+CronJobGroupVersionResource = schema.GroupVersionResource{Group: batchv2alpha1.GroupName, Version: "v2alpha1", Resource: "cronjobs"}
 )

View File

@@ -146,8 +146,6 @@ var (
 kubectlContainerExitCodeVersion = utilversion.MustParseSemantic("v1.4.0-alpha.3")
 CronJobGroupVersionResource = schema.GroupVersionResource{Group: batchv2alpha1.GroupName, Version: "v2alpha1", Resource: "cronjobs"}
-ScheduledJobGroupVersionResource = schema.GroupVersionResource{Group: batchv2alpha1.GroupName, Version: "v2alpha1", Resource: "scheduledjobs"}
 )
 // Stops everything from filePath from namespace ns and checks if everything matching selectors from the given namespace is correctly stopped.
@@ -199,44 +197,6 @@ var _ = SIGDescribe("Kubectl alpha client", func() {
 // Customized Wait / ForEach wrapper for this test. These demonstrate the
-framework.KubeDescribe("Kubectl run ScheduledJob", func() {
-var nsFlag string
-var sjName string
-BeforeEach(func() {
-nsFlag = fmt.Sprintf("--namespace=%v", ns)
-sjName = "e2e-test-echo-scheduledjob"
-})
-AfterEach(func() {
-framework.RunKubectlOrDie("delete", "cronjobs", sjName, nsFlag)
-})
-It("should create a ScheduledJob", func() {
-framework.SkipIfMissingResource(f.ClientPool, ScheduledJobGroupVersionResource, f.Namespace.Name)
-schedule := "*/5 * * * ?"
-framework.RunKubectlOrDie("run", sjName, "--restart=OnFailure", "--generator=scheduledjob/v2alpha1",
-"--schedule="+schedule, "--image="+busyboxImage, nsFlag)
-By("verifying the ScheduledJob " + sjName + " was created")
-sj, err := c.BatchV2alpha1().CronJobs(ns).Get(sjName, metav1.GetOptions{})
-if err != nil {
-framework.Failf("Failed getting ScheduledJob %s: %v", sjName, err)
-}
-if sj.Spec.Schedule != schedule {
-framework.Failf("Failed creating a ScheduledJob with correct schedule %s, but got %s", schedule, sj.Spec.Schedule)
-}
-containers := sj.Spec.JobTemplate.Spec.Template.Spec.Containers
-if containers == nil || len(containers) != 1 || containers[0].Image != busyboxImage {
-framework.Failf("Failed creating ScheduledJob %s for 1 pod with expected image %s: %#v", sjName, busyboxImage, containers)
-}
-restartPolicy := sj.Spec.JobTemplate.Spec.Template.Spec.RestartPolicy
-if sj.Spec.JobTemplate.Spec.Template.Spec.RestartPolicy != v1.RestartPolicyOnFailure {
-framework.Failf("Failed creating a ScheduledJob with correct restart policy %s, but got %s", v1.RestartPolicyOnFailure, restartPolicy)
-}
-})
-})
 framework.KubeDescribe("Kubectl run CronJob", func() {
 var nsFlag string
 var cjName string
View File

@@ -197,11 +197,6 @@ var etcdStorageData = map[schema.GroupVersionResource]struct {
 stub: `{"metadata": {"name": "cj1"}, "spec": {"jobTemplate": {"spec": {"template": {"metadata": {"labels": {"controller-uid": "uid0"}}, "spec": {"containers": [{"image": "fedora:latest", "name": "container0"}], "dnsPolicy": "ClusterFirst", "restartPolicy": "Never"}}}}, "schedule": "* * * * *"}}`,
 expectedEtcdPath: "/registry/cronjobs/etcdstoragepathtestnamespace/cj1",
 },
-gvr("batch", "v2alpha1", "scheduledjobs"): {
-stub: `{"metadata": {"name": "cj2"}, "spec": {"jobTemplate": {"spec": {"template": {"metadata": {"labels": {"controller-uid": "uid0"}}, "spec": {"containers": [{"image": "fedora:latest", "name": "container0"}], "dnsPolicy": "ClusterFirst", "restartPolicy": "Never"}}}}, "schedule": "* * * * *"}}`,
-expectedEtcdPath: "/registry/cronjobs/etcdstoragepathtestnamespace/cj2",
-expectedGVK: gvkP("batch", "v2alpha1", "CronJob"), // scheduledjobs were deprecated by cronjobs
-},
 // --
 // k8s.io/kubernetes/pkg/apis/certificates/v1beta1