mirror of https://github.com/k3s-io/k3s
use versioned group clients from client-go
parent 9feabbdaeb
commit b694d51842
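The change is mechanical throughout: every call through an unversioned group accessor such as client.Extensions(), client.Certificates(), or client.Batch() is replaced with the explicitly versioned accessor generated by client-go (ExtensionsV1beta1(), CertificatesV1beta1(), BatchV1(), AdmissionregistrationV1alpha1(), and so on). As a minimal sketch of the pattern, under the assumption of a kubeconfig path and the client-go release this commit targets (where List still takes metav1.ListOptions directly rather than a context):

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// listWithVersionedClients is illustrative only; the function name and
// kubeconfig parameter are not from this commit.
func listWithVersionedClients(kubeconfig string) error {
	config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
	if err != nil {
		return err
	}
	client, err := kubernetes.NewForConfig(config)
	if err != nil {
		return err
	}
	// Before: client.Certificates().CertificateSigningRequests().List(...)
	csrs, err := client.CertificatesV1beta1().CertificateSigningRequests().List(metav1.ListOptions{})
	if err != nil {
		return err
	}
	// Before: client.Extensions().Deployments(ns).List(...)
	deps, err := client.ExtensionsV1beta1().Deployments(metav1.NamespaceDefault).List(metav1.ListOptions{})
	if err != nil {
		return err
	}
	fmt.Printf("%d CSRs, %d deployments\n", len(csrs.Items), len(deps.Items))
	return nil
}

Naming the group version at the call site makes explicit which API version each controller and test speaks, instead of hiding it behind an unversioned alias; the diff below applies exactly this substitution everywhere.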
@@ -72,7 +72,7 @@ func (s *GKESigner) handle(csr *capi.CertificateSigningRequest) error {
 	if err != nil {
 		return fmt.Errorf("error auto signing csr: %v", err)
 	}
-	_, err = s.client.Certificates().CertificateSigningRequests().UpdateStatus(csr)
+	_, err = s.client.CertificatesV1beta1().CertificateSigningRequests().UpdateStatus(csr)
 	if err != nil {
 		return fmt.Errorf("error updating signature for csr: %v", err)
 	}
@@ -93,7 +93,7 @@ func startHPAControllerWithMetricsClient(ctx ControllerContext, metricsClient me
 	go podautoscaler.NewHorizontalController(
 		hpaClientGoClient.CoreV1(),
 		scaleClient,
-		hpaClient.Autoscaling(),
+		hpaClient.AutoscalingV1(),
 		restMapper,
 		replicaCalc,
 		ctx.InformerFactory.Autoscaling().V1().HorizontalPodAutoscalers(),
@@ -352,9 +352,9 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies) (err error) {
 		kubeClient, err = clientset.NewForConfig(clientConfig)
 		if err != nil {
 			glog.Warningf("New kubeClient from clientConfig error: %v", err)
-		} else if kubeClient.Certificates() != nil && clientCertificateManager != nil {
+		} else if kubeClient.CertificatesV1beta1() != nil && clientCertificateManager != nil {
 			glog.V(2).Info("Starting client certificate rotation.")
-			clientCertificateManager.SetCertificateSigningRequestClient(kubeClient.Certificates().CertificateSigningRequests())
+			clientCertificateManager.SetCertificateSigningRequestClient(kubeClient.CertificatesV1beta1().CertificateSigningRequests())
 			clientCertificateManager.Start()
 		}
 		externalKubeClient, err = clientgoclientset.NewForConfig(clientConfig)
@@ -106,7 +106,7 @@ func (a *sarApprover) handle(csr *capi.CertificateSigningRequest) error {
 		}
 		if approved {
 			appendApprovalCondition(csr, r.successMessage)
-			_, err = a.client.Certificates().CertificateSigningRequests().UpdateApproval(csr)
+			_, err = a.client.CertificatesV1beta1().CertificateSigningRequests().UpdateApproval(csr)
 			if err != nil {
 				return fmt.Errorf("error updating approval for csr: %v", err)
 			}
@@ -103,7 +103,7 @@ func (s *cfsslSigner) handle(csr *capi.CertificateSigningRequest) error {
 	if err != nil {
 		return fmt.Errorf("error auto signing csr: %v", err)
 	}
-	_, err = s.client.Certificates().CertificateSigningRequests().UpdateStatus(csr)
+	_, err = s.client.CertificatesV1beta1().CertificateSigningRequests().UpdateStatus(csr)
 	if err != nil {
 		return fmt.Errorf("error updating signature for csr: %v", err)
 	}
@@ -410,7 +410,7 @@ type RealRSControl struct {
 var _ RSControlInterface = &RealRSControl{}

 func (r RealRSControl) PatchReplicaSet(namespace, name string, data []byte) error {
-	_, err := r.KubeClient.Extensions().ReplicaSets(namespace).Patch(name, types.StrategicMergePatchType, data)
+	_, err := r.KubeClient.ExtensionsV1beta1().ReplicaSets(namespace).Patch(name, types.StrategicMergePatchType, data)
 	return err
 }
@@ -1069,7 +1069,7 @@ func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *extensions.DaemonSet,
 	}
 	numberUnavailable := desiredNumberScheduled - numberAvailable

-	err = storeDaemonSetStatus(dsc.kubeClient.Extensions().DaemonSets(ds.Namespace), ds, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady, updatedNumberScheduled, numberAvailable, numberUnavailable)
+	err = storeDaemonSetStatus(dsc.kubeClient.ExtensionsV1beta1().DaemonSets(ds.Namespace), ds, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady, updatedNumberScheduled, numberAvailable, numberUnavailable)
 	if err != nil {
 		return fmt.Errorf("error storing status for daemon set %#v: %v", ds, err)
 	}
@@ -585,7 +585,7 @@ func (dc *DeploymentController) syncDeployment(key string) error {
 		dc.eventRecorder.Eventf(d, v1.EventTypeWarning, "SelectingAll", "This deployment is selecting all pods. A non-empty selector is required.")
 		if d.Status.ObservedGeneration < d.Generation {
 			d.Status.ObservedGeneration = d.Generation
-			dc.client.Extensions().Deployments(d.Namespace).UpdateStatus(d)
+			dc.client.ExtensionsV1beta1().Deployments(d.Namespace).UpdateStatus(d)
 		}
 		return nil
 	}
@@ -112,7 +112,7 @@ func (dc *DeploymentController) syncRolloutStatus(allRSs []*extensions.ReplicaSe

 	newDeployment := d
 	newDeployment.Status = newStatus
-	_, err := dc.client.Extensions().Deployments(newDeployment.Namespace).UpdateStatus(newDeployment)
+	_, err := dc.client.ExtensionsV1beta1().Deployments(newDeployment.Namespace).UpdateStatus(newDeployment)
 	return err
 }
@@ -112,6 +112,6 @@ func (dc *DeploymentController) emitRollbackNormalEvent(d *extensions.Deployment
 func (dc *DeploymentController) updateDeploymentAndClearRollbackTo(d *extensions.Deployment) error {
 	glog.V(4).Infof("Cleans up rollbackTo of deployment %q", d.Name)
 	d.Spec.RollbackTo = nil
-	_, err := dc.client.Extensions().Deployments(d.Namespace).Update(d)
+	_, err := dc.client.ExtensionsV1beta1().Deployments(d.Namespace).Update(d)
 	return err
 }
@@ -100,7 +100,7 @@ func (dc *DeploymentController) checkPausedConditions(d *extensions.Deployment)
 	}

 	var err error
-	d, err = dc.client.Extensions().Deployments(d.Namespace).UpdateStatus(d)
+	d, err = dc.client.ExtensionsV1beta1().Deployments(d.Namespace).UpdateStatus(d)
 	return err
 }
@@ -167,7 +167,7 @@ func (dc *DeploymentController) addHashKeyToRSAndPods(rs *extensions.ReplicaSet,
 		return nil, err
 	}
 	// 1. Add hash template label to the rs. This ensures that any newly created pods will have the new label.
-	updatedRS, err := deploymentutil.UpdateRSWithRetries(dc.client.Extensions().ReplicaSets(rs.Namespace), dc.rsLister, rs.Namespace, rs.Name,
+	updatedRS, err := deploymentutil.UpdateRSWithRetries(dc.client.ExtensionsV1beta1().ReplicaSets(rs.Namespace), dc.rsLister, rs.Namespace, rs.Name,
 		func(updated *extensions.ReplicaSet) error {
 			// Precondition: the RS doesn't contain the new hash in its pod template label.
 			if updated.Spec.Template.Labels[extensions.DefaultDeploymentUniqueLabelKey] == hash {
@@ -207,7 +207,7 @@ func (dc *DeploymentController) addHashKeyToRSAndPods(rs *extensions.ReplicaSet,

 	// 3. Update rs label and selector to include the new hash label
 	// Copy the old selector, so that we can scrub out any orphaned pods
-	updatedRS, err = deploymentutil.UpdateRSWithRetries(dc.client.Extensions().ReplicaSets(rs.Namespace), dc.rsLister, rs.Namespace, rs.Name, func(updated *extensions.ReplicaSet) error {
+	updatedRS, err = deploymentutil.UpdateRSWithRetries(dc.client.ExtensionsV1beta1().ReplicaSets(rs.Namespace), dc.rsLister, rs.Namespace, rs.Name, func(updated *extensions.ReplicaSet) error {
 		// Precondition: the RS doesn't contain the new hash in its label and selector.
 		if updated.Labels[extensions.DefaultDeploymentUniqueLabelKey] == hash && updated.Spec.Selector.MatchLabels[extensions.DefaultDeploymentUniqueLabelKey] == hash {
 			return utilerrors.ErrPreconditionViolated
@@ -251,7 +251,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsLis
 		minReadySecondsNeedsUpdate := rsCopy.Spec.MinReadySeconds != d.Spec.MinReadySeconds
 		if annotationsUpdated || minReadySecondsNeedsUpdate {
 			rsCopy.Spec.MinReadySeconds = d.Spec.MinReadySeconds
-			return dc.client.Extensions().ReplicaSets(rsCopy.ObjectMeta.Namespace).Update(rsCopy)
+			return dc.client.ExtensionsV1beta1().ReplicaSets(rsCopy.ObjectMeta.Namespace).Update(rsCopy)
 		}

 		// Should use the revision in existingNewRS's annotation, since it set by before
@@ -269,7 +269,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsLis

 		if needsUpdate {
 			var err error
-			if d, err = dc.client.Extensions().Deployments(d.Namespace).UpdateStatus(d); err != nil {
+			if d, err = dc.client.ExtensionsV1beta1().Deployments(d.Namespace).UpdateStatus(d); err != nil {
 				return nil, err
 			}
 		}
@@ -315,7 +315,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsLis
 	// hash collisions. If there is any other error, we need to report it in the status of
 	// the Deployment.
 	alreadyExists := false
-	createdRS, err := dc.client.Extensions().ReplicaSets(d.Namespace).Create(&newRS)
+	createdRS, err := dc.client.ExtensionsV1beta1().ReplicaSets(d.Namespace).Create(&newRS)
 	switch {
 	// We may end up hitting this due to a slow cache or a fast resync of the Deployment.
 	// Fetch a copy of the ReplicaSet. If its PodTemplateSpec is semantically deep equal
@@ -338,7 +338,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsLis
 		*d.Status.CollisionCount++
 		// Update the collisionCount for the Deployment and let it requeue by returning the original
 		// error.
-		_, dErr := dc.client.Extensions().Deployments(d.Namespace).UpdateStatus(d)
+		_, dErr := dc.client.ExtensionsV1beta1().Deployments(d.Namespace).UpdateStatus(d)
 		if dErr == nil {
 			glog.V(2).Infof("Found a hash collision for deployment %q - bumping collisionCount (%d->%d) to resolve it", d.Name, preCollisionCount, *d.Status.CollisionCount)
 		}
@@ -355,7 +355,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsLis
 		// We don't really care about this error at this point, since we have a bigger issue to report.
 		// TODO: Identify which errors are permanent and switch DeploymentIsFailed to take into account
 		// these reasons as well. Related issue: https://github.com/kubernetes/kubernetes/issues/18568
-		_, _ = dc.client.Extensions().Deployments(d.Namespace).UpdateStatus(d)
+		_, _ = dc.client.ExtensionsV1beta1().Deployments(d.Namespace).UpdateStatus(d)
 	}
 	dc.eventRecorder.Eventf(d, v1.EventTypeWarning, deploymentutil.FailedRSCreateReason, msg)
 	return nil, err
@@ -372,7 +372,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsLis
 		needsUpdate = true
 	}
 	if needsUpdate {
-		_, err = dc.client.Extensions().Deployments(d.Namespace).UpdateStatus(d)
+		_, err = dc.client.ExtensionsV1beta1().Deployments(d.Namespace).UpdateStatus(d)
 	}
 	return createdRS, err
 }
@@ -508,7 +508,7 @@ func (dc *DeploymentController) scaleReplicaSet(rs *extensions.ReplicaSet, newSc
 	var err error
 	if sizeNeedsUpdate || annotationsNeedUpdate {
 		*(rsCopy.Spec.Replicas) = newScale
-		rs, err = dc.client.Extensions().ReplicaSets(rsCopy.Namespace).Update(rsCopy)
+		rs, err = dc.client.ExtensionsV1beta1().ReplicaSets(rsCopy.Namespace).Update(rsCopy)
 		if err == nil && sizeNeedsUpdate {
 			scaled = true
 			dc.eventRecorder.Eventf(deployment, v1.EventTypeNormal, "ScalingReplicaSet", "Scaled %s replica set %s to %d", scalingOperation, rs.Name, newScale)
@@ -546,7 +546,7 @@ func (dc *DeploymentController) cleanupDeployment(oldRSs []*extensions.ReplicaSe
 			continue
 		}
 		glog.V(4).Infof("Trying to cleanup replica set %q for deployment %q", rs.Name, deployment.Name)
-		if err := dc.client.Extensions().ReplicaSets(rs.Namespace).Delete(rs.Name, nil); err != nil && !errors.IsNotFound(err) {
+		if err := dc.client.ExtensionsV1beta1().ReplicaSets(rs.Namespace).Delete(rs.Name, nil); err != nil && !errors.IsNotFound(err) {
 			// Return error instead of aggregating and continuing DELETEs on the theory
 			// that we may be overloading the api server.
 			return err
@@ -566,7 +566,7 @@ func (dc *DeploymentController) syncDeploymentStatus(allRSs []*extensions.Replic

 	newDeployment := d
 	newDeployment.Status = newStatus
-	_, err := dc.client.Extensions().Deployments(newDeployment.Namespace).UpdateStatus(newDeployment)
+	_, err := dc.client.ExtensionsV1beta1().Deployments(newDeployment.Namespace).UpdateStatus(newDeployment)
 	return err
 }
@@ -728,7 +728,7 @@ func refresh(pdbClient policyclientset.PodDisruptionBudgetInterface, pdb *policy
 }

 func (dc *DisruptionController) writePdbStatus(pdb *policy.PodDisruptionBudget) error {
-	pdbClient := dc.kubeClient.Policy().PodDisruptionBudgets(pdb.Namespace)
+	pdbClient := dc.kubeClient.PolicyV1beta1().PodDisruptionBudgets(pdb.Namespace)
 	st := pdb.Status

 	var err error
@@ -770,7 +770,7 @@ func (jm *JobController) manageJob(activePods []*v1.Pod, succeeded int32, job *b
 }

 func (jm *JobController) updateJobStatus(job *batch.Job) error {
-	_, err := jm.kubeClient.Batch().Jobs(job.Namespace).UpdateStatus(job)
+	_, err := jm.kubeClient.BatchV1().Jobs(job.Namespace).UpdateStatus(job)
 	return err
 }
@@ -599,7 +599,7 @@ func (rsc *ReplicaSetController) syncReplicaSet(key string) error {
 	newStatus := calculateStatus(rs, filteredPods, manageReplicasErr)

 	// Always updates status as pods come up or die.
-	updatedRS, err := updateReplicaSetStatus(rsc.kubeClient.Extensions().ReplicaSets(rs.Namespace), rs, newStatus)
+	updatedRS, err := updateReplicaSetStatus(rsc.kubeClient.ExtensionsV1beta1().ReplicaSets(rs.Namespace), rs, newStatus)
 	if err != nil {
 		// Multiple things could lead to this update failing. Requeuing the replica set ensures
 		// Returning an error causes a requeue without forcing a hotloop
@@ -37,8 +37,8 @@ import (
 // or returns an error.
 func NewKubeletServerCertificateManager(kubeClient clientset.Interface, kubeCfg *kubeletconfig.KubeletConfiguration, nodeName types.NodeName, ips []net.IP, hostnames []string, certDirectory string) (certificate.Manager, error) {
 	var certSigningRequestClient clientcertificates.CertificateSigningRequestInterface
-	if kubeClient != nil && kubeClient.Certificates() != nil {
-		certSigningRequestClient = kubeClient.Certificates().CertificateSigningRequests()
+	if kubeClient != nil && kubeClient.CertificatesV1beta1() != nil {
+		certSigningRequestClient = kubeClient.CertificatesV1beta1().CertificateSigningRequests()
 	}
 	certificateStore, err := certificate.NewFileStore(
 		"kubelet-server",
@@ -97,7 +97,7 @@ func (i *initializer) ValidateInitialization() error {

 // SetExternalKubeClientSet implements the WantsExternalKubeClientSet interface.
 func (i *initializer) SetExternalKubeClientSet(client clientset.Interface) {
-	i.config = configuration.NewInitializerConfigurationManager(client.Admissionregistration().InitializerConfigurations())
+	i.config = configuration.NewInitializerConfigurationManager(client.AdmissionregistrationV1alpha1().InitializerConfigurations())
 }

 // SetAuthorizer implements the WantsAuthorizer interface.
@@ -153,7 +153,7 @@ func (a *GenericAdmissionWebhook) SetScheme(scheme *runtime.Scheme) {

 // WantsExternalKubeClientSet defines a function which sets external ClientSet for admission plugins that need it
 func (a *GenericAdmissionWebhook) SetExternalKubeClientSet(client clientset.Interface) {
-	a.hookSource = configuration.NewExternalAdmissionHookConfigurationManager(client.Admissionregistration().ExternalAdmissionHookConfigurations())
+	a.hookSource = configuration.NewExternalAdmissionHookConfigurationManager(client.AdmissionregistrationV1alpha1().ExternalAdmissionHookConfigurations())
 }

 // ValidateInitialization implements the InitializationValidator interface.
@@ -166,7 +166,7 @@ func verifyRemainingDeploymentsReplicaSetsPods(
 	deploymentNum, rsNum, podNum int,
 ) (bool, error) {
 	var ret = true
-	rs, err := clientSet.Extensions().ReplicaSets(f.Namespace.Name).List(metav1.ListOptions{})
+	rs, err := clientSet.ExtensionsV1beta1().ReplicaSets(f.Namespace.Name).List(metav1.ListOptions{})
 	if err != nil {
 		return false, fmt.Errorf("Failed to list rs: %v", err)
 	}
@@ -174,7 +174,7 @@ func verifyRemainingDeploymentsReplicaSetsPods(
 		ret = false
 		By(fmt.Sprintf("expected %d rs, got %d rs", rsNum, len(rs.Items)))
 	}
-	deployments, err := clientSet.Extensions().Deployments(f.Namespace.Name).List(metav1.ListOptions{})
+	deployments, err := clientSet.ExtensionsV1beta1().Deployments(f.Namespace.Name).List(metav1.ListOptions{})
 	if err != nil {
 		return false, fmt.Errorf("Failed to list deployments: %v", err)
 	}
@@ -503,8 +503,8 @@ var _ = SIGDescribe("Garbage collector", func() {

 	It("should delete RS created by deployment when not orphaning", func() {
 		clientSet := f.ClientSet
-		deployClient := clientSet.Extensions().Deployments(f.Namespace.Name)
-		rsClient := clientSet.Extensions().ReplicaSets(f.Namespace.Name)
+		deployClient := clientSet.ExtensionsV1beta1().Deployments(f.Namespace.Name)
+		rsClient := clientSet.ExtensionsV1beta1().ReplicaSets(f.Namespace.Name)
 		deploymentName := "simpletest.deployment"
 		// TODO: find better way to keep this label unique in the test
 		uniqLabels := map[string]string{"gctest": "delete_rs"}
@@ -554,8 +554,8 @@ var _ = SIGDescribe("Garbage collector", func() {

 	It("should orphan RS created by deployment when deleteOptions.OrphanDependents is true", func() {
 		clientSet := f.ClientSet
-		deployClient := clientSet.Extensions().Deployments(f.Namespace.Name)
-		rsClient := clientSet.Extensions().ReplicaSets(f.Namespace.Name)
+		deployClient := clientSet.ExtensionsV1beta1().Deployments(f.Namespace.Name)
+		rsClient := clientSet.ExtensionsV1beta1().ReplicaSets(f.Namespace.Name)
 		deploymentName := "simpletest.deployment"
 		// TODO: find better way to keep this label unique in the test
 		uniqLabels := map[string]string{"gctest": "orphan_rs"}
@@ -604,7 +604,7 @@ var _ = SIGDescribe("Garbage collector", func() {
 				framework.Failf("remaining deployment's post mortem: %#v", remainingDSs)
 			}
 		}
-		rs, err := clientSet.Extensions().ReplicaSets(f.Namespace.Name).List(metav1.ListOptions{})
+		rs, err := clientSet.ExtensionsV1beta1().ReplicaSets(f.Namespace.Name).List(metav1.ListOptions{})
 		if err != nil {
 			framework.Failf("Failed to list ReplicaSet %v", err)
 		}
@@ -67,7 +67,7 @@ var _ = SIGDescribe("CronJob", func() {
 		Expect(err).NotTo(HaveOccurred())

 		By("Ensuring at least two running jobs exists by listing jobs explicitly")
-		jobs, err := f.ClientSet.Batch().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
+		jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
 		Expect(err).NotTo(HaveOccurred())
 		activeJobs, _ := filterActiveJobs(jobs)
 		Expect(len(activeJobs) >= 2).To(BeTrue())
@@ -92,7 +92,7 @@ var _ = SIGDescribe("CronJob", func() {
 		Expect(err).To(HaveOccurred())

 		By("Ensuring no job exists by listing jobs explicitly")
-		jobs, err := f.ClientSet.Batch().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
+		jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
 		Expect(err).NotTo(HaveOccurred())
 		Expect(jobs.Items).To(HaveLen(0))
@@ -119,7 +119,7 @@ var _ = SIGDescribe("CronJob", func() {
 		Expect(cronJob.Status.Active).Should(HaveLen(1))

 		By("Ensuring exactly one running job exists by listing jobs explicitly")
-		jobs, err := f.ClientSet.Batch().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
+		jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
 		Expect(err).NotTo(HaveOccurred())
 		activeJobs, _ := filterActiveJobs(jobs)
 		Expect(activeJobs).To(HaveLen(1))
@@ -151,7 +151,7 @@ var _ = SIGDescribe("CronJob", func() {
 		Expect(cronJob.Status.Active).Should(HaveLen(1))

 		By("Ensuring exactly one running job exists by listing jobs explicitly")
-		jobs, err := f.ClientSet.Batch().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
+		jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
 		Expect(err).NotTo(HaveOccurred())
 		activeJobs, _ := filterActiveJobs(jobs)
 		Expect(activeJobs).To(HaveLen(1))
@@ -248,7 +248,7 @@ var _ = SIGDescribe("CronJob", func() {
 		Expect(err).NotTo(HaveOccurred())

 		By("Ensuring a finished job exists by listing jobs explicitly")
-		jobs, err := f.ClientSet.Batch().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
+		jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
 		Expect(err).NotTo(HaveOccurred())
 		_, finishedJobs := filterActiveJobs(jobs)
 		Expect(len(finishedJobs) == 1).To(BeTrue())
@@ -259,7 +259,7 @@ var _ = SIGDescribe("CronJob", func() {
 		Expect(err).NotTo(HaveOccurred())

 		By("Ensuring there is 1 finished job by listing jobs explicitly")
-		jobs, err = f.ClientSet.Batch().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
+		jobs, err = f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
 		Expect(err).NotTo(HaveOccurred())
 		_, finishedJobs = filterActiveJobs(jobs)
 		Expect(len(finishedJobs) == 1).To(BeTrue())
@@ -370,7 +370,7 @@ func waitForNoJobs(c clientset.Interface, ns, jobName string, failIfNonEmpty boo
 // Wait for a job to not exist by listing jobs explicitly.
 func waitForJobNotExist(c clientset.Interface, ns string, targetJob *batchv1.Job) error {
 	return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
-		jobs, err := c.Batch().Jobs(ns).List(metav1.ListOptions{})
+		jobs, err := c.BatchV1().Jobs(ns).List(metav1.ListOptions{})
 		if err != nil {
 			return false, err
 		}
@@ -387,7 +387,7 @@ func waitForJobNotExist(c clientset.Interface, ns string, targetJob *batchv1.Job
 // Wait for a job to be replaced with a new one.
 func waitForJobReplaced(c clientset.Interface, ns, previousJobName string) error {
 	return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
-		jobs, err := c.Batch().Jobs(ns).List(metav1.ListOptions{})
+		jobs, err := c.BatchV1().Jobs(ns).List(metav1.ListOptions{})
 		if err != nil {
 			return false, err
 		}
@@ -406,7 +406,7 @@ func waitForJobReplaced(c clientset.Interface, ns, previousJobName string) error
 // waitForJobsAtLeast waits for at least a number of jobs to appear.
 func waitForJobsAtLeast(c clientset.Interface, ns string, atLeast int) error {
 	return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
-		jobs, err := c.Batch().Jobs(ns).List(metav1.ListOptions{})
+		jobs, err := c.BatchV1().Jobs(ns).List(metav1.ListOptions{})
 		if err != nil {
 			return false, err
 		}
@@ -417,7 +417,7 @@ func waitForJobsAtLeast(c clientset.Interface, ns string, atLeast int) error {
 // waitForAnyFinishedJob waits for any completed job to appear.
 func waitForAnyFinishedJob(c clientset.Interface, ns string) error {
 	return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
-		jobs, err := c.Batch().Jobs(ns).List(metav1.ListOptions{})
+		jobs, err := c.BatchV1().Jobs(ns).List(metav1.ListOptions{})
 		if err != nil {
 			return false, err
 		}
@@ -67,7 +67,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {

 	AfterEach(func() {
 		// Clean up
-		daemonsets, err := f.ClientSet.Extensions().DaemonSets(f.Namespace.Name).List(metav1.ListOptions{})
+		daemonsets, err := f.ClientSet.ExtensionsV1beta1().DaemonSets(f.Namespace.Name).List(metav1.ListOptions{})
 		Expect(err).NotTo(HaveOccurred(), "unable to dump DaemonSets")
 		if daemonsets != nil && len(daemonsets.Items) > 0 {
 			for _, ds := range daemonsets.Items {
@@ -80,7 +80,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 				Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to be reaped")
 			}
 		}
-		if daemonsets, err := f.ClientSet.Extensions().DaemonSets(f.Namespace.Name).List(metav1.ListOptions{}); err == nil {
+		if daemonsets, err := f.ClientSet.ExtensionsV1beta1().DaemonSets(f.Namespace.Name).List(metav1.ListOptions{}); err == nil {
 			framework.Logf("daemonset: %s", runtime.EncodeOrDie(legacyscheme.Codecs.LegacyCodec(legacyscheme.Registry.EnabledVersions()...), daemonsets))
 		} else {
 			framework.Logf("unable to dump daemonsets: %v", err)
@@ -114,7 +114,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		label := map[string]string{daemonsetNameLabel: dsName}

 		By(fmt.Sprintf("Creating simple DaemonSet %q", dsName))
-		ds, err := c.Extensions().DaemonSets(ns).Create(newDaemonSet(dsName, image, label))
+		ds, err := c.ExtensionsV1beta1().DaemonSets(ns).Create(newDaemonSet(dsName, image, label))
 		Expect(err).NotTo(HaveOccurred())

 		By("Check that daemon pods launch on every node of the cluster.")
@@ -138,7 +138,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		framework.Logf("Creating daemon %q with a node selector", dsName)
 		ds := newDaemonSet(dsName, image, complexLabel)
 		ds.Spec.Template.Spec.NodeSelector = nodeSelector
-		ds, err := c.Extensions().DaemonSets(ns).Create(ds)
+		ds, err := c.ExtensionsV1beta1().DaemonSets(ns).Create(ds)
 		Expect(err).NotTo(HaveOccurred())

 		By("Initially, daemon pods should not be running on any nodes.")
@@ -167,7 +167,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		By("Update DaemonSet node selector to green, and change its update strategy to RollingUpdate")
 		patch := fmt.Sprintf(`{"spec":{"template":{"spec":{"nodeSelector":{"%s":"%s"}}},"updateStrategy":{"type":"RollingUpdate"}}}`,
 			daemonsetColorLabel, greenNode.Labels[daemonsetColorLabel])
-		ds, err = c.Extensions().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch))
+		ds, err = c.ExtensionsV1beta1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch))
 		Expect(err).NotTo(HaveOccurred(), "error patching daemon set")
 		daemonSetLabels, _ = separateDaemonSetNodeLabels(greenNode.Labels)
 		Expect(len(daemonSetLabels)).To(Equal(1))
@@ -199,7 +199,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 				},
 			},
 		}
-		ds, err := c.Extensions().DaemonSets(ns).Create(ds)
+		ds, err := c.ExtensionsV1beta1().DaemonSets(ns).Create(ds)
 		Expect(err).NotTo(HaveOccurred())

 		By("Initially, daemon pods should not be running on any nodes.")
@@ -229,7 +229,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		label := map[string]string{daemonsetNameLabel: dsName}

 		By(fmt.Sprintf("Creating a simple DaemonSet %q", dsName))
-		ds, err := c.Extensions().DaemonSets(ns).Create(newDaemonSet(dsName, image, label))
+		ds, err := c.ExtensionsV1beta1().DaemonSets(ns).Create(newDaemonSet(dsName, image, label))
 		Expect(err).NotTo(HaveOccurred())

 		By("Check that daemon pods launch on every node of the cluster.")
@@ -253,7 +253,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		label := map[string]string{daemonsetNameLabel: dsName}

 		framework.Logf("Creating simple daemon set %s", dsName)
-		ds, err := c.Extensions().DaemonSets(ns).Create(newDaemonSet(dsName, image, label))
+		ds, err := c.ExtensionsV1beta1().DaemonSets(ns).Create(newDaemonSet(dsName, image, label))
 		Expect(err).NotTo(HaveOccurred())
 		Expect(ds.Spec.TemplateGeneration).To(Equal(int64(1)))
@@ -267,7 +267,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		Expect(err).NotTo(HaveOccurred())

 		// Check history and labels
-		ds, err = c.Extensions().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
+		ds, err = c.ExtensionsV1beta1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
 		Expect(err).NotTo(HaveOccurred())
 		waitForHistoryCreated(c, ns, label, 1)
 		first := curHistory(listDaemonHistories(c, ns, label), ds)
@@ -277,7 +277,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {

 		By("Update daemon pods image.")
 		patch := getDaemonSetImagePatch(ds.Spec.Template.Spec.Containers[0].Name, RedisImage)
-		ds, err = c.Extensions().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch))
+		ds, err = c.ExtensionsV1beta1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch))
 		Expect(err).NotTo(HaveOccurred())
 		Expect(ds.Spec.TemplateGeneration).To(Equal(int64(2)))
@@ -294,7 +294,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")

 		// Check history and labels
-		ds, err = c.Extensions().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
+		ds, err = c.ExtensionsV1beta1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
 		Expect(err).NotTo(HaveOccurred())
 		waitForHistoryCreated(c, ns, label, 2)
 		cur := curHistory(listDaemonHistories(c, ns, label), ds)
@@ -311,7 +311,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		ds := newDaemonSet(dsName, image, label)
 		ds.Spec.TemplateGeneration = templateGeneration
 		ds.Spec.UpdateStrategy = extensions.DaemonSetUpdateStrategy{Type: extensions.RollingUpdateDaemonSetStrategyType}
-		ds, err := c.Extensions().DaemonSets(ns).Create(ds)
+		ds, err := c.ExtensionsV1beta1().DaemonSets(ns).Create(ds)
 		Expect(err).NotTo(HaveOccurred())
 		Expect(ds.Spec.TemplateGeneration).To(Equal(templateGeneration))
|
@ -324,7 +324,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
|||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Check history and labels
|
||||
ds, err = c.Extensions().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
|
||||
ds, err = c.ExtensionsV1beta1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
waitForHistoryCreated(c, ns, label, 1)
|
||||
cur := curHistory(listDaemonHistories(c, ns, label), ds)
|
||||
|
@@ -334,7 +334,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {

 		By("Update daemon pods image.")
 		patch := getDaemonSetImagePatch(ds.Spec.Template.Spec.Containers[0].Name, RedisImage)
-		ds, err = c.Extensions().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch))
+		ds, err = c.ExtensionsV1beta1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch))
 		Expect(err).NotTo(HaveOccurred())
 		templateGeneration++
 		Expect(ds.Spec.TemplateGeneration).To(Equal(templateGeneration))
@@ -352,7 +352,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")

 		// Check history and labels
-		ds, err = c.Extensions().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
+		ds, err = c.ExtensionsV1beta1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
 		Expect(err).NotTo(HaveOccurred())
 		waitForHistoryCreated(c, ns, label, 2)
 		cur = curHistory(listDaemonHistories(c, ns, label), ds)
@@ -370,7 +370,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		ds := newDaemonSet(dsName, image, label)
 		ds.Spec.TemplateGeneration = templateGeneration
 		ds.Spec.UpdateStrategy = extensions.DaemonSetUpdateStrategy{Type: extensions.RollingUpdateDaemonSetStrategyType}
-		ds, err := c.Extensions().DaemonSets(ns).Create(ds)
+		ds, err := c.ExtensionsV1beta1().DaemonSets(ns).Create(ds)
 		Expect(err).NotTo(HaveOccurred())
 		Expect(ds.Spec.TemplateGeneration).To(Equal(templateGeneration))
|
@ -392,7 +392,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
|||
newDS := newDaemonSet(newDSName, image, label)
|
||||
newDS.Spec.TemplateGeneration = templateGeneration
|
||||
newDS.Spec.UpdateStrategy = extensions.DaemonSetUpdateStrategy{Type: extensions.RollingUpdateDaemonSetStrategyType}
|
||||
newDS, err = c.Extensions().DaemonSets(ns).Create(newDS)
|
||||
newDS, err = c.ExtensionsV1beta1().DaemonSets(ns).Create(newDS)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(newDS.Spec.TemplateGeneration).To(Equal(templateGeneration))
|
||||
Expect(apiequality.Semantic.DeepEqual(newDS.Spec.Template, ds.Spec.Template)).To(BeTrue(), "DaemonSet template should match to adopt pods")
|
||||
|
@@ -409,7 +409,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		framework.Logf("Creating a new RollingUpdate DaemonSet %s to adopt pods", newAdoptDSName)
 		newAdoptDS := newDaemonSet(newAdoptDSName, image, label)
 		newAdoptDS.Spec.UpdateStrategy = extensions.DaemonSetUpdateStrategy{Type: extensions.RollingUpdateDaemonSetStrategyType}
-		newAdoptDS, err = c.Extensions().DaemonSets(ns).Create(newAdoptDS)
+		newAdoptDS, err = c.ExtensionsV1beta1().DaemonSets(ns).Create(newAdoptDS)
 		Expect(err).NotTo(HaveOccurred())
 		Expect(newAdoptDS.Spec.TemplateGeneration).To(Equal(int64(1)))
 		Expect(newAdoptDS.Spec.TemplateGeneration).NotTo(Equal(templateGeneration))
@@ -429,7 +429,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		newAdoptDS.Spec.Template.Spec.Containers[0].Name = "not-match"
 		newAdoptDS.Spec.UpdateStrategy = extensions.DaemonSetUpdateStrategy{Type: extensions.RollingUpdateDaemonSetStrategyType}
 		newAdoptDS.Spec.TemplateGeneration = templateGeneration
-		newAdoptDS, err = c.Extensions().DaemonSets(ns).Create(newAdoptDS)
+		newAdoptDS, err = c.ExtensionsV1beta1().DaemonSets(ns).Create(newAdoptDS)
 		Expect(err).NotTo(HaveOccurred())
 		Expect(newAdoptDS.Spec.TemplateGeneration).To(Equal(templateGeneration))
 		Expect(apiequality.Semantic.DeepEqual(newAdoptDS.Spec.Template, newDS.Spec.Template)).NotTo(BeTrue(), "DaemonSet template should not match")
@@ -446,7 +446,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		label := map[string]string{daemonsetNameLabel: dsName}
 		ds := newDaemonSet(dsName, image, label)
 		ds.Spec.UpdateStrategy = extensions.DaemonSetUpdateStrategy{Type: extensions.RollingUpdateDaemonSetStrategyType}
-		ds, err := c.Extensions().DaemonSets(ns).Create(ds)
+		ds, err := c.ExtensionsV1beta1().DaemonSets(ns).Create(ds)
 		Expect(err).NotTo(HaveOccurred())

 		framework.Logf("Check that daemon pods launch on every node of the cluster")
@@ -515,7 +515,7 @@ func deleteDaemonSetAndOrphan(c clientset.Interface, ds *extensions.DaemonSet) {
 	trueVar := true
 	deleteOptions := &metav1.DeleteOptions{OrphanDependents: &trueVar}
 	deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(ds.UID))
-	err := c.Extensions().DaemonSets(ds.Namespace).Delete(ds.Name, deleteOptions)
+	err := c.ExtensionsV1beta1().DaemonSets(ds.Namespace).Delete(ds.Name, deleteOptions)
 	Expect(err).NotTo(HaveOccurred())

 	err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonSetPodsOrphaned(c, ds.Namespace, ds.Spec.Template.Labels))
@@ -713,7 +713,7 @@ func checkRunningOnNoNodes(f *framework.Framework, ds *extensions.DaemonSet) fun
 }

 func checkDaemonStatus(f *framework.Framework, dsName string) error {
-	ds, err := f.ClientSet.Extensions().DaemonSets(f.Namespace.Name).Get(dsName, metav1.GetOptions{})
+	ds, err := f.ClientSet.ExtensionsV1beta1().DaemonSets(f.Namespace.Name).Get(dsName, metav1.GetOptions{})
 	if err != nil {
 		return fmt.Errorf("Could not get daemon set from v1.")
 	}
@@ -780,7 +780,7 @@ func checkDaemonPodsTemplateGeneration(c clientset.Interface, ns string, label m

 func checkDaemonSetDeleted(c clientset.Interface, ns, name string) func() (bool, error) {
 	return func() (bool, error) {
-		_, err := c.Extensions().DaemonSets(ns).Get(name, metav1.GetOptions{})
+		_, err := c.ExtensionsV1beta1().DaemonSets(ns).Get(name, metav1.GetOptions{})
 		if !apierrs.IsNotFound(err) {
 			return false, err
 		}
@@ -102,7 +102,7 @@ var _ = SIGDescribe("Deployment", func() {
 })

 func failureTrap(c clientset.Interface, ns string) {
-	deployments, err := c.Extensions().Deployments(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
+	deployments, err := c.ExtensionsV1beta1().Deployments(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
 	if err != nil {
 		framework.Logf("Could not list Deployments in namespace %q: %v", ns, err)
 		return
@@ -161,7 +161,7 @@ func newDeploymentRollback(name string, annotations map[string]string, revision
 }

 func stopDeployment(c clientset.Interface, internalClient internalclientset.Interface, ns, deploymentName string) {
-	deployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
+	deployment, err := c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
 	Expect(err).NotTo(HaveOccurred())

 	framework.Logf("Deleting deployment %s", deploymentName)
@@ -173,14 +173,14 @@ func stopDeployment(c clientset.Interface, internalClient internalclientset.Inte
 	Expect(err).NotTo(HaveOccurred())

 	framework.Logf("Ensuring deployment %s was deleted", deploymentName)
-	_, err = c.Extensions().Deployments(ns).Get(deployment.Name, metav1.GetOptions{})
+	_, err = c.ExtensionsV1beta1().Deployments(ns).Get(deployment.Name, metav1.GetOptions{})
 	Expect(err).To(HaveOccurred())
 	Expect(errors.IsNotFound(err)).To(BeTrue())
 	framework.Logf("Ensuring deployment %s's RSes were deleted", deploymentName)
 	selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
 	Expect(err).NotTo(HaveOccurred())
 	options := metav1.ListOptions{LabelSelector: selector.String()}
-	rss, err := c.Extensions().ReplicaSets(ns).List(options)
+	rss, err := c.ExtensionsV1beta1().ReplicaSets(ns).List(options)
 	Expect(err).NotTo(HaveOccurred())
 	Expect(rss.Items).Should(HaveLen(0))
 	framework.Logf("Ensuring deployment %s's Pods were deleted", deploymentName)
@@ -211,7 +211,7 @@ func testDeleteDeployment(f *framework.Framework) {
 	framework.Logf("Creating simple deployment %s", deploymentName)
 	d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, extensions.RollingUpdateDeploymentStrategyType)
 	d.Annotations = map[string]string{"test": "should-copy-to-replica-set", v1.LastAppliedConfigAnnotation: "should-not-copy-to-replica-set"}
-	deploy, err := c.Extensions().Deployments(ns).Create(d)
+	deploy, err := c.ExtensionsV1beta1().Deployments(ns).Create(d)
 	Expect(err).NotTo(HaveOccurred())

 	// Wait for it to be updated to revision 1
@@ -221,7 +221,7 @@ func testDeleteDeployment(f *framework.Framework) {
 	err = framework.WaitForDeploymentComplete(c, deploy)
 	Expect(err).NotTo(HaveOccurred())

-	deployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
+	deployment, err := c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
 	Expect(err).NotTo(HaveOccurred())
 	newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1())
 	Expect(err).NotTo(HaveOccurred())
@@ -247,7 +247,7 @@ func testRollingUpdateDeployment(f *framework.Framework) {
 	rs := newRS(rsName, replicas, rsPodLabels, NginxImageName, NginxImage)
 	rs.Annotations = annotations
 	framework.Logf("Creating replica set %q (going to be adopted)", rs.Name)
-	_, err := c.Extensions().ReplicaSets(ns).Create(rs)
+	_, err := c.ExtensionsV1beta1().ReplicaSets(ns).Create(rs)
 	Expect(err).NotTo(HaveOccurred())
 	// Verify that the required pods have come up.
 	err = framework.VerifyPodsRunning(c, ns, "sample-pod", false, replicas)
@@ -257,7 +257,7 @@ func testRollingUpdateDeployment(f *framework.Framework) {
 	deploymentName := "test-rolling-update-deployment"
 	framework.Logf("Creating deployment %q", deploymentName)
 	d := framework.NewDeployment(deploymentName, replicas, deploymentPodLabels, RedisImageName, RedisImage, extensions.RollingUpdateDeploymentStrategyType)
-	deploy, err := c.Extensions().Deployments(ns).Create(d)
+	deploy, err := c.ExtensionsV1beta1().Deployments(ns).Create(d)
 	Expect(err).NotTo(HaveOccurred())

 	// Wait for it to be updated to revision 3546343826724305833.
@@ -271,7 +271,7 @@ func testRollingUpdateDeployment(f *framework.Framework) {

 	// There should be 1 old RS (nginx-controller, which is adopted)
 	framework.Logf("Ensuring deployment %q has one old replica set (the one it adopted)", deploy.Name)
-	deployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
+	deployment, err := c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
 	Expect(err).NotTo(HaveOccurred())
 	_, allOldRSs, err := deploymentutil.GetOldReplicaSets(deployment, c.ExtensionsV1beta1())
 	Expect(err).NotTo(HaveOccurred())
@@ -290,7 +290,7 @@ func testRecreateDeployment(f *framework.Framework) {
 	deploymentName := "test-recreate-deployment"
 	framework.Logf("Creating deployment %q", deploymentName)
 	d := framework.NewDeployment(deploymentName, int32(1), map[string]string{"name": "sample-pod-3"}, RedisImageName, RedisImage, extensions.RecreateDeploymentStrategyType)
-	deployment, err := c.Extensions().Deployments(ns).Create(d)
+	deployment, err := c.ExtensionsV1beta1().Deployments(ns).Create(d)
 	Expect(err).NotTo(HaveOccurred())

 	// Wait for it to be updated to revision 1
@@ -326,7 +326,7 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) {
 	rsName := "test-cleanup-controller"
 	replicas := int32(1)
 	revisionHistoryLimit := utilpointer.Int32Ptr(0)
-	_, err := c.Extensions().ReplicaSets(ns).Create(newRS(rsName, replicas, rsPodLabels, NginxImageName, NginxImage))
+	_, err := c.ExtensionsV1beta1().ReplicaSets(ns).Create(newRS(rsName, replicas, rsPodLabels, NginxImageName, NginxImage))
 	Expect(err).NotTo(HaveOccurred())

 	// Verify that the required pods have come up.
@@ -375,7 +375,7 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) {
 	}()
 	d := framework.NewDeployment(deploymentName, replicas, deploymentPodLabels, RedisImageName, RedisImage, extensions.RollingUpdateDeploymentStrategyType)
 	d.Spec.RevisionHistoryLimit = revisionHistoryLimit
-	_, err = c.Extensions().Deployments(ns).Create(d)
+	_, err = c.ExtensionsV1beta1().Deployments(ns).Create(d)
 	Expect(err).NotTo(HaveOccurred())

 	By(fmt.Sprintf("Waiting for deployment %s history to be cleaned up", deploymentName))
@@ -397,7 +397,7 @@ func testRolloverDeployment(f *framework.Framework) {

 	rsName := "test-rollover-controller"
 	rsReplicas := int32(1)
-	_, err := c.Extensions().ReplicaSets(ns).Create(newRS(rsName, rsReplicas, rsPodLabels, NginxImageName, NginxImage))
+	_, err := c.ExtensionsV1beta1().ReplicaSets(ns).Create(newRS(rsName, rsReplicas, rsPodLabels, NginxImageName, NginxImage))
 	Expect(err).NotTo(HaveOccurred())
 	// Verify that the required pods have come up.
 	err = framework.VerifyPodsRunning(c, ns, podName, false, rsReplicas)
@@ -420,11 +420,11 @@ func testRolloverDeployment(f *framework.Framework) {
 		MaxSurge: intOrStrP(1),
 	}
 	newDeployment.Spec.MinReadySeconds = int32(10)
-	_, err = c.Extensions().Deployments(ns).Create(newDeployment)
+	_, err = c.ExtensionsV1beta1().Deployments(ns).Create(newDeployment)
 	Expect(err).NotTo(HaveOccurred())

 	// Verify that the pods were scaled up and down as expected.
-	deployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
+	deployment, err := c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
 	Expect(err).NotTo(HaveOccurred())
 	framework.Logf("Make sure deployment %q performs scaling operations", deploymentName)
 	// Make sure the deployment starts to scale up and down replica sets by checking if its updated replicas >= 1
@@ -435,7 +435,7 @@ func testRolloverDeployment(f *framework.Framework) {
 	Expect(err).NotTo(HaveOccurred())

 	framework.Logf("Ensure that both replica sets have 1 created replica")
-	oldRS, err := c.Extensions().ReplicaSets(ns).Get(rsName, metav1.GetOptions{})
+	oldRS, err := c.ExtensionsV1beta1().ReplicaSets(ns).Get(rsName, metav1.GetOptions{})
 	Expect(err).NotTo(HaveOccurred())
 	ensureReplicas(oldRS, int32(1))
 	newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1())
@@ -466,11 +466,11 @@ func testRolloverDeployment(f *framework.Framework) {
 	Expect(err).NotTo(HaveOccurred())

 	framework.Logf("Ensure that both old replica sets have no replicas")
-	oldRS, err = c.Extensions().ReplicaSets(ns).Get(rsName, metav1.GetOptions{})
+	oldRS, err = c.ExtensionsV1beta1().ReplicaSets(ns).Get(rsName, metav1.GetOptions{})
 	Expect(err).NotTo(HaveOccurred())
 	ensureReplicas(oldRS, int32(0))
 	// Not really the new replica set anymore but we GET by name so that's fine.
-	newRS, err = c.Extensions().ReplicaSets(ns).Get(newRS.Name, metav1.GetOptions{})
+	newRS, err = c.ExtensionsV1beta1().ReplicaSets(ns).Get(newRS.Name, metav1.GetOptions{})
 	Expect(err).NotTo(HaveOccurred())
 	ensureReplicas(newRS, int32(0))
 }
@@ -500,7 +500,7 @@ func testRollbackDeployment(f *framework.Framework) {
 	d := framework.NewDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType)
 	createAnnotation := map[string]string{"action": "create", "author": "node"}
 	d.Annotations = createAnnotation
-	deploy, err := c.Extensions().Deployments(ns).Create(d)
+	deploy, err := c.ExtensionsV1beta1().Deployments(ns).Create(d)
 	Expect(err).NotTo(HaveOccurred())

 	// Wait for it to be updated to revision 1
@@ -544,7 +544,7 @@ func testRollbackDeployment(f *framework.Framework) {
 	revision := int64(1)
 	framework.Logf("rolling back deployment %s to revision %d", deploymentName, revision)
 	rollback := newDeploymentRollback(deploymentName, nil, revision)
-	err = c.Extensions().Deployments(ns).Rollback(rollback)
+	err = c.ExtensionsV1beta1().Deployments(ns).Rollback(rollback)
 	Expect(err).NotTo(HaveOccurred())

 	// Wait for the deployment to start rolling back
@@ -567,7 +567,7 @@ func testRollbackDeployment(f *framework.Framework) {
 	revision = 0
 	framework.Logf("rolling back deployment %s to last revision", deploymentName)
 	rollback = newDeploymentRollback(deploymentName, nil, revision)
-	err = c.Extensions().Deployments(ns).Rollback(rollback)
+	err = c.ExtensionsV1beta1().Deployments(ns).Rollback(rollback)
 	Expect(err).NotTo(HaveOccurred())

 	err = framework.WaitForDeploymentRollbackCleared(c, ns, deploymentName)
@@ -589,7 +589,7 @@ func testRollbackDeployment(f *framework.Framework) {
 	revision = 10
 	framework.Logf("rolling back deployment %s to revision %d", deploymentName, revision)
 	rollback = newDeploymentRollback(deploymentName, nil, revision)
-	err = c.Extensions().Deployments(ns).Rollback(rollback)
+	err = c.ExtensionsV1beta1().Deployments(ns).Rollback(rollback)
 	Expect(err).NotTo(HaveOccurred())

 	// Wait for the deployment to start rolling back
@@ -607,7 +607,7 @@ func testRollbackDeployment(f *framework.Framework) {
 	revision = 4
 	framework.Logf("rolling back deployment %s to revision %d", deploymentName, revision)
 	rollback = newDeploymentRollback(deploymentName, nil, revision)
-	err = c.Extensions().Deployments(ns).Rollback(rollback)
+	err = c.ExtensionsV1beta1().Deployments(ns).Rollback(rollback)
 	Expect(err).NotTo(HaveOccurred())

 	// Wait for the deployment to start rolling back
@@ -636,7 +636,7 @@ func testScaledRolloutDeployment(f *framework.Framework) {
 	d.Spec.Strategy.RollingUpdate.MaxUnavailable = intOrStrP(2)

 	framework.Logf("Creating deployment %q", deploymentName)
-	deployment, err := c.Extensions().Deployments(ns).Create(d)
+	deployment, err := c.ExtensionsV1beta1().Deployments(ns).Create(d)
 	Expect(err).NotTo(HaveOccurred())

 	framework.Logf("Waiting for observed generation %d", deployment.Generation)
@@ -664,7 +664,7 @@ func testScaledRolloutDeployment(f *framework.Framework) {
 	err = framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
 	Expect(err).NotTo(HaveOccurred())

-	deployment, err = c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
+	deployment, err = c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
 	Expect(err).NotTo(HaveOccurred())

 	if deployment.Status.AvailableReplicas < deploymentutil.MinAvailable(deployment) {
@@ -675,14 +675,14 @@ func testScaledRolloutDeployment(f *framework.Framework) {
 	second, err := deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1())
 	Expect(err).NotTo(HaveOccurred())

-	first, err = c.Extensions().ReplicaSets(first.Namespace).Get(first.Name, metav1.GetOptions{})
+	first, err = c.ExtensionsV1beta1().ReplicaSets(first.Namespace).Get(first.Name, metav1.GetOptions{})
 	Expect(err).NotTo(HaveOccurred())

-	firstCond := replicaSetHasDesiredReplicas(c.Extensions(), first)
+	firstCond := replicaSetHasDesiredReplicas(c.ExtensionsV1beta1(), first)
 	err = wait.PollImmediate(10*time.Millisecond, 1*time.Minute, firstCond)
 	Expect(err).NotTo(HaveOccurred())

-	secondCond := replicaSetHasDesiredReplicas(c.Extensions(), second)
+	secondCond := replicaSetHasDesiredReplicas(c.ExtensionsV1beta1(), second)
 	err = wait.PollImmediate(10*time.Millisecond, 1*time.Minute, secondCond)
 	Expect(err).NotTo(HaveOccurred())
@@ -724,7 +724,7 @@ func testScaledRolloutDeployment(f *framework.Framework) {
 	err = framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
 	Expect(err).NotTo(HaveOccurred())

-	deployment, err = c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
+	deployment, err = c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
 	Expect(err).NotTo(HaveOccurred())

 	if deployment.Status.AvailableReplicas < deploymentutil.MinAvailable(deployment) {
@@ -732,17 +732,17 @@ func testScaledRolloutDeployment(f *framework.Framework) {
 	}

 	framework.Logf("Checking that the replica sets for %q are synced", deploymentName)
-	oldRs, err := c.Extensions().ReplicaSets(rs.Namespace).Get(rs.Name, metav1.GetOptions{})
+	oldRs, err := c.ExtensionsV1beta1().ReplicaSets(rs.Namespace).Get(rs.Name, metav1.GetOptions{})
 	Expect(err).NotTo(HaveOccurred())

 	newRs, err := deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1())
 	Expect(err).NotTo(HaveOccurred())

-	oldCond := replicaSetHasDesiredReplicas(c.Extensions(), oldRs)
+	oldCond := replicaSetHasDesiredReplicas(c.ExtensionsV1beta1(), oldRs)
 	err = wait.PollImmediate(10*time.Millisecond, 1*time.Minute, oldCond)
 	Expect(err).NotTo(HaveOccurred())

-	newCond := replicaSetHasDesiredReplicas(c.Extensions(), newRs)
+	newCond := replicaSetHasDesiredReplicas(c.ExtensionsV1beta1(), newRs)
 	err = wait.PollImmediate(10*time.Millisecond, 1*time.Minute, newCond)
 	Expect(err).NotTo(HaveOccurred())
@@ -804,7 +804,7 @@ func testIterativeDeployments(f *framework.Framework) {
 	d.Spec.RevisionHistoryLimit = &two
 	d.Spec.Template.Spec.TerminationGracePeriodSeconds = &zero
 	framework.Logf("Creating deployment %q", deploymentName)
-	deployment, err := c.Extensions().Deployments(ns).Create(d)
+	deployment, err := c.ExtensionsV1beta1().Deployments(ns).Create(d)
 	Expect(err).NotTo(HaveOccurred())

 	iterations := 20
@@ -886,7 +886,7 @@ func testIterativeDeployments(f *framework.Framework) {
 	}

 	// unpause the deployment if we end up pausing it
-	deployment, err = c.Extensions().Deployments(ns).Get(deployment.Name, metav1.GetOptions{})
+	deployment, err = c.ExtensionsV1beta1().Deployments(ns).Get(deployment.Name, metav1.GetOptions{})
 	Expect(err).NotTo(HaveOccurred())
 	if deployment.Spec.Paused {
 		deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
@@ -924,7 +924,7 @@ func testDeploymentsControllerRef(f *framework.Framework) {
 	podLabels := map[string]string{"name": NginxImageName}
 	replicas := int32(1)
 	d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, extensions.RollingUpdateDeploymentStrategyType)
-	deploy, err := c.Extensions().Deployments(ns).Create(d)
+	deploy, err := c.ExtensionsV1beta1().Deployments(ns).Create(d)
 	Expect(err).NotTo(HaveOccurred())
 	err = framework.WaitForDeploymentComplete(c, deploy)
 	Expect(err).NotTo(HaveOccurred())
@@ -944,7 +944,7 @@ func testDeploymentsControllerRef(f *framework.Framework) {
 	deploymentName = "test-adopt-deployment"
 	framework.Logf("Creating Deployment %q to adopt the ReplicaSet", deploymentName)
 	d = framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, extensions.RollingUpdateDeploymentStrategyType)
-	deploy, err = c.Extensions().Deployments(ns).Create(d)
+	deploy, err = c.ExtensionsV1beta1().Deployments(ns).Create(d)
 	Expect(err).NotTo(HaveOccurred())
 	err = framework.WaitForDeploymentComplete(c, deploy)
 	Expect(err).NotTo(HaveOccurred())
@@ -991,7 +991,7 @@ func waitDeploymentReplicaSetsOrphaned(c clientset.Interface, ns string, label m
 func listDeploymentReplicaSets(c clientset.Interface, ns string, label map[string]string) *extensions.ReplicaSetList {
 	selector := labels.Set(label).AsSelector()
 	options := metav1.ListOptions{LabelSelector: selector.String()}
-	rsList, err := c.Extensions().ReplicaSets(ns).List(options)
+	rsList, err := c.ExtensionsV1beta1().ReplicaSets(ns).List(options)
 	Expect(err).NotTo(HaveOccurred())
 	Expect(len(rsList.Items)).To(BeNumerically(">", 0))
 	return rsList
@@ -1001,5 +1001,5 @@ func orphanDeploymentReplicaSets(c clientset.Interface, d *extensions.Deployment
trueVar := true
deleteOptions := &metav1.DeleteOptions{OrphanDependents: &trueVar}
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(d.UID))
return c.Extensions().Deployments(d.Namespace).Delete(d.Name, deleteOptions)
return c.ExtensionsV1beta1().Deployments(d.Namespace).Delete(d.Name, deleteOptions)
}

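Every hunk in this commit makes the same substitution: the deprecated unversioned group accessor (Extensions(), Batch(), Autoscaling(), Policy(), Settings()) becomes the explicit group-version accessor (ExtensionsV1beta1(), BatchV1(), AutoscalingV1(), PolicyV1beta1(), SettingsV1alpha1()), which returns the same underlying client interface, so only the accessor name changes at each call site. A minimal sketch of the resulting call pattern, assuming a client-go of this era (Get still takes metav1.GetOptions and no context argument); the kubeconfig path and object names are illustrative:

    package main

    import (
        "fmt"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/clientcmd"
    )

    func main() {
        // Build a typed clientset; the kubeconfig path is illustrative.
        config, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
        if err != nil {
            panic(err)
        }
        c, err := kubernetes.NewForConfig(config)
        if err != nil {
            panic(err)
        }

        // Before: c.Extensions().Deployments("default").Get(...)
        // After: the explicit group-version accessor returns the same typed
        // interface, so the rest of the call chain is unchanged.
        d, err := c.ExtensionsV1beta1().Deployments("default").Get("my-deployment", metav1.GetOptions{})
        if err != nil {
            panic(err)
        }
        fmt.Printf("deployment %s has %d ready replicas\n", d.Name, d.Status.ReadyReplicas)
    }
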
@@ -111,7 +111,7 @@ func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image s
framework.Logf("Creating ReplicaSet %s", name)
newRS := newRS(name, replicas, map[string]string{"name": name}, name, image)
newRS.Spec.Template.Spec.Containers[0].Ports = []v1.ContainerPort{{ContainerPort: 9376}}
_, err := f.ClientSet.Extensions().ReplicaSets(f.Namespace.Name).Create(newRS)
_, err := f.ClientSet.ExtensionsV1beta1().ReplicaSets(f.Namespace.Name).Create(newRS)
Expect(err).NotTo(HaveOccurred())

// Check that pods for the new RS were created.
@@ -187,14 +187,14 @@ func testReplicaSetConditionCheck(f *framework.Framework) {

By(fmt.Sprintf("Creating replica set %q that asks for more than the allowed pod quota", name))
rs := newRS(name, 3, map[string]string{"name": name}, NginxImageName, NginxImage)
rs, err = c.Extensions().ReplicaSets(namespace).Create(rs)
rs, err = c.ExtensionsV1beta1().ReplicaSets(namespace).Create(rs)
Expect(err).NotTo(HaveOccurred())

By(fmt.Sprintf("Checking replica set %q has the desired failure condition set", name))
generation := rs.Generation
conditions := rs.Status.Conditions
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
rs, err = c.Extensions().ReplicaSets(namespace).Get(name, metav1.GetOptions{})
rs, err = c.ExtensionsV1beta1().ReplicaSets(namespace).Get(name, metav1.GetOptions{})
if err != nil {
return false, err
}
@@ -224,7 +224,7 @@ func testReplicaSetConditionCheck(f *framework.Framework) {
generation = rs.Generation
conditions = rs.Status.Conditions
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
rs, err = c.Extensions().ReplicaSets(namespace).Get(name, metav1.GetOptions{})
rs, err = c.ExtensionsV1beta1().ReplicaSets(namespace).Get(name, metav1.GetOptions{})
if err != nil {
return false, err
}
@@ -267,7 +267,7 @@ func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) {
replicas := int32(1)
rsSt := newRS(name, replicas, map[string]string{"name": name}, name, NginxImageName)
rsSt.Spec.Selector = &metav1.LabelSelector{MatchLabels: map[string]string{"name": name}}
rs, err := f.ClientSet.Extensions().ReplicaSets(f.Namespace.Name).Create(rsSt)
rs, err := f.ClientSet.ExtensionsV1beta1().ReplicaSets(f.Namespace.Name).Create(rsSt)
Expect(err).NotTo(HaveOccurred())

By("Then the orphan pod is adopted")

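The condition checks above share one shape: re-Get the object through the versioned client until a predicate holds or the timeout expires. A minimal sketch of that poll loop, with an illustrative predicate and helper name:

    package example

    import (
        "time"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/util/wait"
        "k8s.io/client-go/kubernetes"
    )

    // waitForDesiredReplicas polls the ReplicaSet through the versioned
    // extensions/v1beta1 client until spec and status replica counts agree.
    func waitForDesiredReplicas(c kubernetes.Interface, ns, name string) error {
        return wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
            rs, err := c.ExtensionsV1beta1().ReplicaSets(ns).Get(name, metav1.GetOptions{})
            if err != nil {
                return false, err // a Get error aborts the poll early
            }
            return rs.Status.ObservedGeneration >= rs.Generation &&
                rs.Status.Replicas == *rs.Spec.Replicas, nil
        })
    }
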
@@ -293,7 +293,7 @@ func updateDNSScalingConfigMap(c clientset.Interface, configMap *v1.ConfigMap) e
func getDNSReplicas(c clientset.Interface) (int, error) {
label := labels.SelectorFromSet(labels.Set(map[string]string{ClusterAddonLabelKey: DNSLabelName}))
listOpts := metav1.ListOptions{LabelSelector: label.String()}
deployments, err := c.Extensions().Deployments(metav1.NamespaceSystem).List(listOpts)
deployments, err := c.ExtensionsV1beta1().Deployments(metav1.NamespaceSystem).List(listOpts)
if err != nil {
return 0, err
}

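getDNSReplicas above shows the list-by-label idiom against the versioned client. A small generalized sketch of the same idiom, with illustrative names:

    package example

    import (
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/labels"
        "k8s.io/client-go/kubernetes"
    )

    // countDeploymentsWithLabel lists deployments in ns matching key=value
    // and returns how many were found.
    func countDeploymentsWithLabel(c kubernetes.Interface, ns, key, value string) (int, error) {
        selector := labels.SelectorFromSet(labels.Set{key: value})
        opts := metav1.ListOptions{LabelSelector: selector.String()}
        list, err := c.ExtensionsV1beta1().Deployments(ns).List(opts)
        if err != nil {
            return 0, err
        }
        return len(list.Items), nil
    }
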
@@ -327,14 +327,14 @@ func (rc *ResourceConsumer) GetReplicas() int {
}
return int(replicationController.Status.ReadyReplicas)
case KindDeployment:
deployment, err := rc.clientSet.Extensions().Deployments(rc.nsName).Get(rc.name, metav1.GetOptions{})
deployment, err := rc.clientSet.ExtensionsV1beta1().Deployments(rc.nsName).Get(rc.name, metav1.GetOptions{})
framework.ExpectNoError(err)
if deployment == nil {
framework.Failf(deploymentIsNil)
}
return int(deployment.Status.ReadyReplicas)
case KindReplicaSet:
rs, err := rc.clientSet.Extensions().ReplicaSets(rc.nsName).Get(rc.name, metav1.GetOptions{})
rs, err := rc.clientSet.ExtensionsV1beta1().ReplicaSets(rc.nsName).Get(rc.name, metav1.GetOptions{})
framework.ExpectNoError(err)
if rs == nil {
framework.Failf(rsIsNil)
@@ -515,11 +515,11 @@ func CreateCPUHorizontalPodAutoscaler(rc *ResourceConsumer, cpu, minReplicas, ma
TargetCPUUtilizationPercentage: &cpu,
},
}
hpa, errHPA := rc.clientSet.Autoscaling().HorizontalPodAutoscalers(rc.nsName).Create(hpa)
hpa, errHPA := rc.clientSet.AutoscalingV1().HorizontalPodAutoscalers(rc.nsName).Create(hpa)
framework.ExpectNoError(errHPA)
return hpa
}

func DeleteHorizontalPodAutoscaler(rc *ResourceConsumer, autoscalerName string) {
rc.clientSet.Autoscaling().HorizontalPodAutoscalers(rc.nsName).Delete(autoscalerName, nil)
rc.clientSet.AutoscalingV1().HorizontalPodAutoscalers(rc.nsName).Delete(autoscalerName, nil)
}

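CreateCPUHorizontalPodAutoscaler now goes through AutoscalingV1(). A sketch of building and creating such an autoscaler, assuming the autoscaling/v1 types from k8s.io/api; the helper name and target are illustrative:

    package example

    import (
        autoscalingv1 "k8s.io/api/autoscaling/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    // createCPUHPA targets an extensions/v1beta1 Deployment with a CPU-based
    // autoscaler through the autoscaling/v1 client.
    func createCPUHPA(c kubernetes.Interface, ns, target string, min, max, cpu int32) (*autoscalingv1.HorizontalPodAutoscaler, error) {
        hpa := &autoscalingv1.HorizontalPodAutoscaler{
            ObjectMeta: metav1.ObjectMeta{Name: target, Namespace: ns},
            Spec: autoscalingv1.HorizontalPodAutoscalerSpec{
                ScaleTargetRef: autoscalingv1.CrossVersionObjectReference{
                    APIVersion: "extensions/v1beta1",
                    Kind:       "Deployment",
                    Name:       target,
                },
                MinReplicas:                    &min,
                MaxReplicas:                    max,
                TargetCPUUtilizationPercentage: &cpu,
            },
        }
        return c.AutoscalingV1().HorizontalPodAutoscalers(ns).Create(hpa)
    }
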
@@ -44,7 +44,7 @@ func WaitForDeploymentOldRSsNum(c clientset.Interface, ns, deploymentName string
var d *extensions.Deployment

pollErr := wait.PollImmediate(Poll, 5*time.Minute, func() (bool, error) {
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
deployment, err := c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
if err != nil {
return false, err
}
@@ -143,7 +143,7 @@ func WatchRecreateDeployment(c clientset.Interface, d *extensions.Deployment) er
return fmt.Errorf("deployment %q does not use a Recreate strategy: %s", d.Name, d.Spec.Strategy.Type)
}

w, err := c.Extensions().Deployments(d.Namespace).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: d.Name, ResourceVersion: d.ResourceVersion}))
w, err := c.ExtensionsV1beta1().Deployments(d.Namespace).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: d.Name, ResourceVersion: d.ResourceVersion}))
if err != nil {
return err
}

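WatchRecreateDeployment uses metav1.SingleObject to watch exactly one deployment starting from a known resourceVersion. A minimal sketch of that watch pattern, with illustrative names:

    package example

    import (
        "fmt"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    // watchOneDeployment opens a watch scoped to a single object and prints
    // each event type until the server closes the stream.
    func watchOneDeployment(c kubernetes.Interface, ns, name, resourceVersion string) error {
        w, err := c.ExtensionsV1beta1().Deployments(ns).Watch(
            metav1.SingleObject(metav1.ObjectMeta{Name: name, ResourceVersion: resourceVersion}))
        if err != nil {
            return err
        }
        defer w.Stop()
        for event := range w.ResultChan() {
            fmt.Printf("event: %v\n", event.Type)
        }
        return nil
    }
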
@@ -947,7 +947,7 @@ func (j *IngressTestJig) CreateIngress(manifestPath, ns string, ingAnnotations m
j.Ingress.Annotations[k] = v
}
Logf(fmt.Sprintf("creating " + j.Ingress.Name + " ingress"))
j.Ingress, err = j.Client.Extensions().Ingresses(ns).Create(j.Ingress)
j.Ingress, err = j.Client.ExtensionsV1beta1().Ingresses(ns).Create(j.Ingress)
ExpectNoError(err)
}

@@ -956,12 +956,12 @@ func (j *IngressTestJig) Update(update func(ing *extensions.Ingress)) {
var err error
ns, name := j.Ingress.Namespace, j.Ingress.Name
for i := 0; i < 3; i++ {
j.Ingress, err = j.Client.Extensions().Ingresses(ns).Get(name, metav1.GetOptions{})
j.Ingress, err = j.Client.ExtensionsV1beta1().Ingresses(ns).Get(name, metav1.GetOptions{})
if err != nil {
Failf("failed to get ingress %q: %v", name, err)
}
update(j.Ingress)
j.Ingress, err = j.Client.Extensions().Ingresses(ns).Update(j.Ingress)
j.Ingress, err = j.Client.ExtensionsV1beta1().Ingresses(ns).Update(j.Ingress)
if err == nil {
DescribeIng(j.Ingress.Namespace)
return
@@ -999,7 +999,7 @@ func (j *IngressTestJig) GetRootCA(secretName string) (rootCA []byte) {

// TryDeleteIngress attempts to delete the ingress resource and logs errors if they occur.
func (j *IngressTestJig) TryDeleteIngress() {
err := j.Client.Extensions().Ingresses(j.Ingress.Namespace).Delete(j.Ingress.Name, nil)
err := j.Client.ExtensionsV1beta1().Ingresses(j.Ingress.Namespace).Delete(j.Ingress.Name, nil)
if err != nil {
Logf("Error while deleting the ingress %v/%v: %v", j.Ingress.Namespace, j.Ingress.Name, err)
}

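IngressTestJig.Update above retries a Get-mutate-Update cycle a fixed number of times. A sketch of the same pattern that also distinguishes write conflicts from other errors; the helper name and retry count are illustrative:

    package example

    import (
        "fmt"

        extv1beta1 "k8s.io/api/extensions/v1beta1"
        apierrs "k8s.io/apimachinery/pkg/api/errors"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    // updateIngressWithRetry re-reads the Ingress before each attempt, so a
    // conflicting write by another client only costs one more iteration.
    func updateIngressWithRetry(c kubernetes.Interface, ns, name string, mutate func(*extv1beta1.Ingress)) error {
        for i := 0; i < 3; i++ {
            ing, err := c.ExtensionsV1beta1().Ingresses(ns).Get(name, metav1.GetOptions{})
            if err != nil {
                return err
            }
            mutate(ing)
            if _, err = c.ExtensionsV1beta1().Ingresses(ns).Update(ing); err == nil {
                return nil
            } else if !apierrs.IsConflict(err) {
                return err
            }
        }
        return fmt.Errorf("too many conflicts updating ingress %s/%s", ns, name)
    }
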
@@ -112,19 +112,19 @@ func NewTestJob(behavior, name string, rPol v1.RestartPolicy, parallelism, compl

// GetJob uses c to get the Job in namespace ns named name. If the returned error is nil, the returned Job is valid.
func GetJob(c clientset.Interface, ns, name string) (*batch.Job, error) {
return c.Batch().Jobs(ns).Get(name, metav1.GetOptions{})
return c.BatchV1().Jobs(ns).Get(name, metav1.GetOptions{})
}

// CreateJob uses c to create job in namespace ns. If the returned error is nil, the returned Job is valid and has
// been created.
func CreateJob(c clientset.Interface, ns string, job *batch.Job) (*batch.Job, error) {
return c.Batch().Jobs(ns).Create(job)
return c.BatchV1().Jobs(ns).Create(job)
}

// UpdateJob uses c to update job in namespace ns. If the returned error is nil, the returned Job is valid and has
// been updated.
func UpdateJob(c clientset.Interface, ns string, job *batch.Job) (*batch.Job, error) {
return c.Batch().Jobs(ns).Update(job)
return c.BatchV1().Jobs(ns).Update(job)
}

// UpdateJobFunc updates the job object. It retries if there is a conflict, throw out error if
@@ -153,7 +153,7 @@ func UpdateJobFunc(c clientset.Interface, ns, name string, updateFn func(job *ba
// DeleteJob uses c to delete the Job named name in namespace ns. If the returned error is nil, the Job has been
// deleted.
func DeleteJob(c clientset.Interface, ns, name string) error {
return c.Batch().Jobs(ns).Delete(name, nil)
return c.BatchV1().Jobs(ns).Delete(name, nil)
}

// GetJobPods returns a list of Pods belonging to a Job.
@@ -184,7 +184,7 @@ func WaitForAllJobPodsRunning(c clientset.Interface, ns, jobName string, paralle
// WaitForJobFinish uses c to wait for completions to complete for the Job jobName in namespace ns.
func WaitForJobFinish(c clientset.Interface, ns, jobName string, completions int32) error {
return wait.Poll(Poll, JobTimeout, func() (bool, error) {
curr, err := c.Batch().Jobs(ns).Get(jobName, metav1.GetOptions{})
curr, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{})
if err != nil {
return false, err
}
@@ -195,7 +195,7 @@ func WaitForJobFinish(c clientset.Interface, ns, jobName string, completions int
// WaitForJobFailure uses c to wait for up to timeout for the Job named jobName in namespace ns to fail.
func WaitForJobFailure(c clientset.Interface, ns, jobName string, timeout time.Duration, reason string) error {
return wait.Poll(Poll, timeout, func() (bool, error) {
curr, err := c.Batch().Jobs(ns).Get(jobName, metav1.GetOptions{})
curr, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{})
if err != nil {
return false, err
}
@@ -237,7 +237,7 @@ func newBool(val bool) *bool {
type updateJobFunc func(*batch.Job)

func UpdateJobWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateJobFunc) (job *batch.Job, err error) {
jobs := c.Batch().Jobs(namespace)
jobs := c.BatchV1().Jobs(namespace)
var updateErr error
pollErr := wait.PollImmediate(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
if job, err = jobs.Get(name, metav1.GetOptions{}); err != nil {

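The job helpers above wrap the batch/v1 client. A sketch combining two of them, create then poll for completion, assuming the batch/v1 types from k8s.io/api; names and intervals are illustrative:

    package example

    import (
        "time"

        batchv1 "k8s.io/api/batch/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/util/wait"
        "k8s.io/client-go/kubernetes"
    )

    // runJobToCompletion creates the Job through the batch/v1 client, then
    // polls its status until the requested number of completions is reached.
    func runJobToCompletion(c kubernetes.Interface, ns string, job *batchv1.Job, completions int32) error {
        created, err := c.BatchV1().Jobs(ns).Create(job)
        if err != nil {
            return err
        }
        return wait.Poll(5*time.Second, 10*time.Minute, func() (bool, error) {
            curr, err := c.BatchV1().Jobs(ns).Get(created.Name, metav1.GetOptions{})
            if err != nil {
                return false, err
            }
            return curr.Status.Succeeded >= completions, nil
        })
    }
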
@@ -38,7 +38,7 @@ func UpdateReplicaSetWithRetries(c clientset.Interface, namespace, name string,

// CheckNewRSAnnotations checks if the new RS's annotation is as expected
func CheckNewRSAnnotations(c clientset.Interface, ns, deploymentName string, expectedAnnotations map[string]string) error {
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
deployment, err := c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
if err != nil {
return err
}
@@ -58,7 +58,7 @@ func CheckNewRSAnnotations(c clientset.Interface, ns, deploymentName string, exp
// WaitForReadyReplicaSet waits until the replicaset has all of its replicas ready.
func WaitForReadyReplicaSet(c clientset.Interface, ns, name string) error {
err := wait.Poll(Poll, pollShortTimeout, func() (bool, error) {
rs, err := c.Extensions().ReplicaSets(ns).Get(name, metav1.GetOptions{})
rs, err := c.ExtensionsV1beta1().ReplicaSets(ns).Get(name, metav1.GetOptions{})
if err != nil {
return false, err
}

@@ -639,7 +639,7 @@ func (j *ServiceTestJig) AddRCAntiAffinity(rc *v1.ReplicationController) {

func (j *ServiceTestJig) CreatePDBOrFail(namespace string, rc *v1.ReplicationController) *policyv1beta1.PodDisruptionBudget {
pdb := j.newPDBTemplate(namespace, rc)
newPdb, err := j.Client.Policy().PodDisruptionBudgets(namespace).Create(pdb)
newPdb, err := j.Client.PolicyV1beta1().PodDisruptionBudgets(namespace).Create(pdb)
if err != nil {
Failf("Failed to create PDB %q %v", pdb.Name, err)
}
@@ -696,7 +696,7 @@ func (j *ServiceTestJig) RunOrFail(namespace string, tweak func(rc *v1.Replicati
func (j *ServiceTestJig) waitForPdbReady(namespace string) error {
timeout := 2 * time.Minute
for start := time.Now(); time.Since(start) < timeout; time.Sleep(2 * time.Second) {
pdb, err := j.Client.Policy().PodDisruptionBudgets(namespace).Get(j.Name, metav1.GetOptions{})
pdb, err := j.Client.PolicyV1beta1().PodDisruptionBudgets(namespace).Get(j.Name, metav1.GetOptions{})
if err != nil {
return err
}

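CreatePDBOrFail now uses PolicyV1beta1(). A sketch of building a minimal PodDisruptionBudget, assuming the policy/v1beta1 types from k8s.io/api; the helper name and labels are illustrative:

    package example

    import (
        policyv1beta1 "k8s.io/api/policy/v1beta1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/util/intstr"
        "k8s.io/client-go/kubernetes"
    )

    // createPDB guards pods matching selectorLabels so that at least
    // minAvailable of them survive voluntary disruptions.
    func createPDB(c kubernetes.Interface, ns, name string, selectorLabels map[string]string, minAvailable int) (*policyv1beta1.PodDisruptionBudget, error) {
        min := intstr.FromInt(minAvailable)
        pdb := &policyv1beta1.PodDisruptionBudget{
            ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: ns},
            Spec: policyv1beta1.PodDisruptionBudgetSpec{
                MinAvailable: &min,
                Selector:     &metav1.LabelSelector{MatchLabels: selectorLabels},
            },
        }
        return c.PolicyV1beta1().PodDisruptionBudgets(ns).Create(pdb)
    }
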
@@ -599,7 +599,7 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN
replicaOk += rc.Status.ReadyReplicas
}

rsList, err := c.Extensions().ReplicaSets(ns).List(metav1.ListOptions{})
rsList, err := c.ExtensionsV1beta1().ReplicaSets(ns).List(metav1.ListOptions{})
if err != nil {
Logf("Error getting replication sets in namespace %q: %v", ns, err)
if IsRetryableAPIError(err) {
@@ -2804,13 +2804,13 @@ func getRuntimeObjectForKind(c clientset.Interface, kind schema.GroupKind, ns, n
case api.Kind("ReplicationController"):
return c.CoreV1().ReplicationControllers(ns).Get(name, metav1.GetOptions{})
case extensionsinternal.Kind("ReplicaSet"), appsinternal.Kind("ReplicaSet"):
return c.Extensions().ReplicaSets(ns).Get(name, metav1.GetOptions{})
return c.ExtensionsV1beta1().ReplicaSets(ns).Get(name, metav1.GetOptions{})
case extensionsinternal.Kind("Deployment"), appsinternal.Kind("Deployment"):
return c.Extensions().Deployments(ns).Get(name, metav1.GetOptions{})
return c.ExtensionsV1beta1().Deployments(ns).Get(name, metav1.GetOptions{})
case extensionsinternal.Kind("DaemonSet"):
return c.Extensions().DaemonSets(ns).Get(name, metav1.GetOptions{})
return c.ExtensionsV1beta1().DaemonSets(ns).Get(name, metav1.GetOptions{})
case batchinternal.Kind("Job"):
return c.Batch().Jobs(ns).Get(name, metav1.GetOptions{})
return c.BatchV1().Jobs(ns).Get(name, metav1.GetOptions{})
default:
return nil, fmt.Errorf("Unsupported kind when getting runtime object: %v", kind)
}
@@ -2821,13 +2821,13 @@ func deleteResource(c clientset.Interface, kind schema.GroupKind, ns, name strin
case api.Kind("ReplicationController"):
return c.CoreV1().ReplicationControllers(ns).Delete(name, deleteOption)
case extensionsinternal.Kind("ReplicaSet"), appsinternal.Kind("ReplicaSet"):
return c.Extensions().ReplicaSets(ns).Delete(name, deleteOption)
return c.ExtensionsV1beta1().ReplicaSets(ns).Delete(name, deleteOption)
case extensionsinternal.Kind("Deployment"), appsinternal.Kind("Deployment"):
return c.Extensions().Deployments(ns).Delete(name, deleteOption)
return c.ExtensionsV1beta1().Deployments(ns).Delete(name, deleteOption)
case extensionsinternal.Kind("DaemonSet"):
return c.Extensions().DaemonSets(ns).Delete(name, deleteOption)
return c.ExtensionsV1beta1().DaemonSets(ns).Delete(name, deleteOption)
case batchinternal.Kind("Job"):
return c.Batch().Jobs(ns).Delete(name, deleteOption)
return c.BatchV1().Jobs(ns).Delete(name, deleteOption)
default:
return fmt.Errorf("Unsupported kind when deleting: %v", kind)
}
@@ -4104,7 +4104,7 @@ func OpenWebSocketForURL(url *url.URL, config *restclient.Config, protocols []st

// getIngressAddress returns the ips/hostnames associated with the Ingress.
func getIngressAddress(client clientset.Interface, ns, name string) ([]string, error) {
ing, err := client.Extensions().Ingresses(ns).Get(name, metav1.GetOptions{})
ing, err := client.ExtensionsV1beta1().Ingresses(ns).Get(name, metav1.GetOptions{})
if err != nil {
return nil, err
}

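getRuntimeObjectForKind and deleteResource dispatch on a schema.GroupKind and route to the matching versioned client. A reduced sketch of that dispatch covering only a few kinds; names are illustrative:

    package example

    import (
        "fmt"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/runtime"
        "k8s.io/apimachinery/pkg/runtime/schema"
        "k8s.io/client-go/kubernetes"
    )

    // getObjectForKind routes a group/kind pair to the matching versioned
    // client; each typed Get result satisfies runtime.Object.
    func getObjectForKind(c kubernetes.Interface, kind schema.GroupKind, ns, name string) (runtime.Object, error) {
        switch kind {
        case schema.GroupKind{Kind: "ReplicationController"}:
            return c.CoreV1().ReplicationControllers(ns).Get(name, metav1.GetOptions{})
        case schema.GroupKind{Group: "extensions", Kind: "Deployment"}:
            return c.ExtensionsV1beta1().Deployments(ns).Get(name, metav1.GetOptions{})
        case schema.GroupKind{Group: "batch", Kind: "Job"}:
            return c.BatchV1().Jobs(ns).Get(name, metav1.GetOptions{})
        default:
            return nil, fmt.Errorf("unsupported kind: %v", kind)
        }
    }
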
@@ -113,7 +113,7 @@ func verifyExpectedRcsExistAndGetExpectedPods(c clientset.Interface) ([]string,
for _, rcLabel := range rcLabels {
selector := labels.Set{"k8s-app": rcLabel}.AsSelector()
options := metav1.ListOptions{LabelSelector: selector.String()}
deploymentList, err := c.Extensions().Deployments(metav1.NamespaceSystem).List(options)
deploymentList, err := c.ExtensionsV1beta1().Deployments(metav1.NamespaceSystem).List(options)
if err != nil {
return nil, err
}

@@ -495,7 +495,7 @@ var _ = SIGDescribe("Kubectl client", func() {
ExecOrDie()
Expect(runOutput).To(ContainSubstring("abcd1234"))
Expect(runOutput).To(ContainSubstring("stdin closed"))
Expect(c.Batch().Jobs(ns).Delete("run-test", nil)).To(BeNil())
Expect(c.BatchV1().Jobs(ns).Delete("run-test", nil)).To(BeNil())

By("executing a command with run and attach without stdin")
runOutput = framework.NewKubectlCommand(fmt.Sprintf("--namespace=%v", ns), "run", "run-test-2", "--image="+busyboxImage, "--restart=OnFailure", "--attach=true", "--leave-stdin-open=true", "--", "sh", "-c", "cat && echo 'stdin closed'").
@@ -503,7 +503,7 @@ var _ = SIGDescribe("Kubectl client", func() {
ExecOrDie()
Expect(runOutput).ToNot(ContainSubstring("abcd1234"))
Expect(runOutput).To(ContainSubstring("stdin closed"))
Expect(c.Batch().Jobs(ns).Delete("run-test-2", nil)).To(BeNil())
Expect(c.BatchV1().Jobs(ns).Delete("run-test-2", nil)).To(BeNil())

By("executing a command with run and attach with stdin with open stdin should remain running")
runOutput = framework.NewKubectlCommand(nsFlag, "run", "run-test-3", "--image="+busyboxImage, "--restart=OnFailure", "--attach=true", "--leave-stdin-open=true", "--stdin", "--", "sh", "-c", "cat && echo 'stdin closed'").
@@ -534,7 +534,7 @@ var _ = SIGDescribe("Kubectl client", func() {
}
Expect(err).To(BeNil())

Expect(c.Batch().Jobs(ns).Delete("run-test-3", nil)).To(BeNil())
Expect(c.BatchV1().Jobs(ns).Delete("run-test-3", nil)).To(BeNil())
})

It("should support port-forward", func() {
@@ -1257,7 +1257,7 @@ metadata:
By("running the image " + nginxImage)
framework.RunKubectlOrDie("run", dName, "--image="+nginxImage, "--generator=deployment/v1beta1", nsFlag)
By("verifying the deployment " + dName + " was created")
d, err := c.Extensions().Deployments(ns).Get(dName, metav1.GetOptions{})
d, err := c.ExtensionsV1beta1().Deployments(ns).Get(dName, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed getting deployment %s: %v", dName, err)
}
@@ -1297,7 +1297,7 @@ metadata:
By("running the image " + nginxImage)
framework.RunKubectlOrDie("run", jobName, "--restart=OnFailure", "--generator=job/v1", "--image="+nginxImage, nsFlag)
By("verifying the job " + jobName + " was created")
job, err := c.Batch().Jobs(ns).Get(jobName, metav1.GetOptions{})
job, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed getting job %s: %v", jobName, err)
}
@@ -1445,7 +1445,7 @@ metadata:
Expect(runOutput).To(ContainSubstring("stdin closed"))

By("verifying the job " + jobName + " was deleted")
_, err := c.Batch().Jobs(ns).Get(jobName, metav1.GetOptions{})
_, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{})
Expect(err).To(HaveOccurred())
Expect(apierrs.IsNotFound(err)).To(BeTrue())
})

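The kubectl test above proves deletion by expecting a NotFound error from Get. A sketch of that check as a standalone helper, with illustrative names:

    package example

    import (
        "fmt"

        apierrs "k8s.io/apimachinery/pkg/api/errors"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    // confirmJobDeleted distinguishes "gone" from every other Get failure:
    // only a NotFound error proves the Job no longer exists.
    func confirmJobDeleted(c kubernetes.Interface, ns, name string) error {
        _, err := c.BatchV1().Jobs(ns).Get(name, metav1.GetOptions{})
        if err == nil {
            return fmt.Errorf("job %s/%s still exists", ns, name)
        }
        if !apierrs.IsNotFound(err) {
            return err
        }
        return nil
    }
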
@@ -177,7 +177,7 @@ func testNvidiaGPUsOnCOS(f *framework.Framework) {
ds, err := framework.DsFromManifest(dsYamlUrl)
Expect(err).NotTo(HaveOccurred())
ds.Namespace = f.Namespace.Name
_, err = f.ClientSet.Extensions().DaemonSets(f.Namespace.Name).Create(ds)
_, err = f.ClientSet.ExtensionsV1beta1().DaemonSets(f.Namespace.Name).Create(ds)
framework.ExpectNoError(err, "failed to create daemonset")
framework.Logf("Successfully created daemonset to install Nvidia drivers. Waiting for drivers to be installed and GPUs to be available in Node Capacity...")
// Wait for Nvidia GPUs to be available on nodes

@@ -61,7 +61,7 @@ var _ = SIGDescribe("Rescheduler [Serial]", func() {
By("creating a new instance of Dashboard and waiting for Dashboard to be scheduled")
label := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": "kubernetes-dashboard"}))
listOpts := metav1.ListOptions{LabelSelector: label.String()}
deployments, err := f.ClientSet.Extensions().Deployments(metav1.NamespaceSystem).List(listOpts)
deployments, err := f.ClientSet.ExtensionsV1beta1().Deployments(metav1.NamespaceSystem).List(listOpts)
framework.ExpectNoError(err)
Expect(len(deployments.Items)).Should(Equal(1))

@@ -266,17 +266,17 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() {
})

func getPodPreset(c clientset.Interface, ns, name string) (*settings.PodPreset, error) {
return c.Settings().PodPresets(ns).Get(name, metav1.GetOptions{})
return c.SettingsV1alpha1().PodPresets(ns).Get(name, metav1.GetOptions{})
}

func createPodPreset(c clientset.Interface, ns string, job *settings.PodPreset) (*settings.PodPreset, error) {
return c.Settings().PodPresets(ns).Create(job)
return c.SettingsV1alpha1().PodPresets(ns).Create(job)
}

func updatePodPreset(c clientset.Interface, ns string, job *settings.PodPreset) (*settings.PodPreset, error) {
return c.Settings().PodPresets(ns).Update(job)
return c.SettingsV1alpha1().PodPresets(ns).Update(job)
}

func deletePodPreset(c clientset.Interface, ns, name string) error {
return c.Settings().PodPresets(ns).Delete(name, nil)
return c.SettingsV1alpha1().PodPresets(ns).Delete(name, nil)
}

@@ -72,7 +72,7 @@ func (t *DaemonSetUpgradeTest) Setup(f *framework.Framework) {

By("Creating a DaemonSet")
var err error
if t.daemonSet, err = f.ClientSet.Extensions().DaemonSets(ns.Name).Create(t.daemonSet); err != nil {
if t.daemonSet, err = f.ClientSet.ExtensionsV1beta1().DaemonSets(ns.Name).Create(t.daemonSet); err != nil {
framework.Failf("unable to create test DaemonSet %s: %v", t.daemonSet.Name, err)
}

@@ -315,7 +315,7 @@ func (config *DeploymentConfig) create() error {

config.applyTo(&deployment.Spec.Template)

_, err := config.Client.Extensions().Deployments(config.Namespace).Create(deployment)
_, err := config.Client.ExtensionsV1beta1().Deployments(config.Namespace).Create(deployment)
if err != nil {
return fmt.Errorf("Error creating deployment: %v", err)
}
@@ -382,7 +382,7 @@ func (config *ReplicaSetConfig) create() error {

config.applyTo(&rs.Spec.Template)

_, err := config.Client.Extensions().ReplicaSets(config.Namespace).Create(rs)
_, err := config.Client.ExtensionsV1beta1().ReplicaSets(config.Namespace).Create(rs)
if err != nil {
return fmt.Errorf("Error creating replica set: %v", err)
}
@@ -445,7 +445,7 @@ func (config *JobConfig) create() error {

config.applyTo(&job.Spec.Template)

_, err := config.Client.Batch().Jobs(config.Namespace).Create(job)
_, err := config.Client.BatchV1().Jobs(config.Namespace).Create(job)
if err != nil {
return fmt.Errorf("Error creating job: %v", err)
}
@@ -1232,7 +1232,7 @@ func (config *DaemonConfig) Run() error {
},
}

_, err := config.Client.Extensions().DaemonSets(config.Namespace).Create(daemon)
_, err := config.Client.ExtensionsV1beta1().DaemonSets(config.Namespace).Create(daemon)
if err != nil {
return fmt.Errorf("Error creating DaemonSet %v: %v", config.Name, err)
}

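DeploymentConfig, ReplicaSetConfig, JobConfig and DaemonConfig all build an object and submit it through the versioned client. A final sketch for the DaemonSet case, assuming the extensions/v1beta1 types from k8s.io/api; the helper name and labels are illustrative:

    package example

    import (
        "k8s.io/api/core/v1"
        extv1beta1 "k8s.io/api/extensions/v1beta1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    // createDaemonSet builds a one-container extensions/v1beta1 DaemonSet and
    // submits it through the versioned client; the selector defaults from the
    // template labels in this API version.
    func createDaemonSet(c kubernetes.Interface, ns, name, image string) (*extv1beta1.DaemonSet, error) {
        podLabels := map[string]string{"name": name}
        ds := &extv1beta1.DaemonSet{
            ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: ns},
            Spec: extv1beta1.DaemonSetSpec{
                Template: v1.PodTemplateSpec{
                    ObjectMeta: metav1.ObjectMeta{Labels: podLabels},
                    Spec: v1.PodSpec{
                        Containers: []v1.Container{{Name: name, Image: image}},
                    },
                },
            },
        }
        return c.ExtensionsV1beta1().DaemonSets(ns).Create(ds)
    }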