From 032416c75deddfdd826beadccb6da2b910b6534d Mon Sep 17 00:00:00 2001
From: supereagle
Date: Sun, 12 Nov 2017 19:00:21 +0800
Subject: [PATCH] use core client with explicit version

fix more usage of deprecated core client
---
 .../resourcequota/resource_quota_controller_test.go    |  2 +-
 .../volume/persistentvolume/pv_controller.go           |  2 +-
 pkg/kubectl/cmd/drain.go                                |  8 ++++----
 pkg/kubectl/cmd/top_node.go                             |  4 ++--
 pkg/kubectl/cmd/top_pod.go                              |  4 ++--
 pkg/kubectl/cmd/util/env/env_resolve.go                 |  4 ++--
 pkg/volume/azure_file/azure_util.go                     |  4 ++--
 pkg/volume/cephfs/cephfs.go                             |  2 +-
 pkg/volume/iscsi/attacher.go                            |  2 +-
 pkg/volume/iscsi/iscsi.go                               |  2 +-
 pkg/volume/rbd/rbd.go                                   |  2 +-
 test/e2e/auth/pod_security_policy.go                    | 10 +++++-----
 test/e2e/framework/deployment_util.go                   |  2 +-
 .../instrumentation/logging/elasticsearch/kibana.go     |  6 +++---
 .../e2e/instrumentation/logging/elasticsearch/utils.go  | 10 +++++-----
 test/e2e/multicluster/ubernetes_lite.go                 |  2 +-
 test/e2e/network/service.go                             |  2 +-
 test/e2e/upgrades/apps/daemonsets.go                    |  4 ++--
 test/e2e/upgrades/apps/statefulset.go                   |  2 +-
 test/integration/deployment/util.go                     |  2 +-
 20 files changed, 38 insertions(+), 38 deletions(-)

diff --git a/pkg/controller/resourcequota/resource_quota_controller_test.go b/pkg/controller/resourcequota/resource_quota_controller_test.go
index dcbbad8b7b..6ed7ba9c96 100644
--- a/pkg/controller/resourcequota/resource_quota_controller_test.go
+++ b/pkg/controller/resourcequota/resource_quota_controller_test.go
@@ -89,7 +89,7 @@ func setupQuotaController(t *testing.T, kubeClient kubernetes.Interface, lister
 	alwaysStarted := make(chan struct{})
 	close(alwaysStarted)
 	resourceQuotaControllerOptions := &ResourceQuotaControllerOptions{
-		QuotaClient:               kubeClient.Core(),
+		QuotaClient:               kubeClient.CoreV1(),
 		ResourceQuotaInformer:     informerFactory.Core().V1().ResourceQuotas(),
 		ResyncPeriod:              controller.NoResyncPeriodFunc,
 		ReplenishmentResyncPeriod: controller.NoResyncPeriodFunc,
diff --git a/pkg/controller/volume/persistentvolume/pv_controller.go b/pkg/controller/volume/persistentvolume/pv_controller.go
index 980d960c75..9614cb6568 100644
--- a/pkg/controller/volume/persistentvolume/pv_controller.go
+++ b/pkg/controller/volume/persistentvolume/pv_controller.go
@@ -802,7 +802,7 @@ func (ctrl *PersistentVolumeController) bindVolumeToClaim(volume *v1.PersistentV
 // API server. The claim is not modified in this method!
 func (ctrl *PersistentVolumeController) updateBindVolumeToClaim(volumeClone *v1.PersistentVolume, claim *v1.PersistentVolumeClaim, updateCache bool) (*v1.PersistentVolume, error) {
 	glog.V(2).Infof("claim %q bound to volume %q", claimToClaimKey(claim), volumeClone.Name)
-	newVol, err := ctrl.kubeClient.Core().PersistentVolumes().Update(volumeClone)
+	newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Update(volumeClone)
 	if err != nil {
 		glog.V(4).Infof("updating PersistentVolume[%s]: binding to %q failed: %v", volumeClone.Name, claimToClaimKey(claim), err)
 		return newVol, err
diff --git a/pkg/kubectl/cmd/drain.go b/pkg/kubectl/cmd/drain.go
index 910654f6f6..39bb33121f 100644
--- a/pkg/kubectl/cmd/drain.go
+++ b/pkg/kubectl/cmd/drain.go
@@ -331,7 +331,7 @@ func (o *DrainOptions) deleteOrEvictPodsSimple(nodeInfo *resource.Info) error {
 func (o *DrainOptions) getController(namespace string, controllerRef *metav1.OwnerReference) (interface{}, error) {
 	switch controllerRef.Kind {
 	case "ReplicationController":
-		return o.client.Core().ReplicationControllers(namespace).Get(controllerRef.Name, metav1.GetOptions{})
+		return o.client.CoreV1().ReplicationControllers(namespace).Get(controllerRef.Name, metav1.GetOptions{})
 	case "DaemonSet":
 		return o.client.Extensions().DaemonSets(namespace).Get(controllerRef.Name, metav1.GetOptions{})
 	case "Job":
@@ -455,7 +455,7 @@ func (ps podStatuses) Message() string {
 // getPodsForDeletion receives resource info for a node, and returns all the pods from the given node that we
 // are planning on deleting. If there are any pods preventing us from deleting, we return that list in an error.
 func (o *DrainOptions) getPodsForDeletion(nodeInfo *resource.Info) (pods []corev1.Pod, err error) {
-	podList, err := o.client.Core().Pods(metav1.NamespaceAll).List(metav1.ListOptions{
+	podList, err := o.client.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{
 		FieldSelector: fields.SelectorFromSet(fields.Set{"spec.nodeName": nodeInfo.Name}).String()})
 	if err != nil {
 		return pods, err
@@ -497,7 +497,7 @@ func (o *DrainOptions) deletePod(pod corev1.Pod) error {
 		gracePeriodSeconds := int64(o.GracePeriodSeconds)
 		deleteOptions.GracePeriodSeconds = &gracePeriodSeconds
 	}
-	return o.client.Core().Pods(pod.Namespace).Delete(pod.Name, deleteOptions)
+	return o.client.CoreV1().Pods(pod.Namespace).Delete(pod.Name, deleteOptions)
 }
 
 func (o *DrainOptions) evictPod(pod corev1.Pod, policyGroupVersion string) error {
@@ -533,7 +533,7 @@ func (o *DrainOptions) deleteOrEvictPods(pods []corev1.Pod) error {
 	}
 
 	getPodFn := func(namespace, name string) (*corev1.Pod, error) {
-		return o.client.Core().Pods(namespace).Get(name, metav1.GetOptions{})
+		return o.client.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{})
 	}
 
 	if len(policyGroupVersion) > 0 {
diff --git a/pkg/kubectl/cmd/top_node.go b/pkg/kubectl/cmd/top_node.go
index ab7b992cd5..dace2918d6 100644
--- a/pkg/kubectl/cmd/top_node.go
+++ b/pkg/kubectl/cmd/top_node.go
@@ -122,8 +122,8 @@ func (o *TopNodeOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []
 	if err != nil {
 		return err
 	}
-	o.NodeClient = clientset.Core()
-	o.Client = metricsutil.NewHeapsterMetricsClient(clientset.Core(), o.HeapsterOptions.Namespace, o.HeapsterOptions.Scheme, o.HeapsterOptions.Service, o.HeapsterOptions.Port)
+	o.NodeClient = clientset.CoreV1()
+	o.Client = metricsutil.NewHeapsterMetricsClient(clientset.CoreV1(), o.HeapsterOptions.Namespace, o.HeapsterOptions.Scheme, o.HeapsterOptions.Service, o.HeapsterOptions.Port)
 	o.Printer = metricsutil.NewTopCmdPrinter(out)
 	return nil
 }
diff --git a/pkg/kubectl/cmd/top_pod.go b/pkg/kubectl/cmd/top_pod.go
index d64c012f04..2ffdc6dff8 100644
--- a/pkg/kubectl/cmd/top_pod.go
+++ b/pkg/kubectl/cmd/top_pod.go
@@ -118,8 +118,8 @@ func (o *TopPodOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []s
 	if err != nil {
 		return err
 	}
-	o.PodClient = clientset.Core()
-	o.Client = metricsutil.NewHeapsterMetricsClient(clientset.Core(), o.HeapsterOptions.Namespace, o.HeapsterOptions.Scheme, o.HeapsterOptions.Service, o.HeapsterOptions.Port)
+	o.PodClient = clientset.CoreV1()
+	o.Client = metricsutil.NewHeapsterMetricsClient(clientset.CoreV1(), o.HeapsterOptions.Namespace, o.HeapsterOptions.Scheme, o.HeapsterOptions.Service, o.HeapsterOptions.Port)
 	o.Printer = metricsutil.NewTopCmdPrinter(out)
 	return nil
 }
diff --git a/pkg/kubectl/cmd/util/env/env_resolve.go b/pkg/kubectl/cmd/util/env/env_resolve.go
index d663f73047..ede70215ad 100644
--- a/pkg/kubectl/cmd/util/env/env_resolve.go
+++ b/pkg/kubectl/cmd/util/env/env_resolve.go
@@ -46,7 +46,7 @@ func getSecretRefValue(client kubernetes.Interface, namespace string, store *Res
 	secret, ok := store.SecretStore[secretSelector.Name]
 	if !ok {
 		var err error
-		secret, err = client.Core().Secrets(namespace).Get(secretSelector.Name, metav1.GetOptions{})
+		secret, err = client.CoreV1().Secrets(namespace).Get(secretSelector.Name, metav1.GetOptions{})
 		if err != nil {
 			return "", err
 		}
@@ -64,7 +64,7 @@ func getConfigMapRefValue(client kubernetes.Interface, namespace string, store *
 	configMap, ok := store.ConfigMapStore[configMapSelector.Name]
 	if !ok {
 		var err error
-		configMap, err = client.Core().ConfigMaps(namespace).Get(configMapSelector.Name, metav1.GetOptions{})
+		configMap, err = client.CoreV1().ConfigMaps(namespace).Get(configMapSelector.Name, metav1.GetOptions{})
 		if err != nil {
 			return "", err
 		}
diff --git a/pkg/volume/azure_file/azure_util.go b/pkg/volume/azure_file/azure_util.go
index 038d78518d..f1685dad4e 100644
--- a/pkg/volume/azure_file/azure_util.go
+++ b/pkg/volume/azure_file/azure_util.go
@@ -50,7 +50,7 @@ func (s *azureSvc) GetAzureCredentials(host volume.VolumeHost, nameSpace, secret
 		return "", "", fmt.Errorf("Cannot get kube client")
 	}
 
-	keys, err := kubeClient.Core().Secrets(nameSpace).Get(secretName, metav1.GetOptions{})
+	keys, err := kubeClient.CoreV1().Secrets(nameSpace).Get(secretName, metav1.GetOptions{})
 	if err != nil {
 		return "", "", fmt.Errorf("Couldn't get secret %v/%v", nameSpace, secretName)
 	}
@@ -85,7 +85,7 @@ func (s *azureSvc) SetAzureCredentials(host volume.VolumeHost, nameSpace, accoun
 		},
 		Type: "Opaque",
 	}
-	_, err := kubeClient.Core().Secrets(nameSpace).Create(secret)
+	_, err := kubeClient.CoreV1().Secrets(nameSpace).Create(secret)
 	if errors.IsAlreadyExists(err) {
 		err = nil
 	}
diff --git a/pkg/volume/cephfs/cephfs.go b/pkg/volume/cephfs/cephfs.go
index 5c0fdd04a7..234ee7f25c 100644
--- a/pkg/volume/cephfs/cephfs.go
+++ b/pkg/volume/cephfs/cephfs.go
@@ -100,7 +100,7 @@ func (plugin *cephfsPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.
 		if kubeClient == nil {
 			return nil, fmt.Errorf("Cannot get kube client")
 		}
-		secrets, err := kubeClient.Core().Secrets(secretNs).Get(secretName, metav1.GetOptions{})
+		secrets, err := kubeClient.CoreV1().Secrets(secretNs).Get(secretName, metav1.GetOptions{})
 		if err != nil {
 			err = fmt.Errorf("Couldn't get secret %v/%v err: %v", secretNs, secretName, err)
 			return nil, err
diff --git a/pkg/volume/iscsi/attacher.go b/pkg/volume/iscsi/attacher.go
index c039a70db8..b86b2f2499 100644
--- a/pkg/volume/iscsi/attacher.go
+++ b/pkg/volume/iscsi/attacher.go
@@ -186,7 +186,7 @@ func (attacher *iscsiAttacher) volumeSpecToMounter(spec *volume.Spec, host volum
 		if kubeClient == nil {
 			return nil, fmt.Errorf("Cannot get kube client")
 		}
-		secretObj, err := kubeClient.Core().Secrets(secretNamespace).Get(secretName, metav1.GetOptions{})
+		secretObj, err := kubeClient.CoreV1().Secrets(secretNamespace).Get(secretName, metav1.GetOptions{})
 		if err != nil {
 			err = fmt.Errorf("Couldn't get secret %v/%v error: %v", secretNamespace, secretName, err)
 			return nil, err
diff --git a/pkg/volume/iscsi/iscsi.go b/pkg/volume/iscsi/iscsi.go
index a7893ec721..4ea6e792ef 100644
--- a/pkg/volume/iscsi/iscsi.go
+++ b/pkg/volume/iscsi/iscsi.go
@@ -118,7 +118,7 @@ func (plugin *iscsiPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.V
 		if kubeClient == nil {
 			return nil, fmt.Errorf("Cannot get kube client")
 		}
-		secretObj, err := kubeClient.Core().Secrets(secretNamespace).Get(secretName, metav1.GetOptions{})
+		secretObj, err := kubeClient.CoreV1().Secrets(secretNamespace).Get(secretName, metav1.GetOptions{})
 		if err != nil {
 			err = fmt.Errorf("Couldn't get secret %v/%v error: %v", secretNamespace, secretName, err)
 			return nil, err
diff --git a/pkg/volume/rbd/rbd.go b/pkg/volume/rbd/rbd.go
index 596a19dca8..2bad82bcdf 100644
--- a/pkg/volume/rbd/rbd.go
+++ b/pkg/volume/rbd/rbd.go
@@ -164,7 +164,7 @@ func (plugin *rbdPlugin) createMounterFromVolumeSpecAndPod(spec *volume.Spec, po
 		if kubeClient == nil {
 			return nil, fmt.Errorf("Cannot get kube client")
 		}
-		secrets, err := kubeClient.Core().Secrets(secretNs).Get(secretName, metav1.GetOptions{})
+		secrets, err := kubeClient.CoreV1().Secrets(secretNs).Get(secretName, metav1.GetOptions{})
 		if err != nil {
 			err = fmt.Errorf("Couldn't get secret %v/%v err: %v", secretNs, secretName, err)
 			return nil, err
diff --git a/test/e2e/auth/pod_security_policy.go b/test/e2e/auth/pod_security_policy.go
index cc6b287891..3a3849907d 100644
--- a/test/e2e/auth/pod_security_policy.go
+++ b/test/e2e/auth/pod_security_policy.go
@@ -131,7 +131,7 @@ var _ = SIGDescribe("PodSecurityPolicy", func() {
 
 	It("should forbid pod creation when no PSP is available", func() {
 		By("Running a restricted pod")
-		_, err := c.Core().Pods(ns).Create(restrictedPod(f, "restricted"))
+		_, err := c.CoreV1().Pods(ns).Create(restrictedPod(f, "restricted"))
 		expectForbidden(err)
 	})
 
@@ -141,12 +141,12 @@ var _ = SIGDescribe("PodSecurityPolicy", func() {
 		defer cleanup()
 
 		By("Running a restricted pod")
-		pod, err := c.Core().Pods(ns).Create(restrictedPod(f, "allowed"))
+		pod, err := c.CoreV1().Pods(ns).Create(restrictedPod(f, "allowed"))
 		framework.ExpectNoError(err)
 		framework.ExpectNoError(framework.WaitForPodNameRunningInNamespace(c, pod.Name, pod.Namespace))
 
 		testPrivilegedPods(f, func(pod *v1.Pod) {
-			_, err := c.Core().Pods(ns).Create(pod)
+			_, err := c.CoreV1().Pods(ns).Create(pod)
 			expectForbidden(err)
 		})
 	})
@@ -160,12 +160,12 @@ var _ = SIGDescribe("PodSecurityPolicy", func() {
 		defer cleanup()
 
 		testPrivilegedPods(f, func(pod *v1.Pod) {
-			p, err := c.Core().Pods(ns).Create(pod)
+			p, err := c.CoreV1().Pods(ns).Create(pod)
 			framework.ExpectNoError(err)
 			framework.ExpectNoError(framework.WaitForPodNameRunningInNamespace(c, p.Name, p.Namespace))
 
 			// Verify expected PSP was used.
-			p, err = c.Core().Pods(ns).Get(p.Name, metav1.GetOptions{})
+			p, err = c.CoreV1().Pods(ns).Get(p.Name, metav1.GetOptions{})
 			framework.ExpectNoError(err)
 			validated, found := p.Annotations[psputil.ValidatedPSPAnnotation]
 			Expect(found).To(BeTrue(), "PSP annotation not found")
diff --git a/test/e2e/framework/deployment_util.go b/test/e2e/framework/deployment_util.go
index 8a249d1e13..192e78a9fe 100644
--- a/test/e2e/framework/deployment_util.go
+++ b/test/e2e/framework/deployment_util.go
@@ -286,7 +286,7 @@ func GetPodsForDeployment(client clientset.Interface, deployment *extensions.Dep
 		return nil, fmt.Errorf("expected a new replica set for deployment %q, found none", deployment.Name)
 	}
 	podListFunc := func(namespace string, options metav1.ListOptions) (*v1.PodList, error) {
-		return client.Core().Pods(namespace).List(options)
+		return client.CoreV1().Pods(namespace).List(options)
 	}
 	rsList := []*extensions.ReplicaSet{replicaSet}
 	podList, err := deploymentutil.ListPods(deployment, rsList, podListFunc)
diff --git a/test/e2e/instrumentation/logging/elasticsearch/kibana.go b/test/e2e/instrumentation/logging/elasticsearch/kibana.go
index cc4a4c9c44..da554bec09 100644
--- a/test/e2e/instrumentation/logging/elasticsearch/kibana.go
+++ b/test/e2e/instrumentation/logging/elasticsearch/kibana.go
@@ -57,7 +57,7 @@ func ClusterLevelLoggingWithKibana(f *framework.Framework) {
 
 	// Check for the existence of the Kibana service.
 	ginkgo.By("Checking the Kibana service exists.")
-	s := f.ClientSet.Core().Services(metav1.NamespaceSystem)
+	s := f.ClientSet.CoreV1().Services(metav1.NamespaceSystem)
 	// Make a few attempts to connect. This makes the test robust against
 	// being run as the first e2e test just after the e2e cluster has been created.
 	err := wait.Poll(pollingInterval, pollingTimeout, func() (bool, error) {
@@ -73,7 +73,7 @@ func ClusterLevelLoggingWithKibana(f *framework.Framework) {
 	ginkgo.By("Checking to make sure the Kibana pods are running")
 	label := labels.SelectorFromSet(labels.Set(map[string]string{kibanaKey: kibanaValue}))
 	options := metav1.ListOptions{LabelSelector: label.String()}
-	pods, err := f.ClientSet.Core().Pods(metav1.NamespaceSystem).List(options)
+	pods, err := f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).List(options)
 	gomega.Expect(err).NotTo(gomega.HaveOccurred())
 	for _, pod := range pods.Items {
 		err = framework.WaitForPodRunningInNamespace(f.ClientSet, &pod)
@@ -82,7 +82,7 @@ func ClusterLevelLoggingWithKibana(f *framework.Framework) {
 
 	ginkgo.By("Checking to make sure we get a response from the Kibana UI.")
 	err = wait.Poll(pollingInterval, pollingTimeout, func() (bool, error) {
-		req, err := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.Core().RESTClient().Get())
+		req, err := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get())
 		if err != nil {
 			framework.Logf("Failed to get services proxy request: %v", err)
 			return false, nil
diff --git a/test/e2e/instrumentation/logging/elasticsearch/utils.go b/test/e2e/instrumentation/logging/elasticsearch/utils.go
index dd1ca884f4..ecc30cda3d 100644
--- a/test/e2e/instrumentation/logging/elasticsearch/utils.go
+++ b/test/e2e/instrumentation/logging/elasticsearch/utils.go
@@ -55,7 +55,7 @@ func (p *esLogProvider) Init() error {
 	f := p.Framework
 	// Check for the existence of the Elasticsearch service.
 	framework.Logf("Checking the Elasticsearch service exists.")
-	s := f.ClientSet.Core().Services(api.NamespaceSystem)
+	s := f.ClientSet.CoreV1().Services(api.NamespaceSystem)
 	// Make a few attempts to connect. This makes the test robust against
 	// being run as the first e2e test just after the e2e cluster has been created.
 	var err error
@@ -73,7 +73,7 @@ func (p *esLogProvider) Init() error {
 	framework.Logf("Checking to make sure the Elasticsearch pods are running")
 	labelSelector := fields.SelectorFromSet(fields.Set(map[string]string{"k8s-app": "elasticsearch-logging"})).String()
 	options := meta_v1.ListOptions{LabelSelector: labelSelector}
-	pods, err := f.ClientSet.Core().Pods(api.NamespaceSystem).List(options)
+	pods, err := f.ClientSet.CoreV1().Pods(api.NamespaceSystem).List(options)
 	if err != nil {
 		return err
 	}
@@ -90,7 +90,7 @@ func (p *esLogProvider) Init() error {
 	err = nil
 	var body []byte
 	for start := time.Now(); time.Since(start) < esRetryTimeout; time.Sleep(esRetryDelay) {
-		proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.Core().RESTClient().Get())
+		proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get())
 		if errProxy != nil {
 			framework.Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy)
 			continue
@@ -124,7 +124,7 @@ func (p *esLogProvider) Init() error {
 	framework.Logf("Checking health of Elasticsearch service.")
 	healthy := false
 	for start := time.Now(); time.Since(start) < esRetryTimeout; time.Sleep(esRetryDelay) {
-		proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.Core().RESTClient().Get())
+		proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get())
 		if errProxy != nil {
 			framework.Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy)
 			continue
@@ -172,7 +172,7 @@ func (p *esLogProvider) Cleanup() {
 
 func (p *esLogProvider) ReadEntries(name string) []utils.LogEntry {
 	f := p.Framework
-	proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.Core().RESTClient().Get())
+	proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get())
 	if errProxy != nil {
 		framework.Logf("Failed to get services proxy request: %v", errProxy)
 		return nil
diff --git a/test/e2e/multicluster/ubernetes_lite.go b/test/e2e/multicluster/ubernetes_lite.go
index 7ceace73d8..a83ad4d26a 100644
--- a/test/e2e/multicluster/ubernetes_lite.go
+++ b/test/e2e/multicluster/ubernetes_lite.go
@@ -433,7 +433,7 @@ func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string)
 	By("Creating pods for each static PV")
 	for _, config := range configs {
 		podConfig := framework.MakePod(ns, nil, []*v1.PersistentVolumeClaim{config.pvc}, false, "")
-		config.pod, err = c.Core().Pods(ns).Create(podConfig)
+		config.pod, err = c.CoreV1().Pods(ns).Create(podConfig)
 		Expect(err).NotTo(HaveOccurred())
 	}
 
diff --git a/test/e2e/network/service.go b/test/e2e/network/service.go
index 147837e06b..d97be344a8 100644
--- a/test/e2e/network/service.go
+++ b/test/e2e/network/service.go
@@ -813,7 +813,7 @@ var _ = SIGDescribe("Services", func() {
 		tcpService := jig.CreateTCPServiceOrFail(ns, nil)
 		defer func() {
 			framework.Logf("Cleaning up the updating NodePorts test service")
-			err := cs.Core().Services(ns).Delete(serviceName, nil)
+			err := cs.CoreV1().Services(ns).Delete(serviceName, nil)
 			Expect(err).NotTo(HaveOccurred())
 		}()
 		jig.SanityCheckService(tcpService, v1.ServiceTypeClusterIP)
diff --git a/test/e2e/upgrades/apps/daemonsets.go b/test/e2e/upgrades/apps/daemonsets.go
index 4b9f19c514..3550f651bd 100644
--- a/test/e2e/upgrades/apps/daemonsets.go
+++ b/test/e2e/upgrades/apps/daemonsets.go
@@ -119,7 +119,7 @@ func (t *DaemonSetUpgradeTest) validateRunningDaemonSet(f *framework.Framework)
 }
 
 func checkRunningOnAllNodes(f *framework.Framework, namespace string, selector map[string]string) (bool, error) {
-	nodeList, err := f.ClientSet.Core().Nodes().List(metav1.ListOptions{})
+	nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
 	if err != nil {
 		return false, err
 	}
@@ -139,7 +139,7 @@ func checkRunningOnAllNodes(f *framework.Framework, namespace string, selector m
 func checkDaemonPodOnNodes(f *framework.Framework, namespace string, labelSet map[string]string, nodeNames []string) (bool, error) {
 	selector := labels.Set(labelSet).AsSelector()
 	options := metav1.ListOptions{LabelSelector: selector.String()}
-	podList, err := f.ClientSet.Core().Pods(namespace).List(options)
+	podList, err := f.ClientSet.CoreV1().Pods(namespace).List(options)
 	if err != nil {
 		return false, err
 	}
diff --git a/test/e2e/upgrades/apps/statefulset.go b/test/e2e/upgrades/apps/statefulset.go
index 6b3224290e..e5075f6d0e 100644
--- a/test/e2e/upgrades/apps/statefulset.go
+++ b/test/e2e/upgrades/apps/statefulset.go
@@ -66,7 +66,7 @@ func (t *StatefulSetUpgradeTest) Setup(f *framework.Framework) {
 	t.tester.PauseNewPods(t.set)
 
 	By("Creating service " + headlessSvcName + " in namespace " + ns)
-	_, err := f.ClientSet.Core().Services(ns).Create(t.service)
+	_, err := f.ClientSet.CoreV1().Services(ns).Create(t.service)
 	Expect(err).NotTo(HaveOccurred())
 
 	By("Creating statefulset " + ssName + " in namespace " + ns)
diff --git a/test/integration/deployment/util.go b/test/integration/deployment/util.go
index dc196c92a3..bb6c1d48be 100644
--- a/test/integration/deployment/util.go
+++ b/test/integration/deployment/util.go
@@ -210,7 +210,7 @@ func (d *deploymentTester) waitForDeploymentRevisionAndImage(revision, image str
 
 func markPodReady(c clientset.Interface, ns string, pod *v1.Pod) error {
 	addPodConditionReady(pod, metav1.Now())
-	_, err := c.Core().Pods(ns).UpdateStatus(pod)
+	_, err := c.CoreV1().Pods(ns).UpdateStatus(pod)
 	return err
 }
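
Reviewer note (not part of the commit): every hunk above makes the same mechanical substitution: the deprecated, version-agnostic Core() accessor on the clientset is replaced with the version-explicit CoreV1() accessor, with request paths and method signatures unchanged. For context only, a minimal sketch of the pattern against the client-go API of this era; the kubeconfig path and the "default" namespace are illustrative placeholders, not taken from the patch:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a clientset from a kubeconfig (placeholder path).
	config, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	client, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	// Deprecated form removed by this patch:
	//   pods, err := client.Core().Pods("default").List(metav1.ListOptions{})
	// Version-explicit form adopted by this patch:
	pods, err := client.CoreV1().Pods("default").List(metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("found %d pods\n", len(pods.Items))
}

At this point in client-go, Core() simply delegates to CoreV1(), so the change is behavior-preserving; spelling out the group version keeps call sites unambiguous if another core version is ever introduced.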