Merge pull request #49538 from supereagle/versioned-coreclient

Automatic merge from submit-queue (batch tested with PRs 49538, 49708, 47665, 49750, 49528)

Use the core client with version

**What this PR does / why we need it**:
Replace the **deprecated** `clientSet.Core()` with `clientSet.CoreV1()`.

**Which issue this PR fixes**: fixes #49535

**Special notes for your reviewer**:

**Release note**:
```release-note
NONE
```
Branch ref: pull/6/head
Kubernetes Submit Queue 2017-07-29 05:28:49 -07:00 committed by GitHub
commit 740ca9a052
19 changed files with 34 additions and 34 deletions

View File

@@ -131,7 +131,7 @@ func Run(s *options.CloudControllerManagerServer, cloud cloudprovider.Interface)
eventBroadcaster := record.NewBroadcaster() eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof) eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.Core().RESTClient()).Events("")}) eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")})
recorder := eventBroadcaster.NewRecorder(api.Scheme, v1.EventSource{Component: "cloud-controller-manager"}) recorder := eventBroadcaster.NewRecorder(api.Scheme, v1.EventSource{Component: "cloud-controller-manager"})
run := func(stop <-chan struct{}) { run := func(stop <-chan struct{}) {
@@ -142,7 +142,7 @@ func Run(s *options.CloudControllerManagerServer, cloud cloudprovider.Interface)
if len(s.ServiceAccountKeyFile) > 0 && s.UseServiceAccountCredentials { if len(s.ServiceAccountKeyFile) > 0 && s.UseServiceAccountCredentials {
clientBuilder = controller.SAControllerClientBuilder{ clientBuilder = controller.SAControllerClientBuilder{
ClientConfig: restclient.AnonymousClientConfig(kubeconfig), ClientConfig: restclient.AnonymousClientConfig(kubeconfig),
CoreClient: kubeClient.Core(), CoreClient: kubeClient.CoreV1(),
AuthenticationClient: kubeClient.Authentication(), AuthenticationClient: kubeClient.Authentication(),
Namespace: "kube-system", Namespace: "kube-system",
} }
@@ -172,7 +172,7 @@ func Run(s *options.CloudControllerManagerServer, cloud cloudprovider.Interface)
Namespace: "kube-system", Namespace: "kube-system",
Name: "cloud-controller-manager", Name: "cloud-controller-manager",
}, },
Client: leaderElectionClient.Core(), Client: leaderElectionClient.CoreV1(),
LockConfig: resourcelock.ResourceLockConfig{ LockConfig: resourcelock.ResourceLockConfig{
Identity: id + "-external-cloud-controller", Identity: id + "-external-cloud-controller",
EventRecorder: recorder, EventRecorder: recorder,

View File

@@ -64,7 +64,7 @@ func Run(s *GKECertificatesController) error {
eventBroadcaster := record.NewBroadcaster() eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof) eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.Core().RESTClient()).Events("")}) eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")})
recorder := eventBroadcaster.NewRecorder(api.Scheme, v1.EventSource{Component: "gke-certificates-controller"}) recorder := eventBroadcaster.NewRecorder(api.Scheme, v1.EventSource{Component: "gke-certificates-controller"})
clientBuilder := controller.SimpleControllerClientBuilder{ClientConfig: kubeconfig} clientBuilder := controller.SimpleControllerClientBuilder{ClientConfig: kubeconfig}

View File

@@ -155,7 +155,7 @@ func Run(s *options.CMServer) error {
eventBroadcaster := record.NewBroadcaster() eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof) eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.Core().RESTClient()).Events("")}) eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")})
recorder := eventBroadcaster.NewRecorder(api.Scheme, v1.EventSource{Component: "controller-manager"}) recorder := eventBroadcaster.NewRecorder(api.Scheme, v1.EventSource{Component: "controller-manager"})
run := func(stop <-chan struct{}) { run := func(stop <-chan struct{}) {
@@ -166,7 +166,7 @@ func Run(s *options.CMServer) error {
if len(s.ServiceAccountKeyFile) > 0 && s.UseServiceAccountCredentials { if len(s.ServiceAccountKeyFile) > 0 && s.UseServiceAccountCredentials {
clientBuilder = controller.SAControllerClientBuilder{ clientBuilder = controller.SAControllerClientBuilder{
ClientConfig: restclient.AnonymousClientConfig(kubeconfig), ClientConfig: restclient.AnonymousClientConfig(kubeconfig),
CoreClient: kubeClient.Core(), CoreClient: kubeClient.CoreV1(),
AuthenticationClient: kubeClient.Authentication(), AuthenticationClient: kubeClient.Authentication(),
Namespace: "kube-system", Namespace: "kube-system",
} }
@@ -201,7 +201,7 @@ func Run(s *options.CMServer) error {
rl, err := resourcelock.New(s.LeaderElection.ResourceLock, rl, err := resourcelock.New(s.LeaderElection.ResourceLock,
"kube-system", "kube-system",
"kube-controller-manager", "kube-controller-manager",
leaderElectionClient.Core(), leaderElectionClient.CoreV1(),
resourcelock.ResourceLockConfig{ resourcelock.ResourceLockConfig{
Identity: id, Identity: id,
EventRecorder: recorder, EventRecorder: recorder,

View File

@@ -413,7 +413,7 @@ func createClients(config componentconfig.ClientConnectionConfiguration, masterO
return nil, nil, err return nil, nil, err
} }
return client, eventClient.Core(), nil return client, eventClient.CoreV1(), nil
} }
// NewProxyServer returns a new ProxyServer. // NewProxyServer returns a new ProxyServer.

View File

@@ -501,7 +501,7 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies) (err error) {
if err != nil { if err != nil {
glog.Warningf("Failed to create API Server client for Events: %v", err) glog.Warningf("Failed to create API Server client for Events: %v", err)
} }
eventClient = tmpClient.Core() eventClient = tmpClient.CoreV1()
} else { } else {
switch { switch {

View File

@@ -52,7 +52,7 @@ import (
func createRecorder(kubecli *clientset.Clientset, s *options.SchedulerServer) record.EventRecorder { func createRecorder(kubecli *clientset.Clientset, s *options.SchedulerServer) record.EventRecorder {
eventBroadcaster := record.NewBroadcaster() eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof) eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubecli.Core().RESTClient()).Events("")}) eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubecli.CoreV1().RESTClient()).Events("")})
return eventBroadcaster.NewRecorder(api.Scheme, v1.EventSource{Component: s.SchedulerName}) return eventBroadcaster.NewRecorder(api.Scheme, v1.EventSource{Component: s.SchedulerName})
} }

View File

@@ -121,7 +121,7 @@ func Run(s *options.SchedulerServer) error {
rl, err := resourcelock.New(s.LeaderElection.ResourceLock, rl, err := resourcelock.New(s.LeaderElection.ResourceLock,
s.LockObjectNamespace, s.LockObjectNamespace,
s.LockObjectName, s.LockObjectName,
kubecli.Core(), kubecli.CoreV1(),
resourcelock.ResourceLockConfig{ resourcelock.ResourceLockConfig{
Identity: id, Identity: id,
EventRecorder: recorder, EventRecorder: recorder,

View File

@@ -162,7 +162,7 @@ func main() {
} }
// create the pod watcher // create the pod watcher
podListWatcher := cache.NewListWatchFromClient(clientset.Core().RESTClient(), "pods", v1.NamespaceDefault, fields.Everything()) podListWatcher := cache.NewListWatchFromClient(clientset.CoreV1().RESTClient(), "pods", v1.NamespaceDefault, fields.Everything())
// create the workqueue // create the workqueue
queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()) queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())

View File

@@ -272,7 +272,7 @@ func createPodsOrDie(cs *kubernetes.Clientset, ns string, n int) {
func waitForPodsOrDie(cs *kubernetes.Clientset, ns string, n int) { func waitForPodsOrDie(cs *kubernetes.Clientset, ns string, n int) {
By("Waiting for all pods to be running") By("Waiting for all pods to be running")
err := wait.PollImmediate(framework.Poll, schedulingTimeout, func() (bool, error) { err := wait.PollImmediate(framework.Poll, schedulingTimeout, func() (bool, error) {
pods, err := cs.Core().Pods(ns).List(metav1.ListOptions{LabelSelector: "foo=bar"}) pods, err := cs.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: "foo=bar"})
if err != nil { if err != nil {
return false, err return false, err
} }

View File

@@ -341,7 +341,7 @@ var _ = framework.KubeDescribe("Generated release_1_5 clientset", func() {
var _ = framework.KubeDescribe("Staging client repo client", func() { var _ = framework.KubeDescribe("Staging client repo client", func() {
f := framework.NewDefaultFramework("clientset") f := framework.NewDefaultFramework("clientset")
It("should create pods, delete pods, watch pods", func() { It("should create pods, delete pods, watch pods", func() {
podClient := f.StagingClient.Core().Pods(f.Namespace.Name) podClient := f.StagingClient.CoreV1().Pods(f.Namespace.Name)
By("constructing the pod") By("constructing the pod")
name := "pod" + string(uuid.NewUUID()) name := "pod" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond()) value := strconv.Itoa(time.Now().Nanosecond())

View File

@@ -120,7 +120,7 @@ func waitForNamespaceInFederatedClusters(clusters ClusterSlice, nsName string) {
name := c.Name name := c.Name
By(fmt.Sprintf("Waiting for namespace %q to be created in cluster %q", nsName, name)) By(fmt.Sprintf("Waiting for namespace %q to be created in cluster %q", nsName, name))
err := wait.PollImmediate(framework.Poll, FederatedDefaultTestTimeout, func() (bool, error) { err := wait.PollImmediate(framework.Poll, FederatedDefaultTestTimeout, func() (bool, error) {
_, err := c.Clientset.Core().Namespaces().Get(nsName, metav1.GetOptions{}) _, err := c.Clientset.CoreV1().Namespaces().Get(nsName, metav1.GetOptions{})
if errors.IsNotFound(err) { if errors.IsNotFound(err) {
return false, nil return false, nil
} else if err != nil { } else if err != nil {

View File

@@ -598,7 +598,7 @@ func waitForSecretOrFail(clientset *kubeclientset.Clientset, nsName string, secr
var clusterSecret *v1.Secret var clusterSecret *v1.Secret
err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) { err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
var err error var err error
clusterSecret, err = clientset.Core().Secrets(nsName).Get(secret.Name, metav1.GetOptions{}) clusterSecret, err = clientset.CoreV1().Secrets(nsName).Get(secret.Name, metav1.GetOptions{})
if (!present) && errors.IsNotFound(err) { // We want it gone, and it's gone. if (!present) && errors.IsNotFound(err) { // We want it gone, and it's gone.
By(fmt.Sprintf("Success: shard of federated secret %q in namespace %q in cluster is absent", secret.Name, nsName)) By(fmt.Sprintf("Success: shard of federated secret %q in namespace %q in cluster is absent", secret.Name, nsName))
return true, nil // Success return true, nil // Success

View File

@@ -61,8 +61,8 @@ var _ = framework.KubeDescribe("Federation namespace [Feature:Federation]", func
f.FederationClientset.Core().Namespaces().Delete) f.FederationClientset.Core().Namespaces().Delete)
for _, cluster := range clusters { for _, cluster := range clusters {
deleteNamespace(nil, nsName, deleteNamespace(nil, nsName,
cluster.Core().Namespaces().Get, cluster.CoreV1().Namespaces().Get,
cluster.Core().Namespaces().Delete) cluster.CoreV1().Namespaces().Delete)
} }
}) })
@@ -162,7 +162,7 @@ func verifyNsCascadingDeletion(nsClient clientset.NamespaceInterface, clusters f
By(fmt.Sprintf("Waiting for namespace %s to be created in all underlying clusters", nsName)) By(fmt.Sprintf("Waiting for namespace %s to be created in all underlying clusters", nsName))
err := wait.Poll(5*time.Second, 2*time.Minute, func() (bool, error) { err := wait.Poll(5*time.Second, 2*time.Minute, func() (bool, error) {
for _, cluster := range clusters { for _, cluster := range clusters {
_, err := cluster.Core().Namespaces().Get(nsName, metav1.GetOptions{}) _, err := cluster.CoreV1().Namespaces().Get(nsName, metav1.GetOptions{})
if err != nil && !errors.IsNotFound(err) { if err != nil && !errors.IsNotFound(err) {
return false, err return false, err
} }
@@ -183,7 +183,7 @@ func verifyNsCascadingDeletion(nsClient clientset.NamespaceInterface, clusters f
shouldExist := orphanDependents == nil || *orphanDependents == true shouldExist := orphanDependents == nil || *orphanDependents == true
for _, cluster := range clusters { for _, cluster := range clusters {
clusterName := cluster.Name clusterName := cluster.Name
_, err := cluster.Core().Namespaces().Get(nsName, metav1.GetOptions{}) _, err := cluster.CoreV1().Namespaces().Get(nsName, metav1.GetOptions{})
if shouldExist && errors.IsNotFound(err) { if shouldExist && errors.IsNotFound(err) {
errMessages = append(errMessages, fmt.Sprintf("unexpected NotFound error for namespace %s in cluster %s, expected namespace to exist", nsName, clusterName)) errMessages = append(errMessages, fmt.Sprintf("unexpected NotFound error for namespace %s in cluster %s, expected namespace to exist", nsName, clusterName))
} else if !shouldExist && !errors.IsNotFound(err) { } else if !shouldExist && !errors.IsNotFound(err) {

View File

@@ -325,7 +325,7 @@ func verifyCascadingDeletionForService(clientset *fedclientset.Clientset, cluste
By(fmt.Sprintf("Waiting for service %s to be created in all underlying clusters", serviceName)) By(fmt.Sprintf("Waiting for service %s to be created in all underlying clusters", serviceName))
err := wait.Poll(5*time.Second, 2*time.Minute, func() (bool, error) { err := wait.Poll(5*time.Second, 2*time.Minute, func() (bool, error) {
for _, cluster := range clusters { for _, cluster := range clusters {
_, err := cluster.Core().Services(nsName).Get(serviceName, metav1.GetOptions{}) _, err := cluster.CoreV1().Services(nsName).Get(serviceName, metav1.GetOptions{})
if err != nil { if err != nil {
if !errors.IsNotFound(err) { if !errors.IsNotFound(err) {
return false, err return false, err
@@ -346,7 +346,7 @@ func verifyCascadingDeletionForService(clientset *fedclientset.Clientset, cluste
shouldExist := orphanDependents == nil || *orphanDependents == true shouldExist := orphanDependents == nil || *orphanDependents == true
for _, cluster := range clusters { for _, cluster := range clusters {
clusterName := cluster.Name clusterName := cluster.Name
_, err := cluster.Core().Services(nsName).Get(serviceName, metav1.GetOptions{}) _, err := cluster.CoreV1().Services(nsName).Get(serviceName, metav1.GetOptions{})
if shouldExist && errors.IsNotFound(err) { if shouldExist && errors.IsNotFound(err) {
errMessages = append(errMessages, fmt.Sprintf("unexpected NotFound error for service %s in cluster %s, expected service to exist", serviceName, clusterName)) errMessages = append(errMessages, fmt.Sprintf("unexpected NotFound error for service %s in cluster %s, expected service to exist", serviceName, clusterName))
} else if !shouldExist && !errors.IsNotFound(err) { } else if !shouldExist && !errors.IsNotFound(err) {

View File

@@ -381,7 +381,7 @@ func createBackendPodsOrFail(clusters fedframework.ClusterSlice, namespace strin
for _, c := range clusters { for _, c := range clusters {
name := c.Name name := c.Name
By(fmt.Sprintf("Creating pod %q in namespace %q in cluster %q", pod.Name, namespace, name)) By(fmt.Sprintf("Creating pod %q in namespace %q in cluster %q", pod.Name, namespace, name))
createdPod, err := c.Clientset.Core().Pods(namespace).Create(pod) createdPod, err := c.Clientset.CoreV1().Pods(namespace).Create(pod)
framework.ExpectNoError(err, "Creating pod %q in namespace %q in cluster %q", name, namespace, name) framework.ExpectNoError(err, "Creating pod %q in namespace %q in cluster %q", name, namespace, name)
By(fmt.Sprintf("Successfully created pod %q in namespace %q in cluster %q: %v", pod.Name, namespace, name, *createdPod)) By(fmt.Sprintf("Successfully created pod %q in namespace %q in cluster %q: %v", pod.Name, namespace, name, *createdPod))
podMap[name] = createdPod podMap[name] = createdPod
@@ -393,7 +393,7 @@ func createBackendPodsOrFail(clusters fedframework.ClusterSlice, namespace strin
// The test fails if there are any errors. // The test fails if there are any errors.
func deleteOneBackendPodOrFail(c *fedframework.Cluster, pod *v1.Pod) { func deleteOneBackendPodOrFail(c *fedframework.Cluster, pod *v1.Pod) {
Expect(pod).ToNot(BeNil()) Expect(pod).ToNot(BeNil())
err := c.Clientset.Core().Pods(pod.Namespace).Delete(pod.Name, metav1.NewDeleteOptions(0)) err := c.Clientset.CoreV1().Pods(pod.Namespace).Delete(pod.Name, metav1.NewDeleteOptions(0))
msgFmt := fmt.Sprintf("Deleting Pod %q in namespace %q in cluster %q %%v", pod.Name, pod.Namespace, c.Name) msgFmt := fmt.Sprintf("Deleting Pod %q in namespace %q in cluster %q %%v", pod.Name, pod.Namespace, c.Name)
if errors.IsNotFound(err) { if errors.IsNotFound(err) {
framework.Logf(msgFmt, "does not exist. No need to delete it.") framework.Logf(msgFmt, "does not exist. No need to delete it.")

View File

@@ -377,7 +377,7 @@ func TestSchedulerExtender(t *testing.T) {
} }
eventBroadcaster := record.NewBroadcaster() eventBroadcaster := record.NewBroadcaster()
schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.Scheme, v1.EventSource{Component: v1.DefaultSchedulerName}) schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.Scheme, v1.EventSource{Component: v1.DefaultSchedulerName})
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(clientSet.Core().RESTClient()).Events("")}) eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(clientSet.CoreV1().RESTClient()).Events("")})
scheduler, _ := scheduler.NewFromConfigurator(&scheduler.FakeConfigurator{Config: schedulerConfig}, nil...) scheduler, _ := scheduler.NewFromConfigurator(&scheduler.FakeConfigurator{Config: schedulerConfig}, nil...)
informerFactory.Start(schedulerConfig.StopEverything) informerFactory.Start(schedulerConfig.StopEverything)
scheduler.Run() scheduler.Run()

View File

@@ -83,7 +83,7 @@ func TestSchedulerCreationFromConfigMap(t *testing.T) {
defer framework.DeleteTestingNamespace(ns, s, t) defer framework.DeleteTestingNamespace(ns, s, t)
clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}}) clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})
defer clientSet.Core().Nodes().DeleteCollection(nil, metav1.ListOptions{}) defer clientSet.CoreV1().Nodes().DeleteCollection(nil, metav1.ListOptions{})
informerFactory := informers.NewSharedInformerFactory(clientSet, 0) informerFactory := informers.NewSharedInformerFactory(clientSet, 0)
// Pre-register some predicate and priority functions // Pre-register some predicate and priority functions
@@ -113,10 +113,10 @@ func TestSchedulerCreationFromConfigMap(t *testing.T) {
} }
policyConfigMap.APIVersion = testapi.Groups[v1.GroupName].GroupVersion().String() policyConfigMap.APIVersion = testapi.Groups[v1.GroupName].GroupVersion().String()
clientSet.Core().ConfigMaps(metav1.NamespaceSystem).Create(&policyConfigMap) clientSet.CoreV1().ConfigMaps(metav1.NamespaceSystem).Create(&policyConfigMap)
eventBroadcaster := record.NewBroadcaster() eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientv1core.New(clientSet.Core().RESTClient()).Events("")}) eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientv1core.New(clientSet.CoreV1().RESTClient()).Events("")})
ss := options.NewSchedulerServer() ss := options.NewSchedulerServer()
ss.HardPodAffinitySymmetricWeight = v1.DefaultHardPodAffinitySymmetricWeight ss.HardPodAffinitySymmetricWeight = v1.DefaultHardPodAffinitySymmetricWeight
ss.PolicyConfigMapName = configPolicyName ss.PolicyConfigMapName = configPolicyName
@@ -161,12 +161,12 @@ func TestSchedulerCreationFromNonExistentConfigMap(t *testing.T) {
defer framework.DeleteTestingNamespace(ns, s, t) defer framework.DeleteTestingNamespace(ns, s, t)
clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}}) clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})
defer clientSet.Core().Nodes().DeleteCollection(nil, metav1.ListOptions{}) defer clientSet.CoreV1().Nodes().DeleteCollection(nil, metav1.ListOptions{})
informerFactory := informers.NewSharedInformerFactory(clientSet, 0) informerFactory := informers.NewSharedInformerFactory(clientSet, 0)
eventBroadcaster := record.NewBroadcaster() eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientv1core.New(clientSet.Core().RESTClient()).Events("")}) eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientv1core.New(clientSet.CoreV1().RESTClient()).Events("")})
ss := options.NewSchedulerServer() ss := options.NewSchedulerServer()
ss.PolicyConfigMapName = "non-existent-config" ss.PolicyConfigMapName = "non-existent-config"
@@ -362,7 +362,7 @@ func TestUnschedulableNodes(t *testing.T) {
} }
for i, mod := range nodeModifications { for i, mod := range nodeModifications {
unSchedNode, err := context.clientSet.Core().Nodes().Create(node) unSchedNode, err := context.clientSet.CoreV1().Nodes().Create(node)
if err != nil { if err != nil {
t.Fatalf("Failed to create node: %v", err) t.Fatalf("Failed to create node: %v", err)
} }
@@ -447,7 +447,7 @@ func TestMultiScheduler(t *testing.T) {
}, },
}, },
} }
context.clientSet.Core().Nodes().Create(node) context.clientSet.CoreV1().Nodes().Create(node)
// 3. create 3 pods for testing // 3. create 3 pods for testing
testPod, err := createPausePodWithResource(context.clientSet, "pod-without-scheduler-name", context.ns.Name, nil) testPod, err := createPausePodWithResource(context.clientSet, "pod-without-scheduler-name", context.ns.Name, nil)

View File

@@ -79,7 +79,7 @@ func mustSetupScheduler() (schedulerConfigurator scheduler.Configurator, destroy
) )
eventBroadcaster := record.NewBroadcaster() eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientv1core.New(clientSet.Core().RESTClient()).Events("")}) eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientv1core.New(clientSet.CoreV1().RESTClient()).Events("")})
sched, err := scheduler.NewFromConfigurator(schedulerConfigurator, func(conf *scheduler.Config) { sched, err := scheduler.NewFromConfigurator(schedulerConfigurator, func(conf *scheduler.Config) {
conf.Recorder = eventBroadcaster.NewRecorder(api.Scheme, v1.EventSource{Component: "scheduler"}) conf.Recorder = eventBroadcaster.NewRecorder(api.Scheme, v1.EventSource{Component: "scheduler"})

View File

@@ -120,7 +120,7 @@ func main() {
} }
ns := got.Name ns := got.Name
defer func(ns string) { defer func(ns string) {
if err := client.Core().Namespaces().Delete(ns, nil); err != nil { if err := client.CoreV1().Namespaces().Delete(ns, nil); err != nil {
glog.Warningf("Failed to delete namespace ns: %e", ns, err) glog.Warningf("Failed to delete namespace ns: %e", ns, err)
} else { } else {
// wait until the namespace disappears // wait until the namespace disappears