mirror of https://github.com/k3s-io/k3s
use the core client with version
parent: 744455c69d
commit: edce96c5b6
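This commit mechanically replaces the deprecated Core() accessor with CoreV1() across the vendored Kubernetes tree. In client-go of this era the two return the same typed CoreV1Interface; CoreV1() simply names the API group/version explicitly, which is why every hunk below is a one-line substitution. A minimal sketch of the preferred call shape (package, function name, and kubeconfig handling are illustrative, not part of this commit):

package example

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/clientcmd"
)

// listDefaultPods builds a Clientset and lists pods through the versioned
// core client; CoreV1() replaces the deprecated Core() alias.
func listDefaultPods(kubeconfigPath string) error {
    config, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath)
    if err != nil {
        return err
    }
    clientset, err := kubernetes.NewForConfig(config)
    if err != nil {
        return err
    }
    // Era-appropriate signature: List took ListOptions directly, no context.
    _, err = clientset.CoreV1().Pods(metav1.NamespaceDefault).List(metav1.ListOptions{})
    return err
}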
@@ -131,7 +131,7 @@ func Run(s *options.CloudControllerManagerServer, cloud cloudprovider.Interface)
     eventBroadcaster := record.NewBroadcaster()
     eventBroadcaster.StartLogging(glog.Infof)
-    eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.Core().RESTClient()).Events("")})
+    eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")})
     recorder := eventBroadcaster.NewRecorder(api.Scheme, v1.EventSource{Component: "cloud-controller-manager"})

     run := func(stop <-chan struct{}) {
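This broadcaster wiring recurs in most hunks below. For reference, a self-contained sketch with the imports the fragments assume; import paths follow upstream client-go conventions and may differ inside this vendored tree, and "my-controller" is an illustrative component name:

package example

import (
    "github.com/golang/glog"
    v1 "k8s.io/api/core/v1"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/kubernetes/scheme"
    v1core "k8s.io/client-go/kubernetes/typed/core/v1"
    "k8s.io/client-go/tools/record"
)

// newRecorder fans events out to the logs and to the API server's core/v1
// events endpoint, then returns a recorder stamped with a component name.
func newRecorder(kubeClient *kubernetes.Clientset) record.EventRecorder {
    eventBroadcaster := record.NewBroadcaster()
    eventBroadcaster.StartLogging(glog.Infof)
    eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{
        Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events(""),
    })
    return eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "my-controller"})
}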
@@ -142,7 +142,7 @@ func Run(s *options.CloudControllerManagerServer, cloud cloudprovider.Interface)
     if len(s.ServiceAccountKeyFile) > 0 && s.UseServiceAccountCredentials {
         clientBuilder = controller.SAControllerClientBuilder{
             ClientConfig:         restclient.AnonymousClientConfig(kubeconfig),
-            CoreClient:           kubeClient.Core(),
+            CoreClient:           kubeClient.CoreV1(),
             AuthenticationClient: kubeClient.Authentication(),
             Namespace:            "kube-system",
         }
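SAControllerClientBuilder exists so each controller runs under its own ServiceAccount in kube-system rather than sharing one identity. A sketch of how such a builder is consumed downstream ("node-controller" is an illustrative name; the interface lives in k8s.io/kubernetes/pkg/controller in this era):

package example

import (
    "k8s.io/kubernetes/pkg/controller"
)

// perControllerClient requests a client minted for one controller; the
// builder provisions the backing ServiceAccount token on demand.
func perControllerClient(clientBuilder controller.ControllerClientBuilder) {
    client := clientBuilder.ClientOrDie("node-controller")
    _ = client.CoreV1() // versioned core client, as elsewhere in this commit
}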
@@ -172,7 +172,7 @@ func Run(s *options.CloudControllerManagerServer, cloud cloudprovider.Interface)
             Namespace: "kube-system",
             Name:      "cloud-controller-manager",
         },
-        Client: leaderElectionClient.Core(),
+        Client: leaderElectionClient.CoreV1(),
         LockConfig: resourcelock.ResourceLockConfig{
             Identity:      id + "-external-cloud-controller",
             EventRecorder: recorder,
@@ -64,7 +64,7 @@ func Run(s *GKECertificatesController) error {
     eventBroadcaster := record.NewBroadcaster()
     eventBroadcaster.StartLogging(glog.Infof)
-    eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.Core().RESTClient()).Events("")})
+    eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")})
     recorder := eventBroadcaster.NewRecorder(api.Scheme, v1.EventSource{Component: "gke-certificates-controller"})

     clientBuilder := controller.SimpleControllerClientBuilder{ClientConfig: kubeconfig}
@@ -154,7 +154,7 @@ func Run(s *options.CMServer) error {
     eventBroadcaster := record.NewBroadcaster()
     eventBroadcaster.StartLogging(glog.Infof)
-    eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.Core().RESTClient()).Events("")})
+    eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")})
     recorder := eventBroadcaster.NewRecorder(api.Scheme, v1.EventSource{Component: "controller-manager"})

     run := func(stop <-chan struct{}) {
@@ -165,7 +165,7 @@ func Run(s *options.CMServer) error {
     if len(s.ServiceAccountKeyFile) > 0 && s.UseServiceAccountCredentials {
         clientBuilder = controller.SAControllerClientBuilder{
             ClientConfig:         restclient.AnonymousClientConfig(kubeconfig),
-            CoreClient:           kubeClient.Core(),
+            CoreClient:           kubeClient.CoreV1(),
             AuthenticationClient: kubeClient.Authentication(),
             Namespace:            "kube-system",
         }
@@ -200,7 +200,7 @@ func Run(s *options.CMServer) error {
     rl, err := resourcelock.New(s.LeaderElection.ResourceLock,
         "kube-system",
         "kube-controller-manager",
-        leaderElectionClient.Core(),
+        leaderElectionClient.CoreV1(),
         resourcelock.ResourceLockConfig{
             Identity:      id,
             EventRecorder: recorder,
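Both controller managers build their leader-election lock through resourcelock.New, whose fourth parameter is the versioned core client, hence the CoreV1() change above. A hedged, self-contained sketch with the lock type pinned to the era's default instead of the s.LeaderElection.ResourceLock flag value:

package example

import (
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/leaderelection/resourcelock"
    "k8s.io/client-go/tools/record"
)

// newLock assembles the lock object used for leader election; the identity
// is typically the hostname plus a unique suffix.
func newLock(c *kubernetes.Clientset, id string, rec record.EventRecorder) (resourcelock.Interface, error) {
    return resourcelock.New(
        resourcelock.EndpointsResourceLock, // default lock type of this era
        "kube-system",
        "kube-controller-manager",
        c.CoreV1(),
        resourcelock.ResourceLockConfig{
            Identity:      id,
            EventRecorder: rec,
        },
    )
}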
@@ -413,7 +413,7 @@ func createClients(config componentconfig.ClientConnectionConfiguration, masterO
         return nil, nil, err
     }

-    return client, eventClient.Core(), nil
+    return client, eventClient.CoreV1(), nil
 }

 // NewProxyServer returns a new ProxyServer.
@@ -499,7 +499,7 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies) (err error) {
         if err != nil {
             glog.Warningf("Failed to create API Server client for Events: %v", err)
         }
-        eventClient = tmpClient.Core()
+        eventClient = tmpClient.CoreV1()

     } else {
         switch {
@@ -52,7 +52,7 @@ import (
 func createRecorder(kubecli *clientset.Clientset, s *options.SchedulerServer) record.EventRecorder {
     eventBroadcaster := record.NewBroadcaster()
     eventBroadcaster.StartLogging(glog.Infof)
-    eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubecli.Core().RESTClient()).Events("")})
+    eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubecli.CoreV1().RESTClient()).Events("")})
     return eventBroadcaster.NewRecorder(api.Scheme, v1.EventSource{Component: s.SchedulerName})
 }

@@ -121,7 +121,7 @@ func Run(s *options.SchedulerServer) error {
     rl, err := resourcelock.New(s.LeaderElection.ResourceLock,
         s.LockObjectNamespace,
         s.LockObjectName,
-        kubecli.Core(),
+        kubecli.CoreV1(),
         resourcelock.ResourceLockConfig{
             Identity:      id,
             EventRecorder: recorder,
@@ -162,7 +162,7 @@ func main() {
     }

     // create the pod watcher
-    podListWatcher := cache.NewListWatchFromClient(clientset.Core().RESTClient(), "pods", v1.NamespaceDefault, fields.Everything())
+    podListWatcher := cache.NewListWatchFromClient(clientset.CoreV1().RESTClient(), "pods", v1.NamespaceDefault, fields.Everything())

     // create the workqueue
     queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
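This hunk matches the shape of the client-go workqueue example: the list-watcher and the queue are then joined by an informer whose handlers enqueue object keys. A sketch of that next step, trimmed to the Add handler (names are illustrative):

package example

import (
    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/fields"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/cache"
    "k8s.io/client-go/util/workqueue"
)

// buildController wires a pod list-watcher into an indexer-backed informer
// that pushes namespace/name keys onto a rate-limited workqueue.
func buildController(clientset *kubernetes.Clientset) (cache.Indexer, cache.Controller, workqueue.RateLimitingInterface) {
    podListWatcher := cache.NewListWatchFromClient(
        clientset.CoreV1().RESTClient(), "pods", v1.NamespaceDefault, fields.Everything())
    queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
    indexer, informer := cache.NewIndexerInformer(podListWatcher, &v1.Pod{}, 0,
        cache.ResourceEventHandlerFuncs{
            AddFunc: func(obj interface{}) {
                if key, err := cache.MetaNamespaceKeyFunc(obj); err == nil {
                    queue.Add(key)
                }
            },
        }, cache.Indexers{})
    return indexer, informer, queue
}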
@@ -272,7 +272,7 @@ func createPodsOrDie(cs *kubernetes.Clientset, ns string, n int) {
 func waitForPodsOrDie(cs *kubernetes.Clientset, ns string, n int) {
     By("Waiting for all pods to be running")
     err := wait.PollImmediate(framework.Poll, schedulingTimeout, func() (bool, error) {
-        pods, err := cs.Core().Pods(ns).List(metav1.ListOptions{LabelSelector: "foo=bar"})
+        pods, err := cs.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: "foo=bar"})
         if err != nil {
             return false, err
         }
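The hunk cuts off before the poll's success branch. A sketch of how the condition function plausibly concludes; the running-pod count is an assumption consistent with waitForPodsOrDie's signature, not quoted from this commit:

package example

import (
    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
)

// allPodsRunning is the kind of condition the poll above evaluates: done
// once n labeled pods in ns report PodRunning.
func allPodsRunning(cs *kubernetes.Clientset, ns string, n int) (bool, error) {
    pods, err := cs.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: "foo=bar"})
    if err != nil {
        return false, err
    }
    running := 0
    for _, p := range pods.Items {
        if p.Status.Phase == v1.PodRunning {
            running++
        }
    }
    return running == n, nil
}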
@@ -341,7 +341,7 @@ var _ = framework.KubeDescribe("Generated release_1_5 clientset", func() {
 var _ = framework.KubeDescribe("Staging client repo client", func() {
     f := framework.NewDefaultFramework("clientset")
     It("should create pods, delete pods, watch pods", func() {
-        podClient := f.StagingClient.Core().Pods(f.Namespace.Name)
+        podClient := f.StagingClient.CoreV1().Pods(f.Namespace.Name)
         By("constructing the pod")
         name := "pod" + string(uuid.NewUUID())
         value := strconv.Itoa(time.Now().Nanosecond())
@@ -120,7 +120,7 @@ func waitForNamespaceInFederatedClusters(clusters ClusterSlice, nsName string) {
         name := c.Name
         By(fmt.Sprintf("Waiting for namespace %q to be created in cluster %q", nsName, name))
         err := wait.PollImmediate(framework.Poll, FederatedDefaultTestTimeout, func() (bool, error) {
-            _, err := c.Clientset.Core().Namespaces().Get(nsName, metav1.GetOptions{})
+            _, err := c.Clientset.CoreV1().Namespaces().Get(nsName, metav1.GetOptions{})
             if errors.IsNotFound(err) {
                 return false, nil
             } else if err != nil {
@@ -598,7 +598,7 @@ func waitForSecretOrFail(clientset *kubeclientset.Clientset, nsName string, secr
     var clusterSecret *v1.Secret
     err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
         var err error
-        clusterSecret, err = clientset.Core().Secrets(nsName).Get(secret.Name, metav1.GetOptions{})
+        clusterSecret, err = clientset.CoreV1().Secrets(nsName).Get(secret.Name, metav1.GetOptions{})
         if (!present) && errors.IsNotFound(err) { // We want it gone, and it's gone.
             By(fmt.Sprintf("Success: shard of federated secret %q in namespace %q in cluster is absent", secret.Name, nsName))
             return true, nil // Success
@@ -61,8 +61,8 @@ var _ = framework.KubeDescribe("Federation namespace [Feature:Federation]", func
             f.FederationClientset.Core().Namespaces().Delete)
         for _, cluster := range clusters {
             deleteNamespace(nil, nsName,
-                cluster.Core().Namespaces().Get,
-                cluster.Core().Namespaces().Delete)
+                cluster.CoreV1().Namespaces().Get,
+                cluster.CoreV1().Namespaces().Delete)
         }
     })

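Worth noting in this hunk: deleteNamespace receives the Get and Delete methods as function values, so one helper serves both the federation control plane and each member cluster. A sketch of the implied shape (signature inferred from the call sites, not quoted from this commit):

package example

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// deleteNamespace works against any core/v1 namespace client because it only
// sees two method values; NotFound is treated as already-deleted.
func deleteNamespace(
    orphanDependents *bool,
    nsName string,
    get func(name string, options metav1.GetOptions) (*v1.Namespace, error),
    del func(name string, options *metav1.DeleteOptions) error,
) error {
    if err := del(nsName, &metav1.DeleteOptions{OrphanDependents: orphanDependents}); err != nil && !errors.IsNotFound(err) {
        return err
    }
    // Callers typically poll the getter until it reports NotFound.
    _, err := get(nsName, metav1.GetOptions{})
    switch {
    case errors.IsNotFound(err):
        return nil // gone
    case err != nil:
        return err
    default:
        return fmt.Errorf("namespace %s still terminating", nsName)
    }
}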
@@ -162,7 +162,7 @@ func verifyNsCascadingDeletion(nsClient clientset.NamespaceInterface, clusters f
     By(fmt.Sprintf("Waiting for namespace %s to be created in all underlying clusters", nsName))
     err := wait.Poll(5*time.Second, 2*time.Minute, func() (bool, error) {
         for _, cluster := range clusters {
-            _, err := cluster.Core().Namespaces().Get(nsName, metav1.GetOptions{})
+            _, err := cluster.CoreV1().Namespaces().Get(nsName, metav1.GetOptions{})
             if err != nil && !errors.IsNotFound(err) {
                 return false, err
             }
@@ -183,7 +183,7 @@ func verifyNsCascadingDeletion(nsClient clientset.NamespaceInterface, clusters f
     shouldExist := orphanDependents == nil || *orphanDependents == true
     for _, cluster := range clusters {
         clusterName := cluster.Name
-        _, err := cluster.Core().Namespaces().Get(nsName, metav1.GetOptions{})
+        _, err := cluster.CoreV1().Namespaces().Get(nsName, metav1.GetOptions{})
         if shouldExist && errors.IsNotFound(err) {
             errMessages = append(errMessages, fmt.Sprintf("unexpected NotFound error for namespace %s in cluster %s, expected namespace to exist", nsName, clusterName))
         } else if !shouldExist && !errors.IsNotFound(err) {
@@ -325,7 +325,7 @@ func verifyCascadingDeletionForService(clientset *fedclientset.Clientset, cluste
     By(fmt.Sprintf("Waiting for service %s to be created in all underlying clusters", serviceName))
     err := wait.Poll(5*time.Second, 2*time.Minute, func() (bool, error) {
         for _, cluster := range clusters {
-            _, err := cluster.Core().Services(nsName).Get(serviceName, metav1.GetOptions{})
+            _, err := cluster.CoreV1().Services(nsName).Get(serviceName, metav1.GetOptions{})
             if err != nil {
                 if !errors.IsNotFound(err) {
                     return false, err
@@ -346,7 +346,7 @@ func verifyCascadingDeletionForService(clientset *fedclientset.Clientset, cluste
     shouldExist := orphanDependents == nil || *orphanDependents == true
     for _, cluster := range clusters {
         clusterName := cluster.Name
-        _, err := cluster.Core().Services(nsName).Get(serviceName, metav1.GetOptions{})
+        _, err := cluster.CoreV1().Services(nsName).Get(serviceName, metav1.GetOptions{})
         if shouldExist && errors.IsNotFound(err) {
             errMessages = append(errMessages, fmt.Sprintf("unexpected NotFound error for service %s in cluster %s, expected service to exist", serviceName, clusterName))
         } else if !shouldExist && !errors.IsNotFound(err) {
@@ -381,7 +381,7 @@ func createBackendPodsOrFail(clusters fedframework.ClusterSlice, namespace strin
     for _, c := range clusters {
         name := c.Name
         By(fmt.Sprintf("Creating pod %q in namespace %q in cluster %q", pod.Name, namespace, name))
-        createdPod, err := c.Clientset.Core().Pods(namespace).Create(pod)
+        createdPod, err := c.Clientset.CoreV1().Pods(namespace).Create(pod)
         framework.ExpectNoError(err, "Creating pod %q in namespace %q in cluster %q", name, namespace, name)
         By(fmt.Sprintf("Successfully created pod %q in namespace %q in cluster %q: %v", pod.Name, namespace, name, *createdPod))
         podMap[name] = createdPod
@@ -393,7 +393,7 @@ func createBackendPodsOrFail(clusters fedframework.ClusterSlice, namespace strin
 // The test fails if there are any errors.
 func deleteOneBackendPodOrFail(c *fedframework.Cluster, pod *v1.Pod) {
     Expect(pod).ToNot(BeNil())
-    err := c.Clientset.Core().Pods(pod.Namespace).Delete(pod.Name, metav1.NewDeleteOptions(0))
+    err := c.Clientset.CoreV1().Pods(pod.Namespace).Delete(pod.Name, metav1.NewDeleteOptions(0))
     msgFmt := fmt.Sprintf("Deleting Pod %q in namespace %q in cluster %q %%v", pod.Name, pod.Namespace, c.Name)
     if errors.IsNotFound(err) {
         framework.Logf(msgFmt, "does not exist. No need to delete it.")
@@ -377,7 +377,7 @@ func TestSchedulerExtender(t *testing.T) {
     }
     eventBroadcaster := record.NewBroadcaster()
     schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.Scheme, v1.EventSource{Component: v1.DefaultSchedulerName})
-    eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(clientSet.Core().RESTClient()).Events("")})
+    eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(clientSet.CoreV1().RESTClient()).Events("")})
     scheduler, _ := scheduler.NewFromConfigurator(&scheduler.FakeConfigurator{Config: schedulerConfig}, nil...)
     informerFactory.Start(schedulerConfig.StopEverything)
     scheduler.Run()
@@ -83,7 +83,7 @@ func TestSchedulerCreationFromConfigMap(t *testing.T) {
     defer framework.DeleteTestingNamespace(ns, s, t)

     clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})
-    defer clientSet.Core().Nodes().DeleteCollection(nil, metav1.ListOptions{})
+    defer clientSet.CoreV1().Nodes().DeleteCollection(nil, metav1.ListOptions{})
     informerFactory := informers.NewSharedInformerFactory(clientSet, 0)

     // Pre-register some predicate and priority functions
@@ -113,10 +113,10 @@ func TestSchedulerCreationFromConfigMap(t *testing.T) {
     }

     policyConfigMap.APIVersion = testapi.Groups[v1.GroupName].GroupVersion().String()
-    clientSet.Core().ConfigMaps(metav1.NamespaceSystem).Create(&policyConfigMap)
+    clientSet.CoreV1().ConfigMaps(metav1.NamespaceSystem).Create(&policyConfigMap)

     eventBroadcaster := record.NewBroadcaster()
-    eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientv1core.New(clientSet.Core().RESTClient()).Events("")})
+    eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientv1core.New(clientSet.CoreV1().RESTClient()).Events("")})
     ss := options.NewSchedulerServer()
     ss.HardPodAffinitySymmetricWeight = v1.DefaultHardPodAffinitySymmetricWeight
     ss.PolicyConfigMapName = configPolicyName
@@ -161,12 +161,12 @@ func TestSchedulerCreationFromNonExistentConfigMap(t *testing.T) {
     defer framework.DeleteTestingNamespace(ns, s, t)

     clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})
-    defer clientSet.Core().Nodes().DeleteCollection(nil, metav1.ListOptions{})
+    defer clientSet.CoreV1().Nodes().DeleteCollection(nil, metav1.ListOptions{})

     informerFactory := informers.NewSharedInformerFactory(clientSet, 0)

     eventBroadcaster := record.NewBroadcaster()
-    eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientv1core.New(clientSet.Core().RESTClient()).Events("")})
+    eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientv1core.New(clientSet.CoreV1().RESTClient()).Events("")})

     ss := options.NewSchedulerServer()
     ss.PolicyConfigMapName = "non-existent-config"
@@ -362,7 +362,7 @@ func TestUnschedulableNodes(t *testing.T) {
     }

     for i, mod := range nodeModifications {
-        unSchedNode, err := context.clientSet.Core().Nodes().Create(node)
+        unSchedNode, err := context.clientSet.CoreV1().Nodes().Create(node)
         if err != nil {
             t.Fatalf("Failed to create node: %v", err)
         }
@@ -447,7 +447,7 @@ func TestMultiScheduler(t *testing.T) {
             },
         },
     }
-    context.clientSet.Core().Nodes().Create(node)
+    context.clientSet.CoreV1().Nodes().Create(node)

     // 3. create 3 pods for testing
     testPod, err := createPausePodWithResource(context.clientSet, "pod-without-scheduler-name", context.ns.Name, nil)
@@ -79,7 +79,7 @@ func mustSetupScheduler() (schedulerConfigurator scheduler.Configurator, destroy
     )

     eventBroadcaster := record.NewBroadcaster()
-    eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientv1core.New(clientSet.Core().RESTClient()).Events("")})
+    eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientv1core.New(clientSet.CoreV1().RESTClient()).Events("")})

     sched, err := scheduler.NewFromConfigurator(schedulerConfigurator, func(conf *scheduler.Config) {
         conf.Recorder = eventBroadcaster.NewRecorder(api.Scheme, v1.EventSource{Component: "scheduler"})
@@ -120,7 +120,7 @@ func main() {
     }
     ns := got.Name
     defer func(ns string) {
-        if err := client.Core().Namespaces().Delete(ns, nil); err != nil {
+        if err := client.CoreV1().Namespaces().Delete(ns, nil); err != nil {
             glog.Warningf("Failed to delete namespace ns: %e", ns, err)
         } else {
             // wait until the namespace disappears
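One pre-existing nit in the context above: the Warningf format string has a single verb for its two arguments, and %e is a floating-point verb besides. A corrected sketch of the same deferred cleanup, with client and ns as in the hunk:

    defer func(ns string) {
        if err := client.CoreV1().Namespaces().Delete(ns, nil); err != nil {
            // "%s: %v" matches the two arguments; the original "%e" did not.
            glog.Warningf("Failed to delete namespace %s: %v", ns, err)
        }
    }(ns)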