Fix code implicitly casting clientsets to getters

Branch: pull/6/head
Author: Dr. Stefan Schimanski
Date: 2017-07-21 12:46:24 +02:00
Parent: 25f2b0a2c1
Commit: 1910b5a1dd

18 changed files with 106 additions and 116 deletions
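
Note: the recurring change in this commit swaps direct access to a clientset's embedded typed-client fields (e.g. `client.CoreV1Client`) for the corresponding getter methods (e.g. `client.CoreV1()`). The getters are what the clientset interfaces expose, so code written against them also works with fakes and survives changes to the concrete struct layout. A minimal sketch of the idea, using toy stand-in types rather than the real client-go API:

```go
package main

import "fmt"

// CoreV1Interface is a toy stand-in for the typed client interface that the
// real CoreV1() getter returns.
type CoreV1Interface interface {
	Pods(namespace string) string
}

type coreV1Client struct{}

func (c *coreV1Client) Pods(namespace string) string { return "pods in " + namespace }

// Clientset mirrors the shape of kubernetes.Clientset: typed clients live in
// exported fields, but the supported access path is the getter method.
type Clientset struct {
	CoreV1Client *coreV1Client // direct field access couples callers to this struct
}

func (c *Clientset) CoreV1() CoreV1Interface { return c.CoreV1Client }

// listPods only needs the getter's interface, so it also accepts fakes.
func listPods(core CoreV1Interface, ns string) {
	fmt.Println(core.Pods(ns))
}

func main() {
	cs := &Clientset{CoreV1Client: &coreV1Client{}}
	listPods(cs.CoreV1(), "kube-system") // preferred: go through the getter
}
```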


@@ -106,7 +106,7 @@ func StartTestServer(t *testing.T) (result *restclient.Config, tearDownForCaller
 		default:
 		}
-		result := client.CoreV1Client.RESTClient().Get().AbsPath("/healthz").Do()
+		result := client.CoreV1().RESTClient().Get().AbsPath("/healthz").Do()
 		status := 0
 		result.StatusCode(&status)
 		if status == 200 {


@@ -27,7 +27,6 @@ import (
 	"k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	clientset "k8s.io/client-go/kubernetes"
-	corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
 	restclient "k8s.io/client-go/rest"
 	kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
 	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
@@ -121,7 +120,7 @@ func TestMarkMaster(t *testing.T) {
 		}))
 		defer s.Close()
-		cs, err := clientsetFromTestServer(s)
+		cs, err := clientset.NewForConfig(&restclient.Config{Host: s.URL})
 		if err != nil {
 			t.Fatalf("MarkMaster(%s): unexpected error building clientset: %v", tc.name, err)
 		}
@@ -137,15 +136,6 @@
 	}
 }
-func clientsetFromTestServer(s *httptest.Server) (*clientset.Clientset, error) {
-	rc := &restclient.Config{Host: s.URL}
-	c, err := corev1.NewForConfig(rc)
-	if err != nil {
-		return nil, err
-	}
-	return &clientset.Clientset{CoreV1Client: c}, nil
-}
 func toString(r io.Reader) string {
 	buf := new(bytes.Buffer)
 	buf.ReadFrom(r)
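
Note: the deleted `clientsetFromTestServer` helper hand-assembled a `Clientset` from a single typed client, leaving every field but `CoreV1Client` nil. The replacement builds a complete clientset from the test server's URL. A sketch of the pattern (assuming client-go signatures of this era, without the context parameter later versions require):

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	clientset "k8s.io/client-go/kubernetes"
	restclient "k8s.io/client-go/rest"
)

func main() {
	// A stub API server; a real test would serve proper API responses.
	s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte(`{}`))
	}))
	defer s.Close()

	// NewForConfig wires up every typed client behind the getters, so
	// cs.CoreV1(), cs.ExtensionsV1beta1(), etc. all target the test server;
	// no manual struct assembly is needed.
	cs, err := clientset.NewForConfig(&restclient.Config{Host: s.URL})
	if err != nil {
		panic(err)
	}
	fmt.Printf("clientset ready: %T\n", cs.CoreV1())
}
```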


@@ -203,14 +203,14 @@ func getRemoteKubeletConfig(s *options.KubeletServer, kubeDeps *kubelet.Dependen
 			return nil, err
 		}
 		// look for kubelet-<node-name> configmap from "kube-system"
-		configmap, err := kubeClient.CoreV1Client.ConfigMaps("kube-system").Get(fmt.Sprintf("kubelet-%s", nodename), metav1.GetOptions{})
+		configmap, err := kubeClient.CoreV1().ConfigMaps("kube-system").Get(fmt.Sprintf("kubelet-%s", nodename), metav1.GetOptions{})
 		if err != nil {
 			return nil, err
 		}
 		return configmap, nil
 	}
 	// No cloud provider yet, so can't get the nodename via Cloud.Instances().CurrentNodeName(hostname), try just using the hostname
-	configmap, err := kubeClient.CoreV1Client.ConfigMaps("kube-system").Get(fmt.Sprintf("kubelet-%s", hostname), metav1.GetOptions{})
+	configmap, err := kubeClient.CoreV1().ConfigMaps("kube-system").Get(fmt.Sprintf("kubelet-%s", hostname), metav1.GetOptions{})
 	if err != nil {
 		return nil, fmt.Errorf("cloud provider was nil, and attempt to use hostname to find config resulted in: %v", err)
 	}


@@ -147,7 +147,7 @@ func main() {
 		hollowProxy, err := kubemark.NewHollowProxyOrDie(
 			config.NodeName,
 			internalClientset,
-			client.Core(),
+			client.CoreV1(),
 			iptInterface,
 			sysctl,
 			execer,


@@ -587,17 +587,17 @@ func (f *fakeAPIFactory) ClientSet() (internalclientset.Interface, error) {
 	// version.
 	fakeClient := f.tf.Client.(*fake.RESTClient)
 	clientset := internalclientset.NewForConfigOrDie(f.tf.ClientConfig)
-	clientset.CoreClient.RESTClient().(*restclient.RESTClient).Client = fakeClient.Client
-	clientset.AuthenticationClient.RESTClient().(*restclient.RESTClient).Client = fakeClient.Client
-	clientset.AuthorizationClient.RESTClient().(*restclient.RESTClient).Client = fakeClient.Client
-	clientset.AutoscalingClient.RESTClient().(*restclient.RESTClient).Client = fakeClient.Client
-	clientset.BatchClient.RESTClient().(*restclient.RESTClient).Client = fakeClient.Client
-	clientset.CertificatesClient.RESTClient().(*restclient.RESTClient).Client = fakeClient.Client
-	clientset.ExtensionsClient.RESTClient().(*restclient.RESTClient).Client = fakeClient.Client
-	clientset.RbacClient.RESTClient().(*restclient.RESTClient).Client = fakeClient.Client
-	clientset.StorageClient.RESTClient().(*restclient.RESTClient).Client = fakeClient.Client
-	clientset.AppsClient.RESTClient().(*restclient.RESTClient).Client = fakeClient.Client
-	clientset.PolicyClient.RESTClient().(*restclient.RESTClient).Client = fakeClient.Client
+	clientset.Core().RESTClient().(*restclient.RESTClient).Client = fakeClient.Client
+	clientset.Authentication().RESTClient().(*restclient.RESTClient).Client = fakeClient.Client
+	clientset.Authorization().RESTClient().(*restclient.RESTClient).Client = fakeClient.Client
+	clientset.Autoscaling().RESTClient().(*restclient.RESTClient).Client = fakeClient.Client
+	clientset.Batch().RESTClient().(*restclient.RESTClient).Client = fakeClient.Client
+	clientset.Certificates().RESTClient().(*restclient.RESTClient).Client = fakeClient.Client
+	clientset.Extensions().RESTClient().(*restclient.RESTClient).Client = fakeClient.Client
+	clientset.Rbac().RESTClient().(*restclient.RESTClient).Client = fakeClient.Client
+	clientset.Storage().RESTClient().(*restclient.RESTClient).Client = fakeClient.Client
+	clientset.Apps().RESTClient().(*restclient.RESTClient).Client = fakeClient.Client
+	clientset.Policy().RESTClient().(*restclient.RESTClient).Client = fakeClient.Client
 	clientset.DiscoveryClient.RESTClient().(*restclient.RESTClient).Client = fakeClient.Client
 	return clientset, f.tf.Err
 }
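
Note: this hunk keeps the test's trick of swapping each API group's HTTP transport for a fake, but now reaches the typed clients through their getters. Each getter returns the REST interface, so the test type-asserts back to the concrete `*restclient.RESTClient` to set its exported `Client` field. A toy sketch of that assertion pattern (stand-in types, not the real client-go API):

```go
package main

import (
	"fmt"
	"net/http"
)

// Interface is a toy stand-in for rest.Interface, which getters return.
type Interface interface {
	Verb(verb string) string
}

// RESTClient is a toy stand-in for restclient.RESTClient.
type RESTClient struct {
	Client *http.Client // the field tests want to replace with a fake
}

func (r *RESTClient) Verb(verb string) string { return verb }

func main() {
	var c Interface = &RESTClient{Client: http.DefaultClient}
	fake := &http.Client{}
	// The getter only exposes the interface; assert back to the concrete
	// type to reach the exported Client field and inject the fake.
	c.(*RESTClient).Client = fake
	fmt.Println(c.Verb("GET"))
}
```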


@@ -1501,7 +1501,7 @@ func TestUpdateRcWithRetries(t *testing.T) {
 	clientset := internalclientset.New(restClient)
 	if rc, err := updateRcWithRetries(
-		clientset, "default", rc, func(c *api.ReplicationController) {
+		clientset.Core(), "default", rc, func(c *api.ReplicationController) {
 			c.Spec.Selector["baz"] = "foobar"
 		}); err != nil {
 		t.Errorf("unexpected error: %v", err)


@@ -184,10 +184,10 @@ func (c completedConfig) New(delegationTarget genericapiserver.DelegationTarget)
 	}
 	crdController := NewDiscoveryController(s.Informers.Apiextensions().InternalVersion().CustomResourceDefinitions(), versionDiscoveryHandler, groupDiscoveryHandler, c.GenericConfig.RequestContextMapper)
-	namingController := status.NewNamingConditionController(s.Informers.Apiextensions().InternalVersion().CustomResourceDefinitions(), crdClient)
+	namingController := status.NewNamingConditionController(s.Informers.Apiextensions().InternalVersion().CustomResourceDefinitions(), crdClient.Apiextensions())
 	finalizingController := finalizer.NewCRDFinalizer(
 		s.Informers.Apiextensions().InternalVersion().CustomResourceDefinitions(),
-		crdClient,
+		crdClient.Apiextensions(),
 		crdHandler,
 	)


@@ -132,7 +132,7 @@ func failureTrap(c clientset.Interface, ns string) {
 		d := deployments.Items[i]
 		framework.Logf(spew.Sprintf("Deployment %q:\n%+v\n", d.Name, d))
-		_, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(&d, c)
+		_, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(&d, c.ExtensionsV1beta1())
 		if err != nil {
 			framework.Logf("Could not list ReplicaSets for Deployment %q: %v", d.Name, err)
 			return
@@ -887,7 +887,7 @@ func testDeploymentLabelAdopted(f *framework.Framework) {
 	// There should be no old RSs (overlapping RS)
 	deployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
 	Expect(err).NotTo(HaveOccurred())
-	oldRSs, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(deployment, c)
+	oldRSs, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(deployment, c.ExtensionsV1beta1())
 	Expect(err).NotTo(HaveOccurred())
 	Expect(len(oldRSs)).Should(Equal(0))
 	Expect(len(allOldRSs)).Should(Equal(0))
@@ -1035,7 +1035,7 @@ func testScaledRolloutDeployment(f *framework.Framework) {
 	framework.Logf("Waiting for deployment status to sync (current available: %d, minimum available: %d)", deployment.Status.AvailableReplicas, deploymentutil.MinAvailable(deployment))
 	Expect(framework.WaitForDeploymentStatusValid(c, deployment)).NotTo(HaveOccurred())
-	oldRSs, _, rs, err := deploymentutil.GetAllReplicaSets(deployment, c)
+	oldRSs, _, rs, err := deploymentutil.GetAllReplicaSets(deployment, c.ExtensionsV1beta1())
 	Expect(err).NotTo(HaveOccurred())
 	for _, rs := range append(oldRSs, rs) {
@@ -1095,7 +1095,7 @@ func testScaledRolloutDeployment(f *framework.Framework) {
 	framework.Logf("Waiting for deployment status to sync (current available: %d, minimum available: %d)", deployment.Status.AvailableReplicas, deploymentutil.MinAvailable(deployment))
 	Expect(framework.WaitForDeploymentStatusValid(c, deployment)).NotTo(HaveOccurred())
-	oldRSs, _, rs, err = deploymentutil.GetAllReplicaSets(deployment, c)
+	oldRSs, _, rs, err = deploymentutil.GetAllReplicaSets(deployment, c.ExtensionsV1beta1())
 	Expect(err).NotTo(HaveOccurred())
 	for _, rs := range append(oldRSs, rs) {
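
Note: `deploymentutil.GetAllReplicaSets` evidently takes the typed extensions client rather than the whole clientset, so call sites now pass `c.ExtensionsV1beta1()`. Narrowing a helper's parameter to the one typed client it actually uses makes the dependency explicit and keeps the helper callable with a bare typed client or fake. A toy sketch of the narrowed signature (stand-in types, not the real API):

```go
package main

import "fmt"

// ExtensionsV1beta1Interface is a toy stand-in for the typed client that
// c.ExtensionsV1beta1() returns.
type ExtensionsV1beta1Interface interface {
	ReplicaSets(namespace string) []string
}

type fakeExtensions struct{}

func (fakeExtensions) ReplicaSets(ns string) []string {
	return []string{ns + "/rs-1", ns + "/rs-2"}
}

// getAllReplicaSets depends only on the extensions client, not the clientset,
// so the compiler documents exactly which API group the helper touches.
func getAllReplicaSets(ext ExtensionsV1beta1Interface, ns string) []string {
	return ext.ReplicaSets(ns)
}

func main() {
	fmt.Println(getAllReplicaSets(fakeExtensions{}, "default"))
}
```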


@@ -163,7 +163,7 @@ var _ = SIGDescribe("DisruptionController", func() {
 			// Locate a running pod.
 			var pod v1.Pod
 			err := wait.PollImmediate(framework.Poll, schedulingTimeout, func() (bool, error) {
-				podList, err := cs.Pods(ns).List(metav1.ListOptions{})
+				podList, err := cs.CoreV1().Pods(ns).List(metav1.ListOptions{})
 				if err != nil {
 					return false, err
 				}
@@ -191,7 +191,7 @@ var _ = SIGDescribe("DisruptionController", func() {
 				// this gives the controller enough time to have truly set the status.
 				time.Sleep(timeout)
-				err = cs.Pods(ns).Evict(e)
+				err = cs.CoreV1().Pods(ns).Evict(e)
 				Expect(err).Should(MatchError("Cannot evict pod as it would violate the pod's disruption budget."))
 			} else {
 				// Only wait for running pods in the "allow" case
@@ -202,7 +202,7 @@ var _ = SIGDescribe("DisruptionController", func() {
 				// Since disruptionAllowed starts out false, if an eviction is ever allowed,
 				// that means the controller is working.
 				err = wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
-					err = cs.Pods(ns).Evict(e)
+					err = cs.CoreV1().Pods(ns).Evict(e)
 					if err != nil {
 						return false, nil
 					} else {
@@ -264,7 +264,7 @@ func createPodsOrDie(cs *kubernetes.Clientset, ns string, n int) {
 			},
 		}
-		_, err := cs.Pods(ns).Create(pod)
+		_, err := cs.CoreV1().Pods(ns).Create(pod)
 		framework.ExpectNoError(err, "Creating pod %q in namespace %q", pod.Name, ns)
 	}
 }


@@ -141,7 +141,7 @@ func (t *DeploymentUpgradeTest) Test(f *framework.Framework, done <-chan struct{
 	framework.ExpectNoError(framework.WaitForDeploymentStatus(c, deployment))
 	By(fmt.Sprintf("Checking that replica sets for deployment %q are the same as prior to the upgrade", t.updatedD.Name))
-	_, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(t.updatedD, c)
+	_, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(t.updatedD, c.ExtensionsV1beta1())
 	framework.ExpectNoError(err)
 	if newRS == nil {
 		By(t.spewDeploymentAndReplicaSets(newRS, allOldRSs))


@@ -327,7 +327,7 @@ func waitForIngressOrFail(clientset *kubeclientset.Clientset, namespace string,
 	var clusterIngress *v1beta1.Ingress
 	err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
 		var err error
-		clusterIngress, err = clientset.Ingresses(namespace).Get(ingress.Name, metav1.GetOptions{})
+		clusterIngress, err = clientset.ExtensionsV1beta1().Ingresses(namespace).Get(ingress.Name, metav1.GetOptions{})
 		if (!present) && errors.IsNotFound(err) { // We want it gone, and it's gone.
 			By(fmt.Sprintf("Success: shard of federated ingress %q in namespace %q in cluster is absent", ingress.Name, namespace))
 			return true, nil // Success
@@ -367,7 +367,7 @@ func waitForIngressShardsUpdatedOrFail(namespace string, ingress *v1beta1.Ingres
 func waitForIngressUpdateOrFail(clientset *kubeclientset.Clientset, namespace string, ingress *v1beta1.Ingress, timeout time.Duration) {
 	By(fmt.Sprintf("Fetching a federated ingress shard of ingress %q in namespace %q from cluster", ingress.Name, namespace))
 	err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
-		clusterIngress, err := clientset.Ingresses(namespace).Get(ingress.Name, metav1.GetOptions{})
+		clusterIngress, err := clientset.ExtensionsV1beta1().Ingresses(namespace).Get(ingress.Name, metav1.GetOptions{})
 		if err == nil { // We want it present, and the Get succeeded, so we're all good.
 			if equivalentIngress(*clusterIngress, *ingress) {
 				By(fmt.Sprintf("Success: shard of federated ingress %q in namespace %q in cluster is updated", ingress.Name, namespace))
@@ -394,7 +394,7 @@ func deleteIngressOrFail(clientset *fedclientset.Clientset, namespace string, in
 	if clientset == nil || len(namespace) == 0 || len(ingressName) == 0 {
 		Fail(fmt.Sprintf("Internal error: invalid parameters passed to deleteIngressOrFail: clientset: %v, namespace: %v, ingress: %v", clientset, namespace, ingressName))
 	}
-	err := clientset.Ingresses(namespace).Delete(ingressName, &metav1.DeleteOptions{OrphanDependents: orphanDependents})
+	err := clientset.ExtensionsV1beta1().Ingresses(namespace).Delete(ingressName, &metav1.DeleteOptions{OrphanDependents: orphanDependents})
 	framework.ExpectNoError(err, "Error deleting ingress %q from namespace %q", ingressName, namespace)
 	// Wait for the ingress to be deleted.
 	err = wait.Poll(framework.Poll, FederatedIngressDeleteTimeout, func() (bool, error) {
@@ -414,7 +414,7 @@ func deleteClusterIngressOrFail(clusterName string, clientset *kubeclientset.Cli
 	if clientset == nil || len(namespace) == 0 || len(ingressName) == 0 {
 		Fail(fmt.Sprintf("Internal error: invalid parameters passed to deleteClusterIngressOrFail: cluster: %q, clientset: %v, namespace: %v, ingress: %v", clusterName, clientset, namespace, ingressName))
 	}
-	err := clientset.Ingresses(namespace).Delete(ingressName, metav1.NewDeleteOptions(0))
+	err := clientset.ExtensionsV1beta1().Ingresses(namespace).Delete(ingressName, metav1.NewDeleteOptions(0))
 	framework.ExpectNoError(err, "Error deleting cluster ingress %q/%q from cluster %q", namespace, ingressName, clusterName)
 }


@@ -311,7 +311,7 @@ func waitForReplicaSetOrFail(c *fedclientset.Clientset, namespace string, replic
 func waitForReplicaSet(c *fedclientset.Clientset, namespace string, replicaSetName string, clusters fedframework.ClusterSlice, expect map[string]int32) error {
 	framework.Logf("waitForReplicaSet: %s/%s; clusters: %v; expect: %v", namespace, replicaSetName, clusters, expect)
 	err := wait.Poll(10*time.Second, fedframework.FederatedDefaultTestTimeout, func() (bool, error) {
-		frs, err := c.ReplicaSets(namespace).Get(replicaSetName, metav1.GetOptions{})
+		frs, err := c.ExtensionsV1beta1().ReplicaSets(namespace).Get(replicaSetName, metav1.GetOptions{})
 		if err != nil {
 			return false, err
 		}
@@ -392,7 +392,7 @@ func updateReplicaSetOrFail(clientset *fedclientset.Clientset, replicaset *v1bet
 	}
 	By(fmt.Sprintf("Updating federation replicaset %q in namespace %q", replicaset.Name, namespace))
-	newRS, err := clientset.ReplicaSets(namespace).Update(replicaset)
+	newRS, err := clientset.ExtensionsV1beta1().ReplicaSets(namespace).Update(replicaset)
 	framework.ExpectNoError(err, "Updating replicaset %q in namespace %q", replicaset.Name, namespace)
 	By(fmt.Sprintf("Successfully updated federation replicaset %q in namespace %q", replicaset.Name, namespace))


@@ -68,7 +68,7 @@ var _ = framework.KubeDescribe("Federated Services [Feature:Federation]", func()
 			By(fmt.Sprintf("Creation of service %q in namespace %q succeeded. Deleting service.", service.Name, nsName))
 			// Cleanup
-			err := f.FederationClientset.Services(nsName).Delete(service.Name, &metav1.DeleteOptions{})
+			err := f.FederationClientset.CoreV1().Services(nsName).Delete(service.Name, &metav1.DeleteOptions{})
 			framework.ExpectNoError(err, "Error deleting service %q in namespace %q", service.Name, service.Namespace)
 			By(fmt.Sprintf("Deletion of service %q in namespace %q succeeded.", service.Name, nsName))
 		})
@@ -113,7 +113,7 @@ var _ = framework.KubeDescribe("Federated Services [Feature:Federation]", func()
 			service = createServiceOrFail(f.FederationClientset, nsName, FederatedServiceName)
 			defer func() { // Cleanup
 				By(fmt.Sprintf("Deleting service %q in namespace %q", service.Name, nsName))
-				err := f.FederationClientset.Services(nsName).Delete(service.Name, &metav1.DeleteOptions{})
+				err := f.FederationClientset.CoreV1().Services(nsName).Delete(service.Name, &metav1.DeleteOptions{})
 				framework.ExpectNoError(err, "Error deleting service %q in namespace %q", service.Name, nsName)
 			}()
 			By(fmt.Sprintf("Wait for service shards to be created in all clusters for service \"%s/%s\"", nsName, service.Name))
@@ -151,7 +151,7 @@ var _ = framework.KubeDescribe("Federated Services [Feature:Federation]", func()
 			defer func() {
 				// Cleanup
 				By(fmt.Sprintf("Deleting service %q in namespace %q", service.Name, nsName))
-				err := f.FederationClientset.Services(nsName).Delete(service.Name, &metav1.DeleteOptions{})
+				err := f.FederationClientset.CoreV1().Services(nsName).Delete(service.Name, &metav1.DeleteOptions{})
 				framework.ExpectNoError(err, "Error deleting service %q in namespace %q", service.Name, nsName)
 			}()
 			By(fmt.Sprintf("Wait for service shards to be created in all clusters for service \"%s/%s\"", nsName, service.Name))
@@ -258,7 +258,7 @@ var _ = framework.KubeDescribe("Federated Services [Feature:Federation]", func()
 				By("Verified that DNS rules are working as expected")
 				By("Deleting the service to verify that DNS rules still work")
-				err := f.FederationClientset.Services(nsName).Delete(FederatedServiceName, &metav1.DeleteOptions{})
+				err := f.FederationClientset.CoreV1().Services(nsName).Delete(FederatedServiceName, &metav1.DeleteOptions{})
 				framework.ExpectNoError(err, "Error deleting service %q in namespace %q", service.Name, service.Namespace)
 				// Service is deleted, unset the test block-global service variable.
 				service = nil
@@ -359,16 +359,16 @@ func verifyCascadingDeletionForService(clientset *fedclientset.Clientset, cluste
 }
 func updateServiceOrFail(clientset *fedclientset.Clientset, namespace, name string) *v1.Service {
-	service, err := clientset.Services(namespace).Get(name, metav1.GetOptions{})
+	service, err := clientset.CoreV1().Services(namespace).Get(name, metav1.GetOptions{})
 	framework.ExpectNoError(err, "Getting service %q in namespace %q", name, namespace)
 	service.Spec.Selector["name"] = "update-demo"
-	newService, err := clientset.Services(namespace).Update(service)
+	newService, err := clientset.CoreV1().Services(namespace).Update(service)
 	By(fmt.Sprintf("Successfully updated federated service %q in namespace %q", name, namespace))
 	return newService
 }
 func deleteServiceShard(c *fedframework.Cluster, namespace, service string) error {
-	err := c.Clientset.Services(namespace).Delete(service, &metav1.DeleteOptions{})
+	err := c.Clientset.CoreV1().Services(namespace).Delete(service, &metav1.DeleteOptions{})
 	if err != nil && !errors.IsNotFound(err) {
 		framework.Logf("Failed to delete service %q in namespace %q, in cluster %q", service, namespace, c.Name)
 		return err


@@ -88,7 +88,7 @@ func createClusterObjectOrFail(f *fedframework.Framework, context *fedframework.
 func waitForServiceOrFail(clientset *kubeclientset.Clientset, namespace string, service *v1.Service, present bool, timeout time.Duration) {
 	By(fmt.Sprintf("Fetching a federated service shard of service %q in namespace %q from cluster", service.Name, namespace))
 	err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
-		clusterService, err := clientset.Services(namespace).Get(service.Name, metav1.GetOptions{})
+		clusterService, err := clientset.CoreV1().Services(namespace).Get(service.Name, metav1.GetOptions{})
 		if (!present) && errors.IsNotFound(err) { // We want it gone, and it's gone.
 			By(fmt.Sprintf("Success: shard of federated service %q in namespace %q in cluster is absent", service.Name, namespace))
 			return true, nil // Success
@@ -141,7 +141,7 @@ func createService(clientset *fedclientset.Clientset, namespace, name string) (*
 	}
 	By(fmt.Sprintf("Trying to create service %q in namespace %q", service.Name, namespace))
-	return clientset.Services(namespace).Create(service)
+	return clientset.CoreV1().Services(namespace).Create(service)
 }
 func createLBService(clientset *fedclientset.Clientset, namespace, name string) (*v1.Service, error) {
@@ -178,7 +178,7 @@ func createLBService(clientset *fedclientset.Clientset, namespace, name string)
 	}
 	By(fmt.Sprintf("Trying to create service %q in namespace %q", service.Name, namespace))
-	return clientset.Services(namespace).Create(service)
+	return clientset.CoreV1().Services(namespace).Create(service)
 }
 func createServiceOrFail(clientset *fedclientset.Clientset, namespace, name string) *v1.Service {
@@ -200,7 +200,7 @@ func deleteServiceOrFail(clientset *fedclientset.Clientset, namespace string, se
 		Fail(fmt.Sprintf("Internal error: invalid parameters passed to deleteServiceOrFail: clientset: %v, namespace: %v, service: %v", clientset, namespace, serviceName))
 	}
 	framework.Logf("Deleting service %q in namespace %v", serviceName, namespace)
-	err := clientset.Services(namespace).Delete(serviceName, &metav1.DeleteOptions{OrphanDependents: orphanDependents})
+	err := clientset.CoreV1().Services(namespace).Delete(serviceName, &metav1.DeleteOptions{OrphanDependents: orphanDependents})
 	framework.ExpectNoError(err, "Error deleting service %q from namespace %q", serviceName, namespace)
 	// Wait for the service to be deleted.
 	err = wait.Poll(5*time.Second, fedframework.FederatedDefaultTestTimeout, func() (bool, error) {
@@ -224,7 +224,7 @@ func cleanupServiceShardsAndProviderResources(namespace string, service *v1.Serv
 		err := wait.PollImmediate(framework.Poll, fedframework.FederatedDefaultTestTimeout, func() (bool, error) {
 			var err error
-			cSvc, err = c.Clientset.Services(namespace).Get(service.Name, metav1.GetOptions{})
+			cSvc, err = c.Clientset.CoreV1().Services(namespace).Get(service.Name, metav1.GetOptions{})
 			if err != nil && !errors.IsNotFound(err) {
 				// Get failed with an error, try again.
 				framework.Logf("Failed to find service %q in namespace %q, in cluster %q: %v. Trying again in %s", service.Name, namespace, name, err, framework.Poll)
@@ -260,7 +260,7 @@ func cleanupServiceShardsAndProviderResources(namespace string, service *v1.Serv
 func cleanupServiceShard(clientset *kubeclientset.Clientset, clusterName, namespace string, service *v1.Service, timeout time.Duration) error {
 	err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
-		err := clientset.Services(namespace).Delete(service.Name, &metav1.DeleteOptions{})
+		err := clientset.CoreV1().Services(namespace).Delete(service.Name, &metav1.DeleteOptions{})
 		if err != nil && !errors.IsNotFound(err) {
 			// Deletion failed with an error, try again.
 			framework.Logf("Failed to delete service %q in namespace %q, in cluster %q", service.Name, namespace, clusterName)


@@ -265,7 +265,7 @@ func updateTestContext() error {
 // getNode gets node object from the apiserver.
 func getNode(c *clientset.Clientset) (*v1.Node, error) {
-	nodes, err := c.Nodes().List(metav1.ListOptions{})
+	nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
 	Expect(err).NotTo(HaveOccurred(), "should be able to list nodes.")
 	if nodes == nil {
 		return nil, fmt.Errorf("the node list is nil.")


@@ -99,7 +99,7 @@ func TestKubernetesService(t *testing.T) {
 	_, _, closeFn := framework.RunAMaster(config)
 	defer closeFn()
 	coreClient := clientset.NewForConfigOrDie(config.GenericConfig.LoopbackClientConfig)
-	if _, err := coreClient.Services(metav1.NamespaceDefault).Get("kubernetes", metav1.GetOptions{}); err != nil {
+	if _, err := coreClient.Core().Services(metav1.NamespaceDefault).Get("kubernetes", metav1.GetOptions{}); err != nil {
 		t.Fatalf("Expected kubernetes service to exists, got: %v", err)
 	}
 }


@@ -127,13 +127,13 @@ func TestPersistentVolumeRecycler(t *testing.T) {
 	pv := createPV("fake-pv-recycler", "/tmp/foo", "10G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, v1.PersistentVolumeReclaimRecycle)
 	pvc := createPVC("fake-pvc-recycler", ns.Name, "5G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, "")
-	_, err := testClient.PersistentVolumes().Create(pv)
+	_, err := testClient.CoreV1().PersistentVolumes().Create(pv)
 	if err != nil {
 		t.Errorf("Failed to create PersistentVolume: %v", err)
 	}
 	glog.V(2).Infof("TestPersistentVolumeRecycler pvc created")
-	_, err = testClient.PersistentVolumeClaims(ns.Name).Create(pvc)
+	_, err = testClient.CoreV1().PersistentVolumeClaims(ns.Name).Create(pvc)
 	if err != nil {
 		t.Errorf("Failed to create PersistentVolumeClaim: %v", err)
 	}
@@ -146,7 +146,7 @@
 	glog.V(2).Infof("TestPersistentVolumeRecycler pvc bound")
 	// deleting a claim releases the volume, after which it can be recycled
-	if err := testClient.PersistentVolumeClaims(ns.Name).Delete(pvc.Name, nil); err != nil {
+	if err := testClient.CoreV1().PersistentVolumeClaims(ns.Name).Delete(pvc.Name, nil); err != nil {
 		t.Errorf("error deleting claim %s", pvc.Name)
 	}
 	glog.V(2).Infof("TestPersistentVolumeRecycler pvc deleted")
@@ -182,12 +182,12 @@ func TestPersistentVolumeDeleter(t *testing.T) {
 	pv := createPV("fake-pv-deleter", "/tmp/foo", "10G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, v1.PersistentVolumeReclaimDelete)
 	pvc := createPVC("fake-pvc-deleter", ns.Name, "5G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, "")
-	_, err := testClient.PersistentVolumes().Create(pv)
+	_, err := testClient.CoreV1().PersistentVolumes().Create(pv)
 	if err != nil {
 		t.Errorf("Failed to create PersistentVolume: %v", err)
 	}
 	glog.V(2).Infof("TestPersistentVolumeDeleter pv created")
-	_, err = testClient.PersistentVolumeClaims(ns.Name).Create(pvc)
+	_, err = testClient.CoreV1().PersistentVolumeClaims(ns.Name).Create(pvc)
 	if err != nil {
 		t.Errorf("Failed to create PersistentVolumeClaim: %v", err)
 	}
@@ -198,7 +198,7 @@
 	glog.V(2).Infof("TestPersistentVolumeDeleter pvc bound")
 	// deleting a claim releases the volume, after which it can be recycled
-	if err := testClient.PersistentVolumeClaims(ns.Name).Delete(pvc.Name, nil); err != nil {
+	if err := testClient.CoreV1().PersistentVolumeClaims(ns.Name).Delete(pvc.Name, nil); err != nil {
 		t.Errorf("error deleting claim %s", pvc.Name)
 	}
 	glog.V(2).Infof("TestPersistentVolumeDeleter pvc deleted")
@@ -248,7 +248,7 @@ func TestPersistentVolumeBindRace(t *testing.T) {
 		clone, _ := api.Scheme.DeepCopy(pvc)
 		newPvc, _ := clone.(*v1.PersistentVolumeClaim)
 		newPvc.ObjectMeta = metav1.ObjectMeta{Name: fmt.Sprintf("fake-pvc-race-%d", counter)}
-		claim, err := testClient.PersistentVolumeClaims(ns.Name).Create(newPvc)
+		claim, err := testClient.CoreV1().PersistentVolumeClaims(ns.Name).Create(newPvc)
 		if err != nil {
 			t.Fatalf("Error creating newPvc: %v", err)
 		}
@@ -266,7 +266,7 @@
 	pv.Spec.ClaimRef = claimRef
 	pv.Spec.ClaimRef.UID = ""
-	pv, err = testClient.PersistentVolumes().Create(pv)
+	pv, err = testClient.CoreV1().PersistentVolumes().Create(pv)
 	if err != nil {
 		t.Fatalf("Unexpected error creating pv: %v", err)
 	}
@@ -277,7 +277,7 @@
 	waitForAnyPersistentVolumeClaimPhase(watchPVC, v1.ClaimBound)
 	glog.V(2).Infof("TestPersistentVolumeBindRace pvc bound")
-	pv, err = testClient.PersistentVolumes().Get(pv.Name, metav1.GetOptions{})
+	pv, err = testClient.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{})
 	if err != nil {
 		t.Fatalf("Unexpected error getting pv: %v", err)
 	}
@@ -323,11 +323,11 @@ func TestPersistentVolumeClaimLabelSelector(t *testing.T) {
 	pv_true.ObjectMeta.SetLabels(map[string]string{"foo": "true"})
 	pv_false.ObjectMeta.SetLabels(map[string]string{"foo": "false"})
-	_, err = testClient.PersistentVolumes().Create(pv_true)
+	_, err = testClient.CoreV1().PersistentVolumes().Create(pv_true)
 	if err != nil {
 		t.Fatalf("Failed to create PersistentVolume: %v", err)
 	}
-	_, err = testClient.PersistentVolumes().Create(pv_false)
+	_, err = testClient.CoreV1().PersistentVolumes().Create(pv_false)
 	if err != nil {
 		t.Fatalf("Failed to create PersistentVolume: %v", err)
 	}
@@ -339,7 +339,7 @@
 		},
 	}
-	_, err = testClient.PersistentVolumeClaims(ns.Name).Create(pvc)
+	_, err = testClient.CoreV1().PersistentVolumeClaims(ns.Name).Create(pvc)
 	if err != nil {
 		t.Fatalf("Failed to create PersistentVolumeClaim: %v", err)
 	}
@@ -350,14 +350,14 @@
 	waitForPersistentVolumeClaimPhase(testClient, pvc.Name, ns.Name, watchPVC, v1.ClaimBound)
 	t.Log("claim bound")
-	pv, err := testClient.PersistentVolumes().Get("pv-false", metav1.GetOptions{})
+	pv, err := testClient.CoreV1().PersistentVolumes().Get("pv-false", metav1.GetOptions{})
 	if err != nil {
 		t.Fatalf("Unexpected error getting pv: %v", err)
 	}
 	if pv.Spec.ClaimRef != nil {
 		t.Fatalf("False PV shouldn't be bound")
 	}
-	pv, err = testClient.PersistentVolumes().Get("pv-true", metav1.GetOptions{})
+	pv, err = testClient.CoreV1().PersistentVolumes().Get("pv-true", metav1.GetOptions{})
 	if err != nil {
 		t.Fatalf("Unexpected error getting pv: %v", err)
 	}
@@ -404,11 +404,11 @@ func TestPersistentVolumeClaimLabelSelectorMatchExpressions(t *testing.T) {
 	pv_true.ObjectMeta.SetLabels(map[string]string{"foo": "valA", "bar": ""})
 	pv_false.ObjectMeta.SetLabels(map[string]string{"foo": "valB", "baz": ""})
-	_, err = testClient.PersistentVolumes().Create(pv_true)
+	_, err = testClient.CoreV1().PersistentVolumes().Create(pv_true)
 	if err != nil {
 		t.Fatalf("Failed to create PersistentVolume: %v", err)
 	}
-	_, err = testClient.PersistentVolumes().Create(pv_false)
+	_, err = testClient.CoreV1().PersistentVolumes().Create(pv_false)
 	if err != nil {
 		t.Fatalf("Failed to create PersistentVolume: %v", err)
 	}
@@ -439,7 +439,7 @@
 		},
 	}
-	_, err = testClient.PersistentVolumeClaims(ns.Name).Create(pvc)
+	_, err = testClient.CoreV1().PersistentVolumeClaims(ns.Name).Create(pvc)
 	if err != nil {
 		t.Fatalf("Failed to create PersistentVolumeClaim: %v", err)
 	}
@@ -450,14 +450,14 @@
 	waitForPersistentVolumeClaimPhase(testClient, pvc.Name, ns.Name, watchPVC, v1.ClaimBound)
 	t.Log("claim bound")
-	pv, err := testClient.PersistentVolumes().Get("pv-false", metav1.GetOptions{})
+	pv, err := testClient.CoreV1().PersistentVolumes().Get("pv-false", metav1.GetOptions{})
 	if err != nil {
 		t.Fatalf("Unexpected error getting pv: %v", err)
 	}
 	if pv.Spec.ClaimRef != nil {
 		t.Fatalf("False PV shouldn't be bound")
 	}
-	pv, err = testClient.PersistentVolumes().Get("pv-true", metav1.GetOptions{})
+	pv, err = testClient.CoreV1().PersistentVolumes().Get("pv-true", metav1.GetOptions{})
 	if err != nil {
 		t.Fatalf("Unexpected error getting pv: %v", err)
 	}
@@ -502,7 +502,7 @@ func TestPersistentVolumeMultiPVs(t *testing.T) {
 	pvc := createPVC("pvc-2", ns.Name, strconv.Itoa(maxPVs/2)+"G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, "")
 	for i := 0; i < maxPVs; i++ {
-		_, err := testClient.PersistentVolumes().Create(pvs[i])
+		_, err := testClient.CoreV1().PersistentVolumes().Create(pvs[i])
 		if err != nil {
 			t.Errorf("Failed to create PersistentVolume %d: %v", i, err)
 		}
@@ -510,7 +510,7 @@
 	}
 	t.Log("volumes created")
-	_, err := testClient.PersistentVolumeClaims(ns.Name).Create(pvc)
+	_, err := testClient.CoreV1().PersistentVolumeClaims(ns.Name).Create(pvc)
 	if err != nil {
 		t.Errorf("Failed to create PersistentVolumeClaim: %v", err)
 	}
@@ -525,7 +525,7 @@
 	// only one PV is bound
 	bound := 0
 	for i := 0; i < maxPVs; i++ {
-		pv, err := testClient.PersistentVolumes().Get(pvs[i].Name, metav1.GetOptions{})
+		pv, err := testClient.CoreV1().PersistentVolumes().Get(pvs[i].Name, metav1.GetOptions{})
 		if err != nil {
 			t.Fatalf("Unexpected error getting pv: %v", err)
 		}
@@ -550,7 +550,7 @@
 	}
 	// deleting a claim releases the volume
-	if err := testClient.PersistentVolumeClaims(ns.Name).Delete(pvc.Name, nil); err != nil {
+	if err := testClient.CoreV1().PersistentVolumeClaims(ns.Name).Delete(pvc.Name, nil); err != nil {
 		t.Errorf("error deleting claim %s", pvc.Name)
 	}
 	t.Log("claim deleted")
@@ -599,7 +599,7 @@ func TestPersistentVolumeMultiPVsPVCs(t *testing.T) {
 	// with >3000 volumes.
 	go func() {
 		for i := 0; i < objCount; i++ {
-			_, _ = testClient.PersistentVolumes().Create(pvs[i])
+			_, _ = testClient.CoreV1().PersistentVolumes().Create(pvs[i])
 		}
 	}()
 	// Wait for them to get Available
@@ -620,7 +620,7 @@
 			// Modify PV
 			i := rand.Intn(objCount)
 			name := "pv-" + strconv.Itoa(i)
-			pv, err := testClient.PersistentVolumes().Get(name, metav1.GetOptions{})
+			pv, err := testClient.CoreV1().PersistentVolumes().Get(name, metav1.GetOptions{})
 			if err != nil {
 				// Silently ignore error, the PV may have be already deleted
 				// or not exists yet.
@@ -632,7 +632,7 @@
 			} else {
 				pv.Annotations["TestAnnotation"] = fmt.Sprint(rand.Int())
 			}
-			_, err = testClient.PersistentVolumes().Update(pv)
+			_, err = testClient.CoreV1().PersistentVolumes().Update(pv)
 			if err != nil {
 				// Silently ignore error, the PV may have been updated by
 				// the controller.
@@ -644,7 +644,7 @@
 			// Modify PVC
 			i := rand.Intn(objCount)
 			name := "pvc-" + strconv.Itoa(i)
-			pvc, err := testClient.PersistentVolumeClaims(metav1.NamespaceDefault).Get(name, metav1.GetOptions{})
+			pvc, err := testClient.CoreV1().PersistentVolumeClaims(metav1.NamespaceDefault).Get(name, metav1.GetOptions{})
 			if err != nil {
 				// Silently ignore error, the PVC may have be already
 				// deleted or not exists yet.
@@ -656,7 +656,7 @@
 			} else {
 				pvc.Annotations["TestAnnotation"] = fmt.Sprint(rand.Int())
 			}
-			_, err = testClient.PersistentVolumeClaims(metav1.NamespaceDefault).Update(pvc)
+			_, err = testClient.CoreV1().PersistentVolumeClaims(metav1.NamespaceDefault).Update(pvc)
 			if err != nil {
 				// Silently ignore error, the PVC may have been updated by
 				// the controller.
@@ -679,7 +679,7 @@
 	// Create the claims, again in a separate goroutine.
 	go func() {
 		for i := 0; i < objCount; i++ {
-			_, _ = testClient.PersistentVolumeClaims(ns.Name).Create(pvcs[i])
+			_, _ = testClient.CoreV1().PersistentVolumeClaims(ns.Name).Create(pvcs[i])
 		}
 	}()
@@ -699,7 +699,7 @@
 	// check that everything is bound to something
 	for i := 0; i < objCount; i++ {
-		pv, err := testClient.PersistentVolumes().Get(pvs[i].Name, metav1.GetOptions{})
+		pv, err := testClient.CoreV1().PersistentVolumes().Get(pvs[i].Name, metav1.GetOptions{})
 		if err != nil {
 			t.Fatalf("Unexpected error getting pv: %v", err)
 		}
@@ -708,7 +708,7 @@
 		}
 		glog.V(2).Infof("PV %q is bound to PVC %q", pv.Name, pv.Spec.ClaimRef.Name)
-		pvc, err := testClient.PersistentVolumeClaims(ns.Name).Get(pvcs[i].Name, metav1.GetOptions{})
+		pvc, err := testClient.CoreV1().PersistentVolumeClaims(ns.Name).Get(pvcs[i].Name, metav1.GetOptions{})
 		if err != nil {
 			t.Fatalf("Unexpected error getting pvc: %v", err)
 		}
@@ -748,13 +748,13 @@ func TestPersistentVolumeControllerStartup(t *testing.T) {
 		pvc := createPVC(pvcName, ns.Name, "1G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, "")
 		pvc.Annotations = map[string]string{"annBindCompleted": ""}
 		pvc.Spec.VolumeName = pvName
-		newPVC, err := testClient.PersistentVolumeClaims(ns.Name).Create(pvc)
+		newPVC, err := testClient.CoreV1().PersistentVolumeClaims(ns.Name).Create(pvc)
 		if err != nil {
 			t.Fatalf("Cannot create claim %q: %v", pvc.Name, err)
 		}
 		// Save Bound status as a separate transaction
 		newPVC.Status.Phase = v1.ClaimBound
-		newPVC, err = testClient.PersistentVolumeClaims(ns.Name).UpdateStatus(newPVC)
+		newPVC, err = testClient.CoreV1().PersistentVolumeClaims(ns.Name).UpdateStatus(newPVC)
 		if err != nil {
 			t.Fatalf("Cannot update claim status %q: %v", pvc.Name, err)
 		}
@@ -772,13 +772,13 @@
 			return
 		}
 		pv.Spec.ClaimRef = claimRef
-		newPV, err := testClient.PersistentVolumes().Create(pv)
+		newPV, err := testClient.CoreV1().PersistentVolumes().Create(pv)
 		if err != nil {
 			t.Fatalf("Cannot create volume %q: %v", pv.Name, err)
 		}
 		// Save Bound status as a separate transaction
 		newPV.Status.Phase = v1.VolumeBound
-		newPV, err = testClient.PersistentVolumes().UpdateStatus(newPV)
+		newPV, err = testClient.CoreV1().PersistentVolumes().UpdateStatus(newPV)
 		if err != nil {
 			t.Fatalf("Cannot update volume status %q: %v", pv.Name, err)
 		}
@@ -829,7 +829,7 @@
 	// check that everything is bound to something
 	for i := 0; i < objCount; i++ {
-		pv, err := testClient.PersistentVolumes().Get(pvs[i].Name, metav1.GetOptions{})
+		pv, err := testClient.CoreV1().PersistentVolumes().Get(pvs[i].Name, metav1.GetOptions{})
 		if err != nil {
 			t.Fatalf("Unexpected error getting pv: %v", err)
 		}
@@ -838,7 +838,7 @@
 		}
 		glog.V(2).Infof("PV %q is bound to PVC %q", pv.Name, pv.Spec.ClaimRef.Name)
-		pvc, err := testClient.PersistentVolumeClaims(ns.Name).Get(pvcs[i].Name, metav1.GetOptions{})
+		pvc, err := testClient.CoreV1().PersistentVolumeClaims(ns.Name).Get(pvcs[i].Name, metav1.GetOptions{})
 		if err != nil {
 			t.Fatalf("Unexpected error getting pvc: %v", err)
 		}
@@ -895,7 +895,7 @@ func TestPersistentVolumeProvisionMultiPVCs(t *testing.T) {
 	// early. It gets stuck with >3000 claims.
 	go func() {
 		for i := 0; i < objCount; i++ {
-			_, _ = testClient.PersistentVolumeClaims(ns.Name).Create(pvcs[i])
+			_, _ = testClient.CoreV1().PersistentVolumeClaims(ns.Name).Create(pvcs[i])
 		}
 	}()
@@ -907,7 +907,7 @@
 	glog.V(2).Infof("TestPersistentVolumeProvisionMultiPVCs: claims are bound")
 	// check that we have enough bound PVs
-	pvList, err := testClient.PersistentVolumes().List(metav1.ListOptions{})
+	pvList, err := testClient.CoreV1().PersistentVolumes().List(metav1.ListOptions{})
 	if err != nil {
 		t.Fatalf("Failed to list volumes: %s", err)
 	}
@@ -924,13 +924,13 @@
 	// Delete the claims
 	for i := 0; i < objCount; i++ {
-		_ = testClient.PersistentVolumeClaims(ns.Name).Delete(pvcs[i].Name, nil)
+		_ = testClient.CoreV1().PersistentVolumeClaims(ns.Name).Delete(pvcs[i].Name, nil)
 	}
 	// Wait for the PVs to get deleted by listing remaining volumes
 	// (delete events were unreliable)
 	for {
-		volumes, err := testClient.PersistentVolumes().List(metav1.ListOptions{})
+		volumes, err := testClient.CoreV1().PersistentVolumes().List(metav1.ListOptions{})
 		if err != nil {
 			t.Fatalf("Failed to list volumes: %v", err)
 		}
@@ -974,17 +974,17 @@ func TestPersistentVolumeMultiPVsDiffAccessModes(t *testing.T) {
 	pvc := createPVC("pvc-rwm", ns.Name, "5G", []v1.PersistentVolumeAccessMode{v1.ReadWriteMany}, "")

-	_, err := testClient.PersistentVolumes().Create(pv_rwm)
+	_, err := testClient.CoreV1().PersistentVolumes().Create(pv_rwm)
 	if err != nil {
 		t.Errorf("Failed to create PersistentVolume: %v", err)
 	}
-	_, err = testClient.PersistentVolumes().Create(pv_rwo)
+	_, err = testClient.CoreV1().PersistentVolumes().Create(pv_rwo)
 	if err != nil {
 		t.Errorf("Failed to create PersistentVolume: %v", err)
 	}
 	t.Log("volumes created")

-	_, err = testClient.PersistentVolumeClaims(ns.Name).Create(pvc)
+	_, err = testClient.CoreV1().PersistentVolumeClaims(ns.Name).Create(pvc)
 	if err != nil {
 		t.Errorf("Failed to create PersistentVolumeClaim: %v", err)
 	}
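
createPVC above is a helper local to the test; the object it returns is an ordinary PersistentVolumeClaim whose spec carries the access modes and requested capacity. A hedged sketch of an equivalent constructor (newPVC is an illustrative name, and the real helper's signature may differ):

	import (
		"k8s.io/api/core/v1"
		"k8s.io/apimachinery/pkg/api/resource"
		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	)

	// newPVC builds a claim requesting the given size and access modes,
	// e.g. newPVC("pvc-rwm", ns, "5G", []v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
	func newPVC(name, ns, size string, modes []v1.PersistentVolumeAccessMode) *v1.PersistentVolumeClaim {
		return &v1.PersistentVolumeClaim{
			ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: ns},
			Spec: v1.PersistentVolumeClaimSpec{
				AccessModes: modes,
				Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{
						v1.ResourceStorage: resource.MustParse(size),
					},
				},
			},
		}
	}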
@@ -997,14 +997,14 @@ func TestPersistentVolumeMultiPVsDiffAccessModes(t *testing.T) {
 	t.Log("claim bound")

 	// only RWM PV is bound
-	pv, err := testClient.PersistentVolumes().Get("pv-rwo", metav1.GetOptions{})
+	pv, err := testClient.CoreV1().PersistentVolumes().Get("pv-rwo", metav1.GetOptions{})
 	if err != nil {
 		t.Fatalf("Unexpected error getting pv: %v", err)
 	}
 	if pv.Spec.ClaimRef != nil {
 		t.Fatalf("ReadWriteOnce PV shouldn't be bound")
 	}
-	pv, err = testClient.PersistentVolumes().Get("pv-rwm", metav1.GetOptions{})
+	pv, err = testClient.CoreV1().PersistentVolumes().Get("pv-rwm", metav1.GetOptions{})
 	if err != nil {
 		t.Fatalf("Unexpected error getting pv: %v", err)
 	}
@@ -1016,7 +1016,7 @@ func TestPersistentVolumeMultiPVsDiffAccessModes(t *testing.T) {
 	}

 	// deleting a claim releases the volume
-	if err := testClient.PersistentVolumeClaims(ns.Name).Delete(pvc.Name, nil); err != nil {
+	if err := testClient.CoreV1().PersistentVolumeClaims(ns.Name).Delete(pvc.Name, nil); err != nil {
 		t.Errorf("error deleting claim %s", pvc.Name)
 	}
 	t.Log("claim deleted")
@@ -1142,11 +1142,11 @@ func createClients(ns *v1.Namespace, t *testing.T, s *httptest.Server, syncPeriod
 		t.Fatalf("Failed to construct PersistentVolumes: %v", err)
 	}

-	watchPV, err := testClient.PersistentVolumes().Watch(metav1.ListOptions{})
+	watchPV, err := testClient.CoreV1().PersistentVolumes().Watch(metav1.ListOptions{})
 	if err != nil {
 		t.Fatalf("Failed to watch PersistentVolumes: %v", err)
 	}
-	watchPVC, err := testClient.PersistentVolumeClaims(ns.Name).Watch(metav1.ListOptions{})
+	watchPVC, err := testClient.CoreV1().PersistentVolumeClaims(ns.Name).Watch(metav1.ListOptions{})
 	if err != nil {
 		t.Fatalf("Failed to watch PersistentVolumeClaims: %v", err)
 	}
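
With the watches now opened through CoreV1(), the values createClients returns are plain watch.Interface handles. A short sketch of consuming one (watchPVsUntilDeleted is an illustrative helper; real tests usually select on the result channel together with a timeout):

	import (
		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		"k8s.io/apimachinery/pkg/watch"
		clientset "k8s.io/client-go/kubernetes"
	)

	// watchPVsUntilDeleted opens a PV watch and returns once a delete
	// event is observed or the result channel closes.
	func watchPVsUntilDeleted(cs clientset.Interface) error {
		w, err := cs.CoreV1().PersistentVolumes().Watch(metav1.ListOptions{})
		if err != nil {
			return err
		}
		defer w.Stop()
		for event := range w.ResultChan() {
			// Added/Modified/Deleted events arrive on this channel.
			if event.Type == watch.Deleted {
				return nil
			}
		}
		return nil
	}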

View File

@@ -92,7 +92,7 @@ func main() {
 	var nodes *v1.NodeList
 	for start := time.Now(); time.Since(start) < nodeListTimeout; time.Sleep(2 * time.Second) {
-		nodes, err = client.Nodes().List(metav1.ListOptions{})
+		nodes, err = client.CoreV1().Nodes().List(metav1.ListOptions{})
 		if err == nil {
 			break
 		}
@@ -114,7 +114,7 @@ func main() {
 	queries := *queriesAverage * len(nodes.Items) * *podsPerNode

 	// Create the namespace
-	got, err := client.Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{GenerateName: "serve-hostnames-"}})
+	got, err := client.CoreV1().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{GenerateName: "serve-hostnames-"}})
 	if err != nil {
 		glog.Fatalf("Failed to create namespace: %v", err)
 	}
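
The namespace here is created with ObjectMeta.GenerateName rather than Name, so the API server appends a unique suffix and concurrent soak runs cannot collide. A minimal sketch of the same call in isolation (createRunNamespace is an illustrative name):

	import (
		"k8s.io/api/core/v1"
		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		clientset "k8s.io/client-go/kubernetes"
	)

	// createRunNamespace asks the server to generate a unique name
	// from the "serve-hostnames-" prefix and returns it.
	func createRunNamespace(cs clientset.Interface) (string, error) {
		got, err := cs.CoreV1().Namespaces().Create(&v1.Namespace{
			ObjectMeta: metav1.ObjectMeta{GenerateName: "serve-hostnames-"},
		})
		if err != nil {
			return "", err
		}
		return got.Name, nil
	}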
@@ -125,7 +125,7 @@ func main() {
 	} else {
 		// wait until the namespace disappears
 		for i := 0; i < int(namespaceDeleteTimeout/time.Second); i++ {
-			if _, err := client.Namespaces().Get(ns, metav1.GetOptions{}); err != nil {
+			if _, err := client.CoreV1().Namespaces().Get(ns, metav1.GetOptions{}); err != nil {
 				if errors.IsNotFound(err) {
 					return
 				}
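
The errors.IsNotFound check is what actually signals completion: Get keeps succeeding while the namespace is still terminating, and only a NotFound error means deletion has finished. A hedged sketch of the same wait as a helper (waitForNamespaceGone is an illustrative name):

	import (
		"fmt"
		"time"

		"k8s.io/apimachinery/pkg/api/errors"
		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		clientset "k8s.io/client-go/kubernetes"
	)

	// waitForNamespaceGone polls Get until the namespace is NotFound.
	func waitForNamespaceGone(cs clientset.Interface, ns string, timeout time.Duration) error {
		for start := time.Now(); time.Since(start) < timeout; time.Sleep(time.Second) {
			_, err := cs.CoreV1().Namespaces().Get(ns, metav1.GetOptions{})
			if errors.IsNotFound(err) {
				return nil // namespace fully deleted
			}
			if err != nil {
				return err // unexpected failure
			}
		}
		return fmt.Errorf("namespace %q still present after %v", ns, timeout)
	}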
@@ -142,7 +142,7 @@ func main() {
 	var svc *v1.Service
 	for start := time.Now(); time.Since(start) < serviceCreateTimeout; time.Sleep(2 * time.Second) {
 		t := time.Now()
-		svc, err = client.Services(ns).Create(&v1.Service{
+		svc, err = client.CoreV1().Services(ns).Create(&v1.Service{
 			ObjectMeta: metav1.ObjectMeta{
 				Name: "serve-hostnames",
 				Labels: map[string]string{
@@ -175,7 +175,7 @@ func main() {
 		glog.Infof("Cleaning up service %s/serve-hostnames", ns)
 		// Make several attempts to delete the service.
 		for start := time.Now(); time.Since(start) < deleteTimeout; time.Sleep(1 * time.Second) {
-			if err := client.Services(ns).Delete(svc.Name, nil); err == nil {
+			if err := client.CoreV1().Services(ns).Delete(svc.Name, nil); err == nil {
 				return
 			}
 			glog.Warningf("After %v unable to delete service %s/%s: %v", time.Since(start), ns, svc.Name, err)
@@ -192,7 +192,7 @@ func main() {
 			for start := time.Now(); time.Since(start) < podCreateTimeout; time.Sleep(2 * time.Second) {
 				glog.Infof("Creating pod %s/%s on node %s", ns, podName, node.Name)
 				t := time.Now()
-				_, err = client.Pods(ns).Create(&v1.Pod{
+				_, err = client.CoreV1().Pods(ns).Create(&v1.Pod{
 					ObjectMeta: metav1.ObjectMeta{
 						Name: podName,
 						Labels: map[string]string{
@@ -228,7 +228,7 @@ func main() {
 	// Make several attempts to delete the pods.
 	for _, podName := range podNames {
 		for start := time.Now(); time.Since(start) < deleteTimeout; time.Sleep(1 * time.Second) {
-			if err = client.Pods(ns).Delete(podName, nil); err == nil {
+			if err = client.CoreV1().Pods(ns).Delete(podName, nil); err == nil {
 				break
 			}
 			glog.Warningf("After %v failed to delete pod %s/%s: %v", time.Since(start), ns, podName, err)
@@ -240,7 +240,7 @@ func main() {
 	for _, podName := range podNames {
 		var pod *v1.Pod
 		for start := time.Now(); time.Since(start) < podStartTimeout; time.Sleep(5 * time.Second) {
-			pod, err = client.Pods(ns).Get(podName, metav1.GetOptions{})
+			pod, err = client.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
 			if err != nil {
 				glog.Warningf("Get pod %s/%s failed, ignoring for %v: %v", ns, podName, err, podStartTimeout)
 				continue