diff --git a/cmd/kube-proxy/app/server.go b/cmd/kube-proxy/app/server.go
index 14529e31db..ed9f24ecb6 100644
--- a/cmd/kube-proxy/app/server.go
+++ b/cmd/kube-proxy/app/server.go
@@ -31,6 +31,7 @@ import (
 	"k8s.io/kubernetes/cmd/kube-proxy/app/options"
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/v1"
+	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 	unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
 	"k8s.io/kubernetes/pkg/client/record"
@@ -400,7 +401,7 @@ func getConntrackMax(config *options.ProxyServerConfig) (int, error) {
 }
 
 type nodeGetter interface {
-	Get(hostname string) (*api.Node, error)
+	Get(hostname string, options metav1.GetOptions) (*api.Node, error)
 }
 
 func getProxyMode(proxyMode string, client nodeGetter, hostname string, iptver iptables.IPTablesVersioner, kcompat iptables.KernelCompatTester) string {
@@ -417,7 +418,7 @@ func getProxyMode(proxyMode string, client nodeGetter, hostname string, iptver i
 		glog.Errorf("nodeGetter is nil: assuming iptables proxy")
 		return tryIPTablesProxy(iptver, kcompat)
 	}
-	node, err := client.Get(hostname)
+	node, err := client.Get(hostname, metav1.GetOptions{})
 	if err != nil {
 		glog.Errorf("Can't get Node %q, assuming iptables proxy, err: %v", hostname, err)
 		return tryIPTablesProxy(iptver, kcompat)
@@ -464,7 +465,7 @@ func (s *ProxyServer) birthCry() {
 
 func getNodeIP(client clientset.Interface, hostname string) net.IP {
 	var nodeIP net.IP
-	node, err := client.Core().Nodes().Get(hostname)
+	node, err := client.Core().Nodes().Get(hostname, metav1.GetOptions{})
 	if err != nil {
 		glog.Warningf("Failed to retrieve node info: %v", err)
 		return nil
diff --git a/cmd/kube-proxy/app/server_test.go b/cmd/kube-proxy/app/server_test.go
index 97fc28db54..2f55566299 100644
--- a/cmd/kube-proxy/app/server_test.go
+++ b/cmd/kube-proxy/app/server_test.go
@@ -27,6 +27,7 @@ import (
 	"k8s.io/kubernetes/cmd/kube-proxy/app/options"
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/apis/componentconfig"
+	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 	"k8s.io/kubernetes/pkg/util/iptables"
 )
 
@@ -34,7 +35,7 @@
 type fakeNodeInterface struct {
 	node api.Node
 }
 
-func (fake *fakeNodeInterface) Get(hostname string) (*api.Node, error) {
+func (fake *fakeNodeInterface) Get(hostname string, options metav1.GetOptions) (*api.Node, error) {
 	return &fake.node, nil
 }
diff --git a/cmd/kubeadm/app/master/apiclient.go b/cmd/kubeadm/app/master/apiclient.go
index f6b5fbf466..d394a341d4 100644
--- a/cmd/kubeadm/app/master/apiclient.go
+++ b/cmd/kubeadm/app/master/apiclient.go
@@ -260,7 +260,7 @@ func createDummyDeployment(client *clientset.Clientset) {
 	})
 
 	wait.PollInfinite(apiCallRetryInterval, func() (bool, error) {
-		d, err := client.Extensions().Deployments(api.NamespaceSystem).Get("dummy")
+		d, err := client.Extensions().Deployments(api.NamespaceSystem).Get("dummy", metav1.GetOptions{})
 		if err != nil {
 			fmt.Printf(" failed to get test deployment [%v] (will retry)", err)
 			return false, nil
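The kube-proxy pair above is the template for the whole patch: every client Get gains a metav1.GetOptions argument, and any caller-defined interface or test fake that pins the old one-argument signature must change in the same commit or the build breaks. A minimal self-contained sketch of that pattern, using only types and import paths that appear in this patch (the main function is illustrative):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
)

// nodeGetter is the narrowed interface from server.go; the real clientset's
// Nodes() client satisfies it, and so must every test double.
type nodeGetter interface {
	Get(hostname string, options metav1.GetOptions) (*api.Node, error)
}

// fakeNodeInterface mirrors server_test.go: it compiles only because its Get
// carries the same two-argument signature as the interface.
type fakeNodeInterface struct {
	node api.Node
}

func (fake *fakeNodeInterface) Get(hostname string, options metav1.GetOptions) (*api.Node, error) {
	return &fake.node, nil
}

func main() {
	var client nodeGetter = &fakeNodeInterface{}
	// Passing the zero value keeps the old single-argument behavior.
	node, err := client.Get("node-1", metav1.GetOptions{})
	fmt.Println(node, err)
}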
diff --git a/cmd/kubeadm/app/master/discovery.go b/cmd/kubeadm/app/master/discovery.go
index 64ce8ec0ef..e56a018f88 100644
--- a/cmd/kubeadm/app/master/discovery.go
+++ b/cmd/kubeadm/app/master/discovery.go
@@ -27,6 +27,7 @@ import (
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/v1"
 	extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
+	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
 	certutil "k8s.io/kubernetes/pkg/util/cert"
 	"k8s.io/kubernetes/pkg/util/wait"
@@ -133,7 +134,7 @@ func CreateDiscoveryDeploymentAndSecret(cfg *kubeadmapi.MasterConfiguration, cli
 	start := time.Now()
 	wait.PollInfinite(apiCallRetryInterval, func() (bool, error) {
-		d, err := client.Extensions().Deployments(api.NamespaceSystem).Get(kubeDiscoveryName)
+		d, err := client.Extensions().Deployments(api.NamespaceSystem).Get(kubeDiscoveryName, metav1.GetOptions{})
 		if err != nil {
 			return false, nil
 		}
diff --git a/cmd/kubelet/app/server.go b/cmd/kubelet/app/server.go
index 9ae6ed76f0..4cc89acc4e 100644
--- a/cmd/kubelet/app/server.go
+++ b/cmd/kubelet/app/server.go
@@ -41,6 +41,7 @@ import (
 	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/apis/componentconfig"
 	componentconfigv1alpha1 "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1"
+	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 	"k8s.io/kubernetes/pkg/capabilities"
 	"k8s.io/kubernetes/pkg/client/chaosclient"
 	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
@@ -188,14 +189,14 @@ func getRemoteKubeletConfig(s *options.KubeletServer, kubeDeps *kubelet.KubeletD
 		return nil, err
 	}
 	// look for kubelet-<node-name> configmap from "kube-system"
-	configmap, err := kubeClient.CoreV1Client.ConfigMaps("kube-system").Get(fmt.Sprintf("kubelet-%s", nodename))
+	configmap, err := kubeClient.CoreV1Client.ConfigMaps("kube-system").Get(fmt.Sprintf("kubelet-%s", nodename), metav1.GetOptions{})
 	if err != nil {
 		return nil, err
 	}
 	return configmap, nil
 }
 // No cloud provider yet, so can't get the nodename via Cloud.Instances().CurrentNodeName(hostname), try just using the hostname
-	configmap, err := kubeClient.CoreV1Client.ConfigMaps("kube-system").Get(fmt.Sprintf("kubelet-%s", hostname))
+	configmap, err := kubeClient.CoreV1Client.ConfigMaps("kube-system").Get(fmt.Sprintf("kubelet-%s", hostname), metav1.GetOptions{})
 	if err != nil {
 		return nil, fmt.Errorf("cloud provider was nil, and attempt to use hostname to find config resulted in: %v", err)
 	}
diff --git a/federation/pkg/federation-controller/replicaset/replicasetcontroller_test.go b/federation/pkg/federation-controller/replicaset/replicasetcontroller_test.go
index 44a5c9d986..15cd262d96 100644
--- a/federation/pkg/federation-controller/replicaset/replicasetcontroller_test.go
+++ b/federation/pkg/federation-controller/replicaset/replicasetcontroller_test.go
@@ -28,6 +28,7 @@ import (
 	"k8s.io/kubernetes/pkg/api/meta"
 	apiv1 "k8s.io/kubernetes/pkg/api/v1"
 	extensionsv1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
+	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 	kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
 	kubeclientfake "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
 	"k8s.io/kubernetes/pkg/client/testing/core"
@@ -122,7 +123,7 @@ func TestReplicaSetController(t *testing.T) {
 	fedrswatch.Add(rs)
 	time.Sleep(1 * time.Second)
 
-	rs1, _ := kube1clientset.Extensions().ReplicaSets(apiv1.NamespaceDefault).Get(rs.Name)
+	rs1, _ := kube1clientset.Extensions().ReplicaSets(apiv1.NamespaceDefault).Get(rs.Name, metav1.GetOptions{})
 	kube1rswatch.Add(rs1)
 	rs1.Status.Replicas = *rs1.Spec.Replicas
 	rs1.Status.FullyLabeledReplicas = *rs1.Spec.Replicas
@@ -131,7 +132,7 @@ func TestReplicaSetController(t *testing.T) {
 	rs1, _ = kube1clientset.Extensions().ReplicaSets(apiv1.NamespaceDefault).UpdateStatus(rs1)
 	kube1rswatch.Modify(rs1)
 
-	rs2, _ := kube2clientset.Extensions().ReplicaSets(apiv1.NamespaceDefault).Get(rs.Name)
+	rs2, _ := kube2clientset.Extensions().ReplicaSets(apiv1.NamespaceDefault).Get(rs.Name, metav1.GetOptions{})
 	kube2rswatch.Add(rs2)
 	rs2.Status.Replicas = *rs2.Spec.Replicas
 	rs2.Status.FullyLabeledReplicas = *rs2.Spec.Replicas
@@ -141,7 +142,7 @@ func TestReplicaSetController(t *testing.T) {
 	kube2rswatch.Modify(rs2)
 	time.Sleep(1 * time.Second)
 
-	rs, _ = fedclientset.Extensions().ReplicaSets(apiv1.NamespaceDefault).Get(rs.Name)
+	rs, _ = fedclientset.Extensions().ReplicaSets(apiv1.NamespaceDefault).Get(rs.Name, metav1.GetOptions{})
 	assert.Equal(t, *rs.Spec.Replicas, *rs1.Spec.Replicas+*rs2.Spec.Replicas)
 	assert.Equal(t, rs.Status.Replicas, rs1.Status.Replicas+rs2.Status.Replicas)
 	assert.Equal(t, rs.Status.FullyLabeledReplicas, rs1.Status.FullyLabeledReplicas+rs2.Status.FullyLabeledReplicas)
@@ -154,7 +155,7 @@ func TestReplicaSetController(t *testing.T) {
 	fedrswatch.Modify(rs)
 	time.Sleep(1 * time.Second)
 
-	rs1, _ = kube1clientset.Extensions().ReplicaSets(apiv1.NamespaceDefault).Get(rs.Name)
+	rs1, _ = kube1clientset.Extensions().ReplicaSets(apiv1.NamespaceDefault).Get(rs.Name, metav1.GetOptions{})
 	rs1.Status.Replicas = *rs1.Spec.Replicas
 	rs1.Status.FullyLabeledReplicas = *rs1.Spec.Replicas
 	rs1.Status.ReadyReplicas = *rs1.Spec.Replicas
@@ -162,7 +163,7 @@ func TestReplicaSetController(t *testing.T) {
 	rs1, _ = kube1clientset.Extensions().ReplicaSets(apiv1.NamespaceDefault).UpdateStatus(rs1)
 	kube1rswatch.Modify(rs1)
 
-	rs2, _ = kube2clientset.Extensions().ReplicaSets(apiv1.NamespaceDefault).Get(rs.Name)
+	rs2, _ = kube2clientset.Extensions().ReplicaSets(apiv1.NamespaceDefault).Get(rs.Name, metav1.GetOptions{})
 	rs2.Status.Replicas = *rs2.Spec.Replicas
 	rs2.Status.FullyLabeledReplicas = *rs2.Spec.Replicas
 	rs2.Status.ReadyReplicas = *rs2.Spec.Replicas
@@ -171,7 +172,7 @@ func TestReplicaSetController(t *testing.T) {
 	kube2rswatch.Modify(rs2)
 	time.Sleep(1 * time.Second)
 
-	rs, _ = fedclientset.Extensions().ReplicaSets(apiv1.NamespaceDefault).Get(rs.Name)
+	rs, _ = fedclientset.Extensions().ReplicaSets(apiv1.NamespaceDefault).Get(rs.Name, metav1.GetOptions{})
 	assert.Equal(t, *rs.Spec.Replicas, *rs1.Spec.Replicas+*rs2.Spec.Replicas)
 	assert.Equal(t, rs.Status.Replicas, rs1.Status.Replicas+rs2.Status.Replicas)
 	assert.Equal(t, rs.Status.FullyLabeledReplicas, rs1.Status.FullyLabeledReplicas+rs2.Status.FullyLabeledReplicas)
diff --git a/federation/pkg/federation-controller/service/service_helper.go b/federation/pkg/federation-controller/service/service_helper.go
index 2c3b704b15..983e947861 100644
--- a/federation/pkg/federation-controller/service/service_helper.go
+++ b/federation/pkg/federation-controller/service/service_helper.go
@@ -23,6 +23,7 @@ import (
 	fedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5"
 	"k8s.io/kubernetes/pkg/api/errors"
 	v1 "k8s.io/kubernetes/pkg/api/v1"
+	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 	cache "k8s.io/kubernetes/pkg/client/cache"
 	"k8s.io/kubernetes/pkg/controller"
 
@@ -136,7 +137,7 @@ func (cc *clusterClientCache) syncService(key, clusterName string, clusterCache
 		if isDeletion {
 			// cachedService is not reliable here as
 			// deleting cache is the last step of federation service deletion
-			_, err := fedClient.Core().Services(cachedService.lastState.Namespace).Get(cachedService.lastState.Name)
+			_, err := fedClient.Core().Services(cachedService.lastState.Namespace).Get(cachedService.lastState.Name, metav1.GetOptions{})
 			// rebuild service if federation service still exists
 			if err == nil || !errors.IsNotFound(err) {
 				return sc.ensureClusterService(cachedService, clusterName, cachedService.appliedState, clusterCache.clientset)
@@ -263,7 +264,7 @@ func (cc *clusterClientCache) persistFedServiceUpdate(cachedService *cachedServi
 	glog.V(5).Infof("Persist federation service status %s/%s", service.Namespace, service.Name)
 	var err error
 	for i := 0; i < clientRetryCount; i++ {
-		_, err := fedClient.Core().Services(service.Namespace).Get(service.Name)
+		_, err := fedClient.Core().Services(service.Namespace).Get(service.Name, metav1.GetOptions{})
 		if errors.IsNotFound(err) {
 			glog.Infof("Not persisting update to service '%s/%s' that no longer exists: %v",
 				service.Namespace, service.Name, err)
diff --git a/federation/pkg/federation-controller/service/servicecontroller.go b/federation/pkg/federation-controller/service/servicecontroller.go
index 66aff482f7..4df8bc6eaa 100644
--- a/federation/pkg/federation-controller/service/servicecontroller.go
+++ b/federation/pkg/federation-controller/service/servicecontroller.go
@@ -31,6 +31,7 @@ import (
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/errors"
 	v1 "k8s.io/kubernetes/pkg/api/v1"
+	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 	cache "k8s.io/kubernetes/pkg/client/cache"
 	kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
 	"k8s.io/kubernetes/pkg/client/record"
@@ -390,7 +391,7 @@ func (s *ServiceController) ensureClusterService(cachedService *cachedService, c
 	var err error
 	var needUpdate bool
 	for i := 0; i < clientRetryCount; i++ {
-		svc, err := client.Core().Services(service.Namespace).Get(service.Name)
+		svc, err := client.Core().Services(service.Namespace).Get(service.Name, metav1.GetOptions{})
 		if err == nil {
 			// service exists
 			glog.V(5).Infof("Found service %s/%s from cluster %s", service.Namespace, service.Name, clusterName)
diff --git a/federation/pkg/federation-controller/util/cluster_util.go b/federation/pkg/federation-controller/util/cluster_util.go
index 5374ef1606..11b64c6869 100644
--- a/federation/pkg/federation-controller/util/cluster_util.go
+++ b/federation/pkg/federation-controller/util/cluster_util.go
@@ -26,6 +26,7 @@ import (
 	federation_v1beta1 "k8s.io/kubernetes/federation/apis/federation/v1beta1"
 	fedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5"
 	"k8s.io/kubernetes/pkg/api"
+	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 	"k8s.io/kubernetes/pkg/client/restclient"
 	"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
@@ -113,7 +114,7 @@ var KubeconfigGetterForSecret = func(secretName string) clientcmd.KubeconfigGett
 		data = []byte{}
 		var secret *api.Secret
 		err = wait.PollImmediate(1*time.Second, getSecretTimeout, func() (bool, error) {
-			secret, err = client.Core().Secrets(namespace).Get(secretName)
+			secret, err = client.Core().Secrets(namespace).Get(secretName, metav1.GetOptions{})
 			if err == nil {
 				return true, nil
 			}
diff --git a/federation/pkg/kubefed/init/init.go b/federation/pkg/kubefed/init/init.go
index a07a26786a..9bfb4893fc 100644
--- a/federation/pkg/kubefed/init/init.go
+++ b/federation/pkg/kubefed/init/init.go
@@ -41,6 +41,7 @@ import (
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/resource"
 	"k8s.io/kubernetes/pkg/apis/extensions"
+	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 	client "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 	"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
 	clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
@@ -286,7 +287,7 @@ func waitForLoadBalancerAddress(clientset *client.Clientset, svc *api.Service, d
 }
 
 	err := wait.PollImmediateInfinite(lbAddrRetryInterval, func() (bool, error) {
-		pollSvc, err := clientset.Core().Services(svc.Namespace).Get(svc.Name)
+		pollSvc, err := clientset.Core().Services(svc.Namespace).Get(svc.Name, metav1.GetOptions{})
 		if err != nil {
 			return false, nil
 		}
diff --git a/pkg/client/leaderelection/resourcelock/endpointslock.go b/pkg/client/leaderelection/resourcelock/endpointslock.go
index 9ebd12af5c..b2a01e00cd 100644
--- a/pkg/client/leaderelection/resourcelock/endpointslock.go
+++ b/pkg/client/leaderelection/resourcelock/endpointslock.go
@@ -22,6 +22,7 @@ import (
 	"fmt"
 
 	"k8s.io/kubernetes/pkg/api/v1"
+	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
 )
 
@@ -37,7 +38,7 @@ type EndpointsLock struct {
 func (el *EndpointsLock) Get() (*LeaderElectionRecord, error) {
 	var record LeaderElectionRecord
 	var err error
-	el.e, err = el.Client.Core().Endpoints(el.EndpointsMeta.Namespace).Get(el.EndpointsMeta.Name)
+	el.e, err = el.Client.Core().Endpoints(el.EndpointsMeta.Namespace).Get(el.EndpointsMeta.Name, metav1.GetOptions{})
 	if err != nil {
 		return nil, err
 	}
diff --git a/pkg/client/testing/core/fake_test.go b/pkg/client/testing/core/fake_test.go
index 1170d10fdb..78f0286df3 100644
--- a/pkg/client/testing/core/fake_test.go
+++ b/pkg/client/testing/core/fake_test.go
@@ -20,6 +20,7 @@ import (
 	"testing"
 
 	"k8s.io/kubernetes/pkg/api"
+	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 	clientsetfake "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
 )
 
@@ -60,7 +61,7 @@ func TestFakeClientSetFiltering(t *testing.T) {
 		}
 	}
 
-	pod1, err := tc.Core().Pods("nsA").Get("pod-1")
+	pod1, err := tc.Core().Pods("nsA").Get("pod-1", metav1.GetOptions{})
 	if err != nil {
 		t.Fatalf("Pods.Get: %s", err)
 	}
@@ -71,7 +72,7 @@ func TestFakeClientSetFiltering(t *testing.T) {
 		t.Fatalf("Expected to find pod nsA/pod-1t, got %s/%s", pod1.Namespace, pod1.Name)
 	}
 
-	wrongPod, err := tc.Core().Pods("nsB").Get("pod-1")
+	wrongPod, err := tc.Core().Pods("nsB").Get("pod-1", metav1.GetOptions{})
 	if err == nil {
 		t.Fatalf("Pods.Get: expected nsB/pod-1 not to match, but it matched %s/%s", wrongPod.Namespace, wrongPod.Name)
 	}
diff --git a/pkg/client/unversioned/conditions.go b/pkg/client/unversioned/conditions.go
index 077dff362e..c5ed6a806d 100644
--- a/pkg/client/unversioned/conditions.go
+++ b/pkg/client/unversioned/conditions.go
@@ -24,6 +24,7 @@ import (
 	"k8s.io/kubernetes/pkg/apis/apps"
 	"k8s.io/kubernetes/pkg/apis/batch"
 	"k8s.io/kubernetes/pkg/apis/extensions"
+	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 	appsclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion"
 	batchclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion"
 	coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
@@ -42,7 +43,7 @@ func ControllerHasDesiredReplicas(rcClient coreclient.ReplicationControllersGett
 	desiredGeneration := controller.Generation
 
 	return func() (bool, error) {
-		ctrl, err := rcClient.ReplicationControllers(controller.Namespace).Get(controller.Name)
+		ctrl, err := rcClient.ReplicationControllers(controller.Namespace).Get(controller.Name, metav1.GetOptions{})
 		if err != nil {
 			return false, err
 		}
@@ -64,7 +65,7 @@ func ReplicaSetHasDesiredReplicas(rsClient extensionsclient.ReplicaSetsGetter, r
 	desiredGeneration := replicaSet.Generation
 
 	return func() (bool, error) {
-		rs, err := rsClient.ReplicaSets(replicaSet.Namespace).Get(replicaSet.Name)
+		rs, err := rsClient.ReplicaSets(replicaSet.Namespace).Get(replicaSet.Name, metav1.GetOptions{})
 		if err != nil {
 			return false, err
 		}
@@ -81,7 +82,7 @@ func ReplicaSetHasDesiredReplicas(rsClient extensionsclient.ReplicaSetsGetter, r
 func StatefulSetHasDesiredPets(psClient appsclient.StatefulSetsGetter, petset *apps.StatefulSet) wait.ConditionFunc {
 	// TODO: Differentiate between 0 pets and a really quick scale down using generation.
 	return func() (bool, error) {
-		ps, err := psClient.StatefulSets(petset.Namespace).Get(petset.Name)
+		ps, err := psClient.StatefulSets(petset.Namespace).Get(petset.Name, metav1.GetOptions{})
 		if err != nil {
 			return false, err
 		}
@@ -93,7 +94,7 @@ func StatefulSetHasDesiredPets(psClient appsclient.StatefulSetsGetter, petset *a
 // for a job equals the current active counts or is less by an appropriate successful/unsuccessful count.
 func JobHasDesiredParallelism(jobClient batchclient.JobsGetter, job *batch.Job) wait.ConditionFunc {
 	return func() (bool, error) {
-		job, err := jobClient.Jobs(job.Namespace).Get(job.Name)
+		job, err := jobClient.Jobs(job.Namespace).Get(job.Name, metav1.GetOptions{})
 		if err != nil {
 			return false, err
 		}
@@ -124,7 +125,7 @@ func DeploymentHasDesiredReplicas(dClient extensionsclient.DeploymentsGetter, de
 	desiredGeneration := deployment.Generation
 
 	return func() (bool, error) {
-		deployment, err := dClient.Deployments(deployment.Namespace).Get(deployment.Name)
+		deployment, err := dClient.Deployments(deployment.Namespace).Get(deployment.Name, metav1.GetOptions{})
 		if err != nil {
 			return false, err
 		}
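The conditions.go hunks all share one shape: a wait.ConditionFunc closes over an object and re-reads it on each poll tick. A sketch of that pattern with the two-argument Get, modeled on ControllerHasDesiredReplicas above (the generation/replica comparison paraphrases the function's intent; the lines it lives on are not shown in this hunk):

package example

import (
	"k8s.io/kubernetes/pkg/api"
	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
	coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
	"k8s.io/kubernetes/pkg/util/wait"
)

// controllerHasDesiredReplicas re-reads the controller on each poll and
// reports whether the observed generation and replica count have caught up.
func controllerHasDesiredReplicas(rcClient coreclient.ReplicationControllersGetter, controller *api.ReplicationController) wait.ConditionFunc {
	desiredGeneration := controller.Generation
	return func() (bool, error) {
		// The zero GetOptions{} asks for a plain read, exactly what the
		// one-argument Get did before this patch.
		ctrl, err := rcClient.ReplicationControllers(controller.Namespace).Get(controller.Name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		return ctrl.Status.ObservedGeneration >= desiredGeneration &&
			ctrl.Status.Replicas == ctrl.Spec.Replicas, nil
	}
}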
diff --git a/pkg/controller/client_builder.go b/pkg/controller/client_builder.go
index 3913a1bd3c..4e197638fe 100644
--- a/pkg/controller/client_builder.go
+++ b/pkg/controller/client_builder.go
@@ -23,6 +23,7 @@ import (
 	"k8s.io/kubernetes/pkg/api"
 	apierrors "k8s.io/kubernetes/pkg/api/errors"
 	"k8s.io/kubernetes/pkg/api/v1"
+	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 	"k8s.io/kubernetes/pkg/client/cache"
 	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
 	v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
@@ -99,13 +100,13 @@ func (b SAControllerClientBuilder) Config(name string) (*restclient.Config, erro
 	clientConfig := restclient.AnonymousClientConfig(b.ClientConfig)
 
 	// we need the SA UID to find a matching SA token
-	sa, err := b.CoreClient.ServiceAccounts(b.Namespace).Get(name)
+	sa, err := b.CoreClient.ServiceAccounts(b.Namespace).Get(name, metav1.GetOptions{})
 	if err != nil && !apierrors.IsNotFound(err) {
 		return nil, err
 	} else if apierrors.IsNotFound(err) {
 		// check to see if the namespace exists. If it isn't a NotFound, just try to create the SA.
 		// It'll probably fail, but perhaps that will have a better message.
-		if _, err := b.CoreClient.Namespaces().Get(b.Namespace); apierrors.IsNotFound(err) {
+		if _, err := b.CoreClient.Namespaces().Get(b.Namespace, metav1.GetOptions{}); apierrors.IsNotFound(err) {
 			_, err = b.CoreClient.Namespaces().Create(&v1.Namespace{ObjectMeta: v1.ObjectMeta{Name: b.Namespace}})
 			if err != nil && !apierrors.IsAlreadyExists(err) {
 				return nil, err
diff --git a/pkg/controller/cronjob/injection.go b/pkg/controller/cronjob/injection.go
index f62d44fde1..969c964470 100644
--- a/pkg/controller/cronjob/injection.go
+++ b/pkg/controller/cronjob/injection.go
@@ -22,6 +22,7 @@ import (
 	"k8s.io/kubernetes/pkg/api/v1"
 	batch "k8s.io/kubernetes/pkg/apis/batch/v2alpha1"
+	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
 	"k8s.io/kubernetes/pkg/client/record"
 	"k8s.io/kubernetes/pkg/labels"
@@ -97,7 +98,7 @@ func copyAnnotations(template *batch.JobTemplateSpec) labels.Set {
 }
 
 func (r realJobControl) GetJob(namespace, name string) (*batch.Job, error) {
-	return r.KubeClient.BatchV2alpha1().Jobs(namespace).Get(name)
+	return r.KubeClient.BatchV2alpha1().Jobs(namespace).Get(name, metav1.GetOptions{})
 }
 
 func (r realJobControl) UpdateJob(namespace string, job *batch.Job) (*batch.Job, error) {
diff --git a/pkg/controller/daemon/daemoncontroller.go b/pkg/controller/daemon/daemoncontroller.go
index 3cd2e2ade6..b7ebab215d 100644
--- a/pkg/controller/daemon/daemoncontroller.go
+++ b/pkg/controller/daemon/daemoncontroller.go
@@ -551,7 +551,7 @@ func storeDaemonSetStatus(dsClient unversionedextensions.DaemonSetInterface, ds
 		}
 
 		// Update the set with the latest resource version for the next poll
-		if ds, getErr = dsClient.Get(ds.Name); getErr != nil {
+		if ds, getErr = dsClient.Get(ds.Name, metav1.GetOptions{}); getErr != nil {
 			// If the GET fails we can't trust status.Replicas anymore. This error
 			// is bound to be more interesting than the update failure.
 			return getErr
diff --git a/pkg/controller/deployment/util/deployment_util.go b/pkg/controller/deployment/util/deployment_util.go
index bb129f6fd6..2439a33892 100644
--- a/pkg/controller/deployment/util/deployment_util.go
+++ b/pkg/controller/deployment/util/deployment_util.go
@@ -677,7 +677,7 @@ func FindOldReplicaSets(deployment *extensions.Deployment, rsList []*extensions.
 // WaitForReplicaSetUpdated polls the replica set until it is updated.
 func WaitForReplicaSetUpdated(c clientset.Interface, desiredGeneration int64, namespace, name string) error {
 	return wait.Poll(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
-		rs, err := c.Extensions().ReplicaSets(namespace).Get(name)
+		rs, err := c.Extensions().ReplicaSets(namespace).Get(name, metav1.GetOptions{})
 		if err != nil {
 			return false, err
 		}
@@ -688,7 +688,7 @@ func WaitForReplicaSetUpdated(c clientset.Interface, desiredGeneration int64, na
 // WaitForPodsHashPopulated polls the replica set until updated and fully labeled.
 func WaitForPodsHashPopulated(c clientset.Interface, desiredGeneration int64, namespace, name string) error {
 	return wait.Poll(1*time.Second, 1*time.Minute, func() (bool, error) {
-		rs, err := c.Extensions().ReplicaSets(namespace).Get(name)
+		rs, err := c.Extensions().ReplicaSets(namespace).Get(name, metav1.GetOptions{})
 		if err != nil {
 			return false, err
 		}
diff --git a/pkg/controller/disruption/disruption.go b/pkg/controller/disruption/disruption.go
index 2cfc8e785e..06cf61baf7 100644
--- a/pkg/controller/disruption/disruption.go
+++ b/pkg/controller/disruption/disruption.go
@@ -697,7 +697,7 @@ func (dc *DisruptionController) updatePdbStatus(pdb *policy.PodDisruptionBudget,
 // returns the old PDB. Intended to be used in a retry loop where it runs a
 // bounded number of times.
 func refresh(pdbClient policyclientset.PodDisruptionBudgetInterface, pdb *policy.PodDisruptionBudget) *policy.PodDisruptionBudget {
-	newPdb, err := pdbClient.Get(pdb.Name)
+	newPdb, err := pdbClient.Get(pdb.Name, metav1.GetOptions{})
 	if err == nil {
 		return newPdb
 	} else {
diff --git a/pkg/controller/endpoint/endpoints_controller.go b/pkg/controller/endpoint/endpoints_controller.go
index 86c8306062..02ab04ce4d 100644
--- a/pkg/controller/endpoint/endpoints_controller.go
+++ b/pkg/controller/endpoint/endpoints_controller.go
@@ -29,6 +29,7 @@ import (
 	"k8s.io/kubernetes/pkg/api/v1/endpoints"
 	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
 	utilpod "k8s.io/kubernetes/pkg/api/v1/pod"
+	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 	"k8s.io/kubernetes/pkg/client/cache"
 	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
 	"k8s.io/kubernetes/pkg/controller"
@@ -450,7 +451,7 @@ func (e *EndpointController) syncService(key string) error {
 	subsets = endpoints.RepackSubsets(subsets)
 
 	// See if there's actually an update here.
-	currentEndpoints, err := e.client.Core().Endpoints(service.Namespace).Get(service.Name)
+	currentEndpoints, err := e.client.Core().Endpoints(service.Namespace).Get(service.Name, metav1.GetOptions{})
 	if err != nil {
 		if errors.IsNotFound(err) {
 			currentEndpoints = &v1.Endpoints{
diff --git a/pkg/controller/namespace/namespace_controller_utils.go b/pkg/controller/namespace/namespace_controller_utils.go
index 779e90c054..f484fbe6dc 100644
--- a/pkg/controller/namespace/namespace_controller_utils.go
+++ b/pkg/controller/namespace/namespace_controller_utils.go
@@ -96,7 +96,7 @@ func retryOnConflictError(kubeClient clientset.Interface, namespace *v1.Namespac
 			return nil, err
 		}
 		prevNamespace := latestNamespace
-		latestNamespace, err = kubeClient.Core().Namespaces().Get(latestNamespace.Name)
+		latestNamespace, err = kubeClient.Core().Namespaces().Get(latestNamespace.Name, metav1.GetOptions{})
 		if err != nil {
 			return nil, err
 		}
@@ -379,7 +379,7 @@ func syncNamespace(
 	// multiple controllers may edit a namespace during termination
 	// first get the latest state of the namespace before proceeding
 	// if the namespace was deleted already, don't do anything
-	namespace, err := kubeClient.Core().Namespaces().Get(namespace.Name)
+	namespace, err := kubeClient.Core().Namespaces().Get(namespace.Name, metav1.GetOptions{})
 	if err != nil {
 		if errors.IsNotFound(err) {
 			return nil
diff --git a/pkg/controller/node/cidr_allocator.go b/pkg/controller/node/cidr_allocator.go
index 81b8de1441..ca62c3bebd 100644
--- a/pkg/controller/node/cidr_allocator.go
+++ b/pkg/controller/node/cidr_allocator.go
@@ -24,6 +24,7 @@ import (
 	apierrors "k8s.io/kubernetes/pkg/api/errors"
 	"k8s.io/kubernetes/pkg/api/v1"
+	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
 	"k8s.io/kubernetes/pkg/client/record"
 	"k8s.io/kubernetes/pkg/util/sets"
@@ -229,7 +230,7 @@ func (r *rangeAllocator) updateCIDRAllocation(data nodeAndCIDR) error {
 	defer r.removeNodeFromProcessing(data.nodeName)
 	for rep := 0; rep < podCIDRUpdateRetry; rep++ {
 		// TODO: change it to using PATCH instead of full Node updates.
-		node, err = r.client.Core().Nodes().Get(data.nodeName)
+		node, err = r.client.Core().Nodes().Get(data.nodeName, metav1.GetOptions{})
 		if err != nil {
 			glog.Errorf("Failed while getting node %v to retry updating Node.Spec.PodCIDR: %v", data.nodeName, err)
 			continue
diff --git a/pkg/controller/node/nodecontroller.go b/pkg/controller/node/nodecontroller.go
index bb9501335b..cb961b96d4 100644
--- a/pkg/controller/node/nodecontroller.go
+++ b/pkg/controller/node/nodecontroller.go
@@ -451,7 +451,7 @@ func (nc *NodeController) monitorNodeStatus() error {
 				break
 			}
 			name := node.Name
-			node, err = nc.kubeClient.Core().Nodes().Get(name)
+			node, err = nc.kubeClient.Core().Nodes().Get(name, metav1.GetOptions{})
 			if err != nil {
 				glog.Errorf("Failed while getting a Node to retry updating NodeStatus. Probably Node %s was deleted.", name)
 				break
diff --git a/pkg/controller/node/testutil/test_utils.go b/pkg/controller/node/testutil/test_utils.go
index fcda83477c..127d967f9e 100644
--- a/pkg/controller/node/testutil/test_utils.go
+++ b/pkg/controller/node/testutil/test_utils.go
@@ -107,7 +107,7 @@ func (m *FakeNodeHandler) Create(node *v1.Node) (*v1.Node, error) {
 }
 
 // Get returns a Node from the fake store.
-func (m *FakeNodeHandler) Get(name string) (*v1.Node, error) {
+func (m *FakeNodeHandler) Get(name string, opts metav1.GetOptions) (*v1.Node, error) {
 	m.lock.Lock()
 	defer func() {
 		m.RequestCount++
diff --git a/pkg/controller/petset/pet.go b/pkg/controller/petset/pet.go
index 8e5fbe3199..59170f61a1 100644
--- a/pkg/controller/petset/pet.go
+++ b/pkg/controller/petset/pet.go
@@ -23,6 +23,7 @@ import (
 	"k8s.io/kubernetes/pkg/api/errors"
 	"k8s.io/kubernetes/pkg/api/v1"
 	apps "k8s.io/kubernetes/pkg/apis/apps/v1beta1"
+	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
 	"k8s.io/kubernetes/pkg/client/record"
 	"k8s.io/kubernetes/pkg/runtime"
@@ -183,7 +184,7 @@ type apiServerPetClient struct {
 
 // Get gets the pet in the pcb from the apiserver.
 func (p *apiServerPetClient) Get(pet *pcb) (*pcb, bool, error) {
 	ns := pet.parent.Namespace
-	pod, err := p.c.Core().Pods(ns).Get(pet.pod.Name)
+	pod, err := p.c.Core().Pods(ns).Get(pet.pod.Name, metav1.GetOptions{})
 	if errors.IsNotFound(err) {
 		return nil, false, nil
 	}
@@ -228,7 +229,7 @@ func (p *apiServerPetClient) Update(pet *pcb, expectedPet *pcb) (updateErr error
 		if updateErr == nil || i >= updateRetries {
 			return updateErr
 		}
-		getPod, getErr := pc.Get(updatePod.Name)
+		getPod, getErr := pc.Get(updatePod.Name, metav1.GetOptions{})
 		if getErr != nil {
 			return getErr
 		}
@@ -243,7 +244,7 @@ func (p *apiServerPetClient) DeletePVCs(pet *pcb) error {
 }
 
 func (p *apiServerPetClient) getPVC(pvcName, pvcNamespace string) (*v1.PersistentVolumeClaim, error) {
-	pvc, err := p.c.Core().PersistentVolumeClaims(pvcNamespace).Get(pvcName)
+	pvc, err := p.c.Core().PersistentVolumeClaims(pvcNamespace).Get(pvcName, metav1.GetOptions{})
 	return pvc, err
 }
diff --git a/pkg/controller/petset/pet_set_utils.go b/pkg/controller/petset/pet_set_utils.go
index 343f294404..eca965afae 100644
--- a/pkg/controller/petset/pet_set_utils.go
+++ b/pkg/controller/petset/pet_set_utils.go
@@ -22,6 +22,7 @@ import (
 	"k8s.io/kubernetes/pkg/api/v1"
 	apps "k8s.io/kubernetes/pkg/apis/apps/v1beta1"
+	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 	"k8s.io/kubernetes/pkg/client/cache"
 	appsclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/apps/v1beta1"
 	"k8s.io/kubernetes/pkg/controller"
@@ -58,7 +59,7 @@ func updatePetCount(psClient appsclientset.StatefulSetsGetter, ps apps.StatefulS
 		if updateErr == nil || i >= statusUpdateRetries {
 			return updateErr
 		}
-		if ps, getErr = psClient.StatefulSets(ps.Namespace).Get(ps.Name); getErr != nil {
+		if ps, getErr = psClient.StatefulSets(ps.Namespace).Get(ps.Name, metav1.GetOptions{}); getErr != nil {
 			return getErr
 		}
 	}
diff --git a/pkg/controller/replicaset/replica_set_utils.go b/pkg/controller/replicaset/replica_set_utils.go
index 3c55f13087..03ef901089 100644
--- a/pkg/controller/replicaset/replica_set_utils.go
+++ b/pkg/controller/replicaset/replica_set_utils.go
@@ -75,7 +75,7 @@ func updateReplicaSetStatus(c unversionedextensions.ReplicaSetInterface, rs exte
 			return updateErr
 		}
 		// Update the ReplicaSet with the latest resource version for the next poll
-		if rs, getErr = c.Get(rs.Name); getErr != nil {
+		if rs, getErr = c.Get(rs.Name, metav1.GetOptions{}); getErr != nil {
 			// If the GET fails we can't trust status.Replicas anymore. This error
 			// is bound to be more interesting than the update failure.
 			return getErr
diff --git a/pkg/controller/replication/replication_controller_utils.go b/pkg/controller/replication/replication_controller_utils.go
index dc07177fde..111bc1f8c6 100644
--- a/pkg/controller/replication/replication_controller_utils.go
+++ b/pkg/controller/replication/replication_controller_utils.go
@@ -63,7 +63,7 @@ func updateReplicationControllerStatus(c v1core.ReplicationControllerInterface,
 			return updateErr
 		}
 		// Update the controller with the latest resource version for the next poll
-		if rc, getErr = c.Get(rc.Name); getErr != nil {
+		if rc, getErr = c.Get(rc.Name, metav1.GetOptions{}); getErr != nil {
 			// If the GET fails we can't trust status.Replicas anymore. This error
 			// is bound to be more interesting than the update failure.
 			return getErr
diff --git a/pkg/controller/serviceaccount/tokengetter.go b/pkg/controller/serviceaccount/tokengetter.go
index 37b88f2b5a..61937dffcd 100644
--- a/pkg/controller/serviceaccount/tokengetter.go
+++ b/pkg/controller/serviceaccount/tokengetter.go
@@ -43,10 +43,10 @@ func NewGetterFromClient(c clientset.Interface) serviceaccount.ServiceAccountTok
 	return clientGetter{c}
 }
 func (c clientGetter) GetServiceAccount(namespace, name string) (*v1.ServiceAccount, error) {
-	return c.client.Core().ServiceAccounts(namespace).Get(name)
+	return c.client.Core().ServiceAccounts(namespace).Get(name, metav1.GetOptions{})
 }
 func (c clientGetter) GetSecret(namespace, name string) (*v1.Secret, error) {
-	return c.client.Core().Secrets(namespace).Get(name)
+	return c.client.Core().Secrets(namespace).Get(name, metav1.GetOptions{})
 }
 
 // registryGetter implements ServiceAccountTokenGetter using a service account and secret registry
diff --git a/pkg/controller/serviceaccount/tokens_controller.go b/pkg/controller/serviceaccount/tokens_controller.go
index 189c98257f..b69b581f80 100644
--- a/pkg/controller/serviceaccount/tokens_controller.go
+++ b/pkg/controller/serviceaccount/tokens_controller.go
@@ -25,6 +25,7 @@ import (
 	"k8s.io/kubernetes/pkg/api"
 	apierrors "k8s.io/kubernetes/pkg/api/errors"
 	"k8s.io/kubernetes/pkg/api/v1"
+	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 	"k8s.io/kubernetes/pkg/client/cache"
 	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
 	clientretry "k8s.io/kubernetes/pkg/client/retry"
@@ -384,7 +385,7 @@ func (e *TokensController) ensureReferencedToken(serviceAccount *v1.ServiceAccou
 	// We don't want to update the cache's copy of the service account
 	// so add the secret to a freshly retrieved copy of the service account
 	serviceAccounts := e.client.Core().ServiceAccounts(serviceAccount.Namespace)
-	liveServiceAccount, err := serviceAccounts.Get(serviceAccount.Name)
+	liveServiceAccount, err := serviceAccounts.Get(serviceAccount.Name, metav1.GetOptions{})
 	if err != nil {
 		// Retry for any error other than a NotFound
 		return !apierrors.IsNotFound(err), err
@@ -477,7 +478,7 @@ func (e *TokensController) generateTokenIfNeeded(serviceAccount *v1.ServiceAccou
 	// We don't want to update the cache's copy of the secret
 	// so add the token to a freshly retrieved copy of the secret
 	secrets := e.client.Core().Secrets(cachedSecret.Namespace)
-	liveSecret, err := secrets.Get(cachedSecret.Name)
+	liveSecret, err := secrets.Get(cachedSecret.Name, metav1.GetOptions{})
 	if err != nil {
 		// Retry for any error other than a NotFound
 		return !apierrors.IsNotFound(err), err
@@ -541,7 +542,7 @@ func (e *TokensController) removeSecretReference(saNamespace string, saName stri
 	// We don't want to update the cache's copy of the service account
 	// so remove the secret from a freshly retrieved copy of the service account
 	serviceAccounts := e.client.Core().ServiceAccounts(saNamespace)
-	serviceAccount, err := serviceAccounts.Get(saName)
+	serviceAccount, err := serviceAccounts.Get(saName, metav1.GetOptions{})
 	// Ignore NotFound errors when attempting to remove a reference
 	if apierrors.IsNotFound(err) {
 		return nil
@@ -598,7 +599,7 @@ func (e *TokensController) getServiceAccount(ns string, name string, uid types.U
 	}
 
 	// Live lookup
-	sa, err := e.client.Core().ServiceAccounts(ns).Get(name)
+	sa, err := e.client.Core().ServiceAccounts(ns).Get(name, metav1.GetOptions{})
 	if apierrors.IsNotFound(err) {
 		return nil, nil
 	}
@@ -634,7 +635,7 @@ func (e *TokensController) getSecret(ns string, name string, uid types.UID, fetc
 	}
 
 	// Live lookup
-	secret, err := e.client.Core().Secrets(ns).Get(name)
+	secret, err := e.client.Core().Secrets(ns).Get(name, metav1.GetOptions{})
 	if apierrors.IsNotFound(err) {
 		return nil, nil
 	}
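The controller hunks cluster around a second recurring shape: optimistic status updates that, on failure, refresh the object with a live Get so the retry carries the latest resourceVersion (updateReplicaSetStatus, updatePetCount, and updateReplicationControllerStatus above). A condensed sketch; the narrow interface is illustrative (the real typed unversionedextensions.ReplicaSetInterface satisfies it), and the originals also re-apply the desired status after the refresh:

package example

import (
	"k8s.io/kubernetes/pkg/apis/extensions"
	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
)

const statusUpdateRetries = 1

// replicaSetClient is the slice of the typed client this sketch needs.
type replicaSetClient interface {
	Get(name string, options metav1.GetOptions) (*extensions.ReplicaSet, error)
	UpdateStatus(rs *extensions.ReplicaSet) (*extensions.ReplicaSet, error)
}

// updateStatusWithRetries writes rs.Status and, when the write fails, re-Gets
// the object so the next attempt is based on the server's current resourceVersion.
func updateStatusWithRetries(c replicaSetClient, rs *extensions.ReplicaSet) error {
	for i := 0; ; i++ {
		_, updateErr := c.UpdateStatus(rs)
		if updateErr == nil || i >= statusUpdateRetries {
			return updateErr
		}
		// Refresh with the latest resource version for the next attempt; the
		// empty GetOptions keeps the pre-patch read semantics.
		fresh, getErr := c.Get(rs.Name, metav1.GetOptions{})
		if getErr != nil {
			// If the GET fails we can't trust the local copy anymore.
			return getErr
		}
		rs = fresh
	}
}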
diff --git a/pkg/controller/volume/persistentvolume/pv_controller.go b/pkg/controller/volume/persistentvolume/pv_controller.go
index a86e84cb35..ca8767a39b 100644
--- a/pkg/controller/volume/persistentvolume/pv_controller.go
+++ b/pkg/controller/volume/persistentvolume/pv_controller.go
@@ -961,7 +961,7 @@ func (ctrl *PersistentVolumeController) recycleVolumeOperation(arg interface{})
 	// This method may have been waiting for a volume lock for some time.
 	// Previous recycleVolumeOperation might just have saved an updated version,
 	// so read current volume state now.
-	newVolume, err := ctrl.kubeClient.Core().PersistentVolumes().Get(volume.Name)
+	newVolume, err := ctrl.kubeClient.Core().PersistentVolumes().Get(volume.Name, metav1.GetOptions{})
 	if err != nil {
 		glog.V(3).Infof("error reading peristent volume %q: %v", volume.Name, err)
 		return
@@ -1053,7 +1053,7 @@ func (ctrl *PersistentVolumeController) deleteVolumeOperation(arg interface{}) e
 	// This method may have been waiting for a volume lock for some time.
 	// Previous deleteVolumeOperation might just have saved an updated version, so
 	// read current volume state now.
-	newVolume, err := ctrl.kubeClient.Core().PersistentVolumes().Get(volume.Name)
+	newVolume, err := ctrl.kubeClient.Core().PersistentVolumes().Get(volume.Name, metav1.GetOptions{})
 	if err != nil {
 		glog.V(3).Infof("error reading peristent volume %q: %v", volume.Name, err)
 		return nil
@@ -1260,7 +1260,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claimObj interfa
 	// yet.
 	pvName := ctrl.getProvisionedVolumeNameForClaim(claim)
-	volume, err := ctrl.kubeClient.Core().PersistentVolumes().Get(pvName)
+	volume, err := ctrl.kubeClient.Core().PersistentVolumes().Get(pvName, metav1.GetOptions{})
 	if err == nil && volume != nil {
 		// Volume has been already provisioned, nothing to do.
 		glog.V(4).Infof("provisionClaimOperation [%s]: volume already exists, skipping", claimToClaimKey(claim))
diff --git a/pkg/dns/config/sync.go b/pkg/dns/config/sync.go
index 77f9234823..5f6958cca3 100644
--- a/pkg/dns/config/sync.go
+++ b/pkg/dns/config/sync.go
@@ -19,6 +19,7 @@ package config
 import (
 	"k8s.io/client-go/pkg/util/wait"
 	"k8s.io/kubernetes/pkg/api/v1"
+	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 	"k8s.io/kubernetes/pkg/client/cache"
 	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
 	fed "k8s.io/kubernetes/pkg/dns/federation"
@@ -98,7 +99,7 @@ type kubeSync struct {
 var _ Sync = (*kubeSync)(nil)
 
 func (sync *kubeSync) Once() (*Config, error) {
-	cm, err := sync.client.Core().ConfigMaps(sync.ns).Get(sync.name)
+	cm, err := sync.client.Core().ConfigMaps(sync.ns).Get(sync.name, metav1.GetOptions{})
 	if err != nil {
 		glog.Errorf("Error getting ConfigMap %v:%v err: %v",
diff --git a/pkg/kubectl/cmd/attach.go b/pkg/kubectl/cmd/attach.go
index 2dbbb0b8c0..bfd707b9ab 100644
--- a/pkg/kubectl/cmd/attach.go
+++ b/pkg/kubectl/cmd/attach.go
@@ -25,6 +25,7 @@ import (
 	"github.com/spf13/cobra"
 
 	"k8s.io/kubernetes/pkg/api"
+	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 	coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
 	"k8s.io/kubernetes/pkg/client/restclient"
 	"k8s.io/kubernetes/pkg/client/unversioned/remotecommand"
@@ -165,7 +166,7 @@ func (p *AttachOptions) Validate() error {
 // Run executes a validated remote execution against a pod.
 func (p *AttachOptions) Run() error {
 	if p.Pod == nil {
-		pod, err := p.PodClient.Pods(p.Namespace).Get(p.PodName)
+		pod, err := p.PodClient.Pods(p.Namespace).Get(p.PodName, metav1.GetOptions{})
 		if err != nil {
 			return err
 		}
diff --git a/pkg/kubectl/cmd/drain.go b/pkg/kubectl/cmd/drain.go
index 7e16ea62b2..39a8098030 100644
--- a/pkg/kubectl/cmd/drain.go
+++ b/pkg/kubectl/cmd/drain.go
@@ -267,15 +267,15 @@ func (o *DrainOptions) deleteOrEvictPodsSimple() error {
 func (o *DrainOptions) getController(sr *api.SerializedReference) (interface{}, error) {
 	switch sr.Reference.Kind {
 	case "ReplicationController":
-		return o.client.Core().ReplicationControllers(sr.Reference.Namespace).Get(sr.Reference.Name)
+		return o.client.Core().ReplicationControllers(sr.Reference.Namespace).Get(sr.Reference.Name, metav1.GetOptions{})
 	case "DaemonSet":
-		return o.client.Extensions().DaemonSets(sr.Reference.Namespace).Get(sr.Reference.Name)
+		return o.client.Extensions().DaemonSets(sr.Reference.Namespace).Get(sr.Reference.Name, metav1.GetOptions{})
 	case "Job":
-		return o.client.Batch().Jobs(sr.Reference.Namespace).Get(sr.Reference.Name)
+		return o.client.Batch().Jobs(sr.Reference.Namespace).Get(sr.Reference.Name, metav1.GetOptions{})
 	case "ReplicaSet":
-		return o.client.Extensions().ReplicaSets(sr.Reference.Namespace).Get(sr.Reference.Name)
+		return o.client.Extensions().ReplicaSets(sr.Reference.Namespace).Get(sr.Reference.Name, metav1.GetOptions{})
 	case "StatefulSet":
-		return o.client.Apps().StatefulSets(sr.Reference.Namespace).Get(sr.Reference.Name)
+		return o.client.Apps().StatefulSets(sr.Reference.Namespace).Get(sr.Reference.Name, metav1.GetOptions{})
 	}
 	return nil, fmt.Errorf("Unknown controller kind %q", sr.Reference.Kind)
 }
@@ -330,7 +330,7 @@ func (o *DrainOptions) daemonsetFilter(pod api.Pod) (bool, *warning, *fatal) {
 	if sr == nil || sr.Reference.Kind != "DaemonSet" {
 		return true, nil, nil
 	}
-	if _, err := o.client.Extensions().DaemonSets(sr.Reference.Namespace).Get(sr.Reference.Name); err != nil {
+	if _, err := o.client.Extensions().DaemonSets(sr.Reference.Namespace).Get(sr.Reference.Name, metav1.GetOptions{}); err != nil {
 		return false, nil, &fatal{err.Error()}
 	}
 	if !o.IgnoreDaemonsets {
@@ -459,7 +459,7 @@ func (o *DrainOptions) deleteOrEvictPods(pods []api.Pod) error {
 	}
 
 	getPodFn := func(namespace, name string) (*api.Pod, error) {
-		return o.client.Core().Pods(namespace).Get(name)
+		return o.client.Core().Pods(namespace).Get(name, metav1.GetOptions{})
 	}
 
 	if len(policyGroupVersion) > 0 {
diff --git a/pkg/kubectl/cmd/exec.go b/pkg/kubectl/cmd/exec.go
index bc840d3343..0d051395fa 100644
--- a/pkg/kubectl/cmd/exec.go
+++ b/pkg/kubectl/cmd/exec.go
@@ -25,6 +25,7 @@ import (
 	"github.com/spf13/cobra"
 
 	"k8s.io/kubernetes/pkg/api"
+	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 	coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
 	"k8s.io/kubernetes/pkg/client/restclient"
 	"k8s.io/kubernetes/pkg/client/unversioned/remotecommand"
@@ -258,7 +259,7 @@ func (o *StreamOptions) setupTTY() term.TTY {
 // Run executes a validated remote execution against a pod.
 func (p *ExecOptions) Run() error {
-	pod, err := p.PodClient.Pods(p.Namespace).Get(p.PodName)
+	pod, err := p.PodClient.Pods(p.Namespace).Get(p.PodName, metav1.GetOptions{})
 	if err != nil {
 		return err
 	}
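kubectl mostly touches single call sites, but drain.go's deleteOrEvictPods shows the closure idiom: wrap the two-argument Get once and hand downstream helpers a plain (namespace, name) function, so the options plumbing stays at the edge. A sketch, assuming the generated PodsGetter interface from the internal clientset:

package example

import (
	"k8s.io/kubernetes/pkg/api"
	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
	coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
)

// makeGetPodFn mirrors the closure built in deleteOrEvictPods: the GetOptions
// detail is pinned in one place, and the helpers that poll for pod deletion
// keep their simple (namespace, name) signature.
func makeGetPodFn(client coreclient.PodsGetter) func(namespace, name string) (*api.Pod, error) {
	return func(namespace, name string) (*api.Pod, error) {
		return client.Pods(namespace).Get(name, metav1.GetOptions{})
	}
}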
diff --git a/pkg/kubectl/cmd/portforward.go b/pkg/kubectl/cmd/portforward.go
index add05b23a6..297737ce45 100644
--- a/pkg/kubectl/cmd/portforward.go
+++ b/pkg/kubectl/cmd/portforward.go
@@ -26,6 +26,7 @@ import (
 	"github.com/spf13/cobra"
 
 	"k8s.io/kubernetes/pkg/api"
+	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 	coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
 	"k8s.io/kubernetes/pkg/client/restclient"
 	"k8s.io/kubernetes/pkg/client/unversioned/portforward"
@@ -170,7 +171,7 @@ func (o PortForwardOptions) Validate() error {
 // RunPortForward implements all the necessary functionality for port-forward cmd.
 func (o PortForwardOptions) RunPortForward() error {
-	pod, err := o.PodClient.Pods(o.Namespace).Get(o.PodName)
+	pod, err := o.PodClient.Pods(o.Namespace).Get(o.PodName, metav1.GetOptions{})
 	if err != nil {
 		return err
 	}
diff --git a/pkg/kubectl/cmd/rollingupdate.go b/pkg/kubectl/cmd/rollingupdate.go
index 7f7a0e5882..beda44c371 100644
--- a/pkg/kubectl/cmd/rollingupdate.go
+++ b/pkg/kubectl/cmd/rollingupdate.go
@@ -31,6 +31,7 @@ import (
 	"k8s.io/kubernetes/pkg/api/errors"
 	"k8s.io/kubernetes/pkg/api/meta"
 	"k8s.io/kubernetes/pkg/api/v1"
+	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 	"k8s.io/kubernetes/pkg/kubectl"
 	"k8s.io/kubernetes/pkg/kubectl/cmd/templates"
 	cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
@@ -179,7 +180,7 @@ func RunRollingUpdate(f cmdutil.Factory, out io.Writer, cmd *cobra.Command, args
 	var newRc *api.ReplicationController
 
 	// fetch rc
-	oldRc, err := coreClient.ReplicationControllers(cmdNamespace).Get(oldName)
+	oldRc, err := coreClient.ReplicationControllers(cmdNamespace).Get(oldName, metav1.GetOptions{})
 	if err != nil {
 		if !errors.IsNotFound(err) || len(image) == 0 || len(args) > 1 {
 			return err
@@ -375,7 +376,7 @@ func RunRollingUpdate(f cmdutil.Factory, out io.Writer, cmd *cobra.Command, args
 	} else {
 		message = fmt.Sprintf("rolling updated to %q", newRc.Name)
 	}
-	newRc, err = coreClient.ReplicationControllers(cmdNamespace).Get(newRc.Name)
+	newRc, err = coreClient.ReplicationControllers(cmdNamespace).Get(newRc.Name, metav1.GetOptions{})
 	if err != nil {
 		return err
 	}
diff --git a/pkg/kubectl/cmd/top_node.go b/pkg/kubectl/cmd/top_node.go
index 657edb062a..948daf21dd 100644
--- a/pkg/kubectl/cmd/top_node.go
+++ b/pkg/kubectl/cmd/top_node.go
@@ -23,6 +23,7 @@ import (
 	"github.com/spf13/cobra"
 	"github.com/spf13/pflag"
 	"k8s.io/kubernetes/pkg/api"
+	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 	coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
 	"k8s.io/kubernetes/pkg/kubectl/cmd/templates"
 	cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
@@ -144,7 +145,7 @@ func (o TopNodeOptions) RunTopNode() error {
 	var nodes []api.Node
 
 	if len(o.ResourceName) > 0 {
-		node, err := o.NodeClient.Nodes().Get(o.ResourceName)
+		node, err := o.NodeClient.Nodes().Get(o.ResourceName, metav1.GetOptions{})
 		if err != nil {
 			return err
 		}
diff --git a/pkg/kubectl/cmd/top_pod.go b/pkg/kubectl/cmd/top_pod.go
index 75553f5701..db950eb8bf 100644
--- a/pkg/kubectl/cmd/top_pod.go
+++ b/pkg/kubectl/cmd/top_pod.go
@@ -23,6 +23,7 @@ import (
 	"time"
 
 	"k8s.io/kubernetes/pkg/api"
+	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 	coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
 	"k8s.io/kubernetes/pkg/kubectl/cmd/templates"
 	cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
@@ -155,7 +156,7 @@ func (o TopPodOptions) RunTopPod() error {
 
 func verifyEmptyMetrics(o TopPodOptions, selector labels.Selector) error {
 	if len(o.ResourceName) > 0 {
-		pod, err := o.PodClient.Pods(o.Namespace).Get(o.ResourceName)
+		pod, err := o.PodClient.Pods(o.Namespace).Get(o.ResourceName, metav1.GetOptions{})
 		if err != nil {
 			return err
 		}
diff --git a/pkg/kubectl/describe.go b/pkg/kubectl/describe.go
index da369ebc07..c7abf0d837 100644
--- a/pkg/kubectl/describe.go
+++ b/pkg/kubectl/describe.go
@@ -200,7 +200,7 @@ type NamespaceDescriber struct {
 }
 
 func (d *NamespaceDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) {
-	ns, err := d.Core().Namespaces().Get(name)
+	ns, err := d.Core().Namespaces().Get(name, metav1.GetOptions{})
 	if err != nil {
 		return "", err
 	}
@@ -374,7 +374,7 @@ type LimitRangeDescriber struct {
 
 func (d *LimitRangeDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) {
 	lr := d.Core().LimitRanges(namespace)
 
-	limitRange, err := lr.Get(name)
+	limitRange, err := lr.Get(name, metav1.GetOptions{})
 	if err != nil {
 		return "", err
 	}
@@ -401,7 +401,7 @@ type ResourceQuotaDescriber struct {
 
 func (d *ResourceQuotaDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) {
 	rq := d.Core().ResourceQuotas(namespace)
 
-	resourceQuota, err := rq.Get(name)
+	resourceQuota, err := rq.Get(name, metav1.GetOptions{})
 	if err != nil {
 		return "", err
 	}
@@ -469,7 +469,7 @@ type PodDescriber struct {
 }
 
 func (d *PodDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) {
-	pod, err := d.Core().Pods(namespace).Get(name)
+	pod, err := d.Core().Pods(namespace).Get(name, metav1.GetOptions{})
 	if err != nil {
 		if describerSettings.ShowEvents {
 			eventsInterface := d.Core().Events(namespace)
@@ -766,7 +766,7 @@ type PersistentVolumeDescriber struct {
 
 func (d *PersistentVolumeDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) {
 	c := d.Core().PersistentVolumes()
 
-	pv, err := c.Get(name)
+	pv, err := c.Get(name, metav1.GetOptions{})
 	if err != nil {
 		return "", err
 	}
@@ -837,7 +837,7 @@ type PersistentVolumeClaimDescriber struct {
 
 func (d *PersistentVolumeClaimDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) {
 	c := d.Core().PersistentVolumeClaims(namespace)
 
-	pvc, err := c.Get(name)
+	pvc, err := c.Get(name, metav1.GetOptions{})
 	if err != nil {
 		return "", err
 	}
@@ -1139,7 +1139,7 @@ func (d *ReplicationControllerDescriber) Describe(namespace, name string, descri
 	rc := d.Core().ReplicationControllers(namespace)
 	pc := d.Core().Pods(namespace)
 
-	controller, err := rc.Get(name)
+	controller, err := rc.Get(name, metav1.GetOptions{})
 	if err != nil {
 		return "", err
 	}
@@ -1211,7 +1211,7 @@ func (d *ReplicaSetDescriber) Describe(namespace, name string, describerSettings
 	rsc := d.Extensions().ReplicaSets(namespace)
 	pc := d.Core().Pods(namespace)
 
-	rs, err := rsc.Get(name)
+	rs, err := rsc.Get(name, metav1.GetOptions{})
 	if err != nil {
 		return "", err
 	}
@@ -1260,7 +1260,7 @@ type JobDescriber struct {
 }
 
 func (d *JobDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) {
-	job, err := d.Batch().Jobs(namespace).Get(name)
+	job, err := d.Batch().Jobs(namespace).Get(name, metav1.GetOptions{})
 	if err != nil {
 		return "", err
 	}
@@ -1309,7 +1309,7 @@ type CronJobDescriber struct {
 }
 
 func (d *CronJobDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) {
-	scheduledJob, err := d.Batch().CronJobs(namespace).Get(name)
+	scheduledJob, err := d.Batch().CronJobs(namespace).Get(name, metav1.GetOptions{})
 	if err != nil {
 		return "", err
 	}
@@ -1399,7 +1399,7 @@ func (d *DaemonSetDescriber) Describe(namespace, name string, describerSettings
 	dc := d.Extensions().DaemonSets(namespace)
 	pc := d.Core().Pods(namespace)
 
-	daemon, err := dc.Get(name)
+	daemon, err := dc.Get(name, metav1.GetOptions{})
 	if err != nil {
 		return "", err
 	}
@@ -1453,7 +1453,7 @@ type SecretDescriber struct {
 
 func (d *SecretDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) {
 	c := d.Core().Secrets(namespace)
 
-	secret, err := c.Get(name)
+	secret, err := c.Get(name, metav1.GetOptions{})
 	if err != nil {
 		return "", err
 	}
@@ -1491,7 +1491,7 @@ type IngressDescriber struct {
 
 func (i *IngressDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) {
 	c := i.Extensions().Ingresses(namespace)
-	ing, err := c.Get(name)
+	ing, err := c.Get(name, metav1.GetOptions{})
 	if err != nil {
 		return "", err
 	}
@@ -1499,8 +1499,8 @@ func (i *IngressDescriber) Describe(namespace, name string, describerSettings De
 }
 
 func (i *IngressDescriber) describeBackend(ns string, backend *extensions.IngressBackend) string {
-	endpoints, _ := i.Core().Endpoints(ns).Get(backend.ServiceName)
-	service, _ := i.Core().Services(ns).Get(backend.ServiceName)
+	endpoints, _ := i.Core().Endpoints(ns).Get(backend.ServiceName, metav1.GetOptions{})
+	service, _ := i.Core().Services(ns).Get(backend.ServiceName, metav1.GetOptions{})
 	spName := ""
 	for i := range service.Spec.Ports {
 		sp := &service.Spec.Ports[i]
@@ -1605,12 +1605,12 @@ type ServiceDescriber struct {
 
 func (d *ServiceDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) {
 	c := d.Core().Services(namespace)
 
-	service, err := c.Get(name)
+	service, err := c.Get(name, metav1.GetOptions{})
 	if err != nil {
 		return "", err
 	}
 
-	endpoints, _ := d.Core().Endpoints(namespace).Get(name)
+	endpoints, _ := d.Core().Endpoints(namespace).Get(name, metav1.GetOptions{})
 	var events *api.EventList
 	if describerSettings.ShowEvents {
 		events, _ = d.Core().Events(namespace).Search(service)
@@ -1685,7 +1685,7 @@ type EndpointsDescriber struct {
 
 func (d *EndpointsDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) {
 	c := d.Core().Endpoints(namespace)
 
-	ep, err := c.Get(name)
+	ep, err := c.Get(name, metav1.GetOptions{})
 	if err != nil {
 		return "", err
 	}
@@ -1759,7 +1759,7 @@ type ServiceAccountDescriber struct {
 
 func (d *ServiceAccountDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) {
 	c := d.Core().ServiceAccounts(namespace)
 
-	serviceAccount, err := c.Get(name)
+	serviceAccount, err := c.Get(name, metav1.GetOptions{})
 	if err != nil {
 		return "", err
 	}
@@ -1840,7 +1840,7 @@ type NodeDescriber struct {
 
 func (d *NodeDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) {
 	mc := d.Core().Nodes()
-	node, err := mc.Get(name)
+	node, err := mc.Get(name, metav1.GetOptions{})
 	if err != nil {
 		return "", err
 	}
@@ -1960,7 +1960,7 @@ type StatefulSetDescriber struct {
 }
 
 func (p *StatefulSetDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) {
-	ps, err := p.client.Apps().StatefulSets(namespace).Get(name)
+	ps, err := p.client.Apps().StatefulSets(namespace).Get(name, metav1.GetOptions{})
 	if err != nil {
 		return "", err
 	}
@@ -2003,7 +2003,7 @@ type CertificateSigningRequestDescriber struct {
 }
 
 func (p *CertificateSigningRequestDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) {
-	csr, err := p.client.Certificates().CertificateSigningRequests().Get(name)
+	csr, err := p.client.Certificates().CertificateSigningRequests().Get(name, metav1.GetOptions{})
 	if err != nil {
 		return "", err
 	}
@@ -2073,7 +2073,7 @@ type HorizontalPodAutoscalerDescriber struct {
 }
 
 func (d *HorizontalPodAutoscalerDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) {
-	hpa, err := d.client.Autoscaling().HorizontalPodAutoscalers(namespace).Get(name)
+	hpa, err := d.client.Autoscaling().HorizontalPodAutoscalers(namespace).Get(name, metav1.GetOptions{})
 	if err != nil {
 		return "", err
 	}
@@ -2106,7 +2106,7 @@ func (d *HorizontalPodAutoscalerDescriber) Describe(namespace, name string, desc
 	// TODO: switch to scale subresource once the required code is submitted.
 	if strings.ToLower(hpa.Spec.ScaleTargetRef.Kind) == "replicationcontroller" {
 		w.Write(LEVEL_0, "ReplicationController pods:\t")
-		rc, err := d.client.Core().ReplicationControllers(hpa.Namespace).Get(hpa.Spec.ScaleTargetRef.Name)
+		rc, err := d.client.Core().ReplicationControllers(hpa.Namespace).Get(hpa.Spec.ScaleTargetRef.Name, metav1.GetOptions{})
 		if err == nil {
 			w.Write(LEVEL_0, "%d current / %d desired\n", rc.Status.Replicas, rc.Spec.Replicas)
 		} else {
@@ -2234,7 +2234,7 @@ type DeploymentDescriber struct {
 }
 
 func (dd *DeploymentDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) {
-	d, err := dd.versionedClient.Extensions().Deployments(namespace).Get(name)
+	d, err := dd.versionedClient.Extensions().Deployments(namespace).Get(name, metav1.GetOptions{})
 	if err != nil {
 		return "", err
 	}
@@ -2371,7 +2371,7 @@ type ConfigMapDescriber struct {
 
 func (d *ConfigMapDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) {
 	c := d.Core().ConfigMaps(namespace)
 
-	configMap, err := c.Get(name)
+	configMap, err := c.Get(name, metav1.GetOptions{})
 	if err != nil {
 		return "", err
 	}
@@ -2401,7 +2401,7 @@ type ClusterDescriber struct {
 }
 
 func (d *ClusterDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) {
-	cluster, err := d.Federation().Clusters().Get(name)
+	cluster, err := d.Federation().Clusters().Get(name, metav1.GetOptions{})
 	if err != nil {
 		return "", err
 	}
@@ -2445,7 +2445,7 @@ type NetworkPolicyDescriber struct {
 
 func (d *NetworkPolicyDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) {
 	c := d.Extensions().NetworkPolicies(namespace)
-	networkPolicy, err := c.Get(name)
+	networkPolicy, err := c.Get(name, metav1.GetOptions{})
 	if err != nil {
 		return "", err
 	}
@@ -2470,7 +2470,7 @@ type StorageClassDescriber struct {
 }
 
 func (s *StorageClassDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) {
-	sc, err := s.Storage().StorageClasses().Get(name)
+	sc, err := s.Storage().StorageClasses().Get(name, metav1.GetOptions{})
 	if err != nil {
 		return "", err
 	}
@@ -2499,7 +2499,7 @@ type PodDisruptionBudgetDescriber struct {
 }
 
 func (p *PodDisruptionBudgetDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) {
-	pdb, err := p.Policy().PodDisruptionBudgets(namespace).Get(name)
+	pdb, err := p.Policy().PodDisruptionBudgets(namespace).Get(name, metav1.GetOptions{})
 	if err != nil {
 		return "", err
 	}
diff --git a/pkg/kubectl/history.go b/pkg/kubectl/history.go
index 6f8279b815..74ed8c58c7 100644
--- a/pkg/kubectl/history.go
+++ b/pkg/kubectl/history.go
@@ -25,6 +25,7 @@ import (
 	"k8s.io/kubernetes/pkg/api/meta"
 	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/apis/extensions"
+	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 	deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
 	"k8s.io/kubernetes/pkg/runtime"
@@ -56,7 +57,7 @@ type DeploymentHistoryViewer struct {
 
 // ViewHistory returns a revision-to-replicaset map as the revision history of a deployment
 func (h *DeploymentHistoryViewer) ViewHistory(namespace, name string, revision int64) (string, error) {
 	versionedClient := versionedClientsetForDeployment(h.c)
-	deployment, err := versionedClient.Extensions().Deployments(namespace).Get(name)
+	deployment, err := versionedClient.Extensions().Deployments(namespace).Get(name, metav1.GetOptions{})
 	if err != nil {
 		return "", fmt.Errorf("failed to retrieve deployment %s: %v", name, err)
 	}
diff --git a/pkg/kubectl/rolling_updater.go b/pkg/kubectl/rolling_updater.go
index fd3a82c247..d590bab8f7 100644
--- a/pkg/kubectl/rolling_updater.go
+++ b/pkg/kubectl/rolling_updater.go
@@ -193,7 +193,7 @@ func (r *RollingUpdater) Update(config *RollingUpdaterConfig) error {
 	// annotation if it doesn't yet exist.
 	_, hasOriginalAnnotation := oldRc.Annotations[originalReplicasAnnotation]
 	if !hasOriginalAnnotation {
-		existing, err := r.rcClient.ReplicationControllers(oldRc.Namespace).Get(oldRc.Name)
+		existing, err := r.rcClient.ReplicationControllers(oldRc.Namespace).Get(oldRc.Name, metav1.GetOptions{})
 		if err != nil {
 			return err
 		}
@@ -398,7 +398,7 @@ func (r *RollingUpdater) scaleAndWaitWithScaler(rc *api.ReplicationController, r
 	if err := scaler.Scale(rc.Namespace, rc.Name, uint(rc.Spec.Replicas), &ScalePrecondition{-1, ""}, retry, wait); err != nil {
 		return nil, err
 	}
-	return r.rcClient.ReplicationControllers(rc.Namespace).Get(rc.Name)
+	return r.rcClient.ReplicationControllers(rc.Namespace).Get(rc.Name, metav1.GetOptions{})
 }
 
 // readyPods returns the old and new ready counts for their pods.
@@ -485,7 +485,7 @@ func (r *RollingUpdater) existingController(controller *api.ReplicationControlle
 		return nil, errors.NewNotFound(api.Resource("replicationcontrollers"), controller.Name)
 	}
 	// controller name is required to get rc back
-	return r.rcClient.ReplicationControllers(controller.Namespace).Get(controller.Name)
+	return r.rcClient.ReplicationControllers(controller.Namespace).Get(controller.Name, metav1.GetOptions{})
 }
 
 // cleanupWithClients performs cleanup tasks after the rolling update. Update
@@ -494,7 +494,7 @@ func (r *RollingUpdater) existingController(controller *api.ReplicationControlle
 func (r *RollingUpdater) cleanupWithClients(oldRc, newRc *api.ReplicationController, config *RollingUpdaterConfig) error {
 	// Clean up annotations
 	var err error
-	newRc, err = r.rcClient.ReplicationControllers(r.ns).Get(newRc.Name)
+	newRc, err = r.rcClient.ReplicationControllers(r.ns).Get(newRc.Name, metav1.GetOptions{})
 	if err != nil {
 		return err
 	}
@@ -509,7 +509,7 @@ func (r *RollingUpdater) cleanupWithClients(oldRc, newRc *api.ReplicationControl
 	if err = wait.Poll(config.Interval, config.Timeout, client.ControllerHasDesiredReplicas(r.rcClient, newRc)); err != nil {
 		return err
 	}
-	newRc, err = r.rcClient.ReplicationControllers(r.ns).Get(newRc.Name)
+	newRc, err = r.rcClient.ReplicationControllers(r.ns).Get(newRc.Name, metav1.GetOptions{})
 	if err != nil {
 		return err
 	}
@@ -545,7 +545,7 @@ func Rename(c coreclient.ReplicationControllersGetter, rc *api.ReplicationContro
 		return err
 	}
 	err = wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
-		_, err := c.ReplicationControllers(rc.Namespace).Get(oldName)
+		_, err := c.ReplicationControllers(rc.Namespace).Get(oldName, metav1.GetOptions{})
 		if err == nil {
 			return false, nil
 		} else if errors.IsNotFound(err) {
@@ -569,7 +569,7 @@ func LoadExistingNextReplicationController(c coreclient.ReplicationControllersGe
 	if len(newName) == 0 {
 		return nil, nil
 	}
-	newRc, err := c.ReplicationControllers(namespace).Get(newName)
+	newRc, err := c.ReplicationControllers(namespace).Get(newName, metav1.GetOptions{})
 	if err != nil && errors.IsNotFound(err) {
 		return nil, nil
 	}
@@ -588,7 +588,7 @@ type NewControllerConfig struct {
 func CreateNewControllerFromCurrentController(rcClient coreclient.ReplicationControllersGetter, codec runtime.Codec, cfg *NewControllerConfig) (*api.ReplicationController, error) {
 	containerIndex := 0
 	// load the old RC into the "new" RC
-	newRc, err := rcClient.ReplicationControllers(cfg.Namespace).Get(cfg.OldName)
+	newRc, err := rcClient.ReplicationControllers(cfg.Namespace).Get(cfg.OldName, metav1.GetOptions{})
 	if err != nil {
 		return nil, err
 	}
@@ -782,7 +782,7 @@ func updateRcWithRetries(rcClient coreclient.ReplicationControllersGetter, names
 		updateErr := e
 		// Update the controller with the latest resource version, if the update failed we
 		// can't trust rc so use oldRc.Name.
-		if rc, e = rcClient.ReplicationControllers(namespace).Get(oldRc.Name); e != nil {
+		if rc, e = rcClient.ReplicationControllers(namespace).Get(oldRc.Name, metav1.GetOptions{}); e != nil {
 			// The Get failed: Value in rc cannot be trusted.
rc = oldRc } @@ -814,7 +814,7 @@ func updatePodWithRetries(podClient coreclient.PodsGetter, namespace string, pod return } updateErr := e - if pod, e = podClient.Pods(namespace).Get(oldPod.Name); e != nil { + if pod, e = podClient.Pods(namespace).Get(oldPod.Name, metav1.GetOptions{}); e != nil { pod = oldPod } // Only return the error from update diff --git a/pkg/kubectl/rollout_status.go b/pkg/kubectl/rollout_status.go index 5eaf1fca0a..8f2b158ef7 100644 --- a/pkg/kubectl/rollout_status.go +++ b/pkg/kubectl/rollout_status.go @@ -20,6 +20,7 @@ import ( "fmt" "k8s.io/kubernetes/pkg/apis/extensions" + metav1 "k8s.io/kubernetes/pkg/apis/meta/v1" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" extensionsclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion" "k8s.io/kubernetes/pkg/controller/deployment/util" @@ -45,7 +46,7 @@ type DeploymentStatusViewer struct { // Status returns a message describing deployment status, and a bool value indicating if the status is considered done func (s *DeploymentStatusViewer) Status(namespace, name string, revision int64) (string, bool, error) { - deployment, err := s.c.Deployments(namespace).Get(name) + deployment, err := s.c.Deployments(namespace).Get(name, metav1.GetOptions{}) if err != nil { return "", false, err } diff --git a/pkg/kubectl/scale.go b/pkg/kubectl/scale.go index aa4cb916b5..4c2a160fad 100644 --- a/pkg/kubectl/scale.go +++ b/pkg/kubectl/scale.go @@ -26,6 +26,7 @@ import ( "k8s.io/kubernetes/pkg/apis/apps" "k8s.io/kubernetes/pkg/apis/batch" "k8s.io/kubernetes/pkg/apis/extensions" + metav1 "k8s.io/kubernetes/pkg/apis/meta/v1" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" appsclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion" batchclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion" @@ -166,7 +167,7 @@ type ReplicationControllerScaler struct { // ScaleSimple does a simple one-shot attempt at scaling. It returns the // resourceVersion of the replication controller if the update is successful. func (scaler *ReplicationControllerScaler) ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint) (string, error) { - controller, err := scaler.c.ReplicationControllers(namespace).Get(name) + controller, err := scaler.c.ReplicationControllers(namespace).Get(name, metav1.GetOptions{}) if err != nil { return "", ScaleError{ScaleGetFailure, "Unknown", err} } @@ -218,7 +219,7 @@ func (scaler *ReplicationControllerScaler) Scale(namespace, name string, newSize // will be deliver, since it may already be in the expected state. // To protect from these two, we first issue Get() to ensure that we // are not already in the expected state. - currentRC, err := scaler.c.ReplicationControllers(namespace).Get(name) + currentRC, err := scaler.c.ReplicationControllers(namespace).Get(name, metav1.GetOptions{}) if err != nil { return err } @@ -264,7 +265,7 @@ type ReplicaSetScaler struct { // ScaleSimple does a simple one-shot attempt at scaling. It returns the // resourceVersion of the replicaset if the update is successful. 
func (scaler *ReplicaSetScaler) ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint) (string, error) { - rs, err := scaler.c.ReplicaSets(namespace).Get(name) + rs, err := scaler.c.ReplicaSets(namespace).Get(name, metav1.GetOptions{}) if err != nil { return "", ScaleError{ScaleGetFailure, "Unknown", err} } @@ -300,7 +301,7 @@ func (scaler *ReplicaSetScaler) Scale(namespace, name string, newSize uint, prec return err } if waitForReplicas != nil { - rs, err := scaler.c.ReplicaSets(namespace).Get(name) + rs, err := scaler.c.ReplicaSets(namespace).Get(name, metav1.GetOptions{}) if err != nil { return err } @@ -335,7 +336,7 @@ type StatefulSetScaler struct { // ScaleSimple does a simple one-shot attempt at scaling. It returns the // resourceVersion of the statefulset if the update is successful. func (scaler *StatefulSetScaler) ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint) (string, error) { - ps, err := scaler.c.StatefulSets(namespace).Get(name) + ps, err := scaler.c.StatefulSets(namespace).Get(name, metav1.GetOptions{}) if err != nil { return "", ScaleError{ScaleGetFailure, "Unknown", err} } @@ -368,7 +369,7 @@ func (scaler *StatefulSetScaler) Scale(namespace, name string, newSize uint, pre return err } if waitForReplicas != nil { - job, err := scaler.c.StatefulSets(namespace).Get(name) + job, err := scaler.c.StatefulSets(namespace).Get(name, metav1.GetOptions{}) if err != nil { return err } @@ -388,7 +389,7 @@ type JobScaler struct { // ScaleSimple is responsible for updating job's parallelism. It returns the // resourceVersion of the job if the update is successful. func (scaler *JobScaler) ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint) (string, error) { - job, err := scaler.c.Jobs(namespace).Get(name) + job, err := scaler.c.Jobs(namespace).Get(name, metav1.GetOptions{}) if err != nil { return "", ScaleError{ScaleGetFailure, "Unknown", err} } @@ -425,7 +426,7 @@ func (scaler *JobScaler) Scale(namespace, name string, newSize uint, preconditio return err } if waitForReplicas != nil { - job, err := scaler.c.Jobs(namespace).Get(name) + job, err := scaler.c.Jobs(namespace).Get(name, metav1.GetOptions{}) if err != nil { return err } @@ -457,7 +458,7 @@ type DeploymentScaler struct { // count. It returns the resourceVersion of the deployment if the update is // successful. 
func (scaler *DeploymentScaler) ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint) (string, error) { - deployment, err := scaler.c.Deployments(namespace).Get(name) + deployment, err := scaler.c.Deployments(namespace).Get(name, metav1.GetOptions{}) if err != nil { return "", ScaleError{ScaleGetFailure, "Unknown", err} } @@ -495,7 +496,7 @@ func (scaler *DeploymentScaler) Scale(namespace, name string, newSize uint, prec return err } if waitForReplicas != nil { - deployment, err := scaler.c.Deployments(namespace).Get(name) + deployment, err := scaler.c.Deployments(namespace).Get(name, metav1.GetOptions{}) if err != nil { return err } diff --git a/pkg/kubectl/scale_test.go b/pkg/kubectl/scale_test.go index 1460de215c..1407e2aa93 100644 --- a/pkg/kubectl/scale_test.go +++ b/pkg/kubectl/scale_test.go @@ -24,6 +24,7 @@ import ( kerrors "k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/apis/batch" "k8s.io/kubernetes/pkg/apis/extensions" + metav1 "k8s.io/kubernetes/pkg/apis/meta/v1" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" batchclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion" coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" @@ -277,7 +278,7 @@ func (c *ErrorJobs) Update(job *batch.Job) (*batch.Job, error) { return nil, errors.New("Job update failure") } -func (c *ErrorJobs) Get(name string) (*batch.Job, error) { +func (c *ErrorJobs) Get(name string, options metav1.GetOptions) (*batch.Job, error) { zero := int32(0) return &batch.Job{ Spec: batch.JobSpec{ @@ -540,7 +541,7 @@ func (c *ErrorDeployments) Update(deployment *extensions.Deployment) (*extension return nil, errors.New("deployment update failure") } -func (c *ErrorDeployments) Get(name string) (*extensions.Deployment, error) { +func (c *ErrorDeployments) Get(name string, options metav1.GetOptions) (*extensions.Deployment, error) { return &extensions.Deployment{ Spec: extensions.DeploymentSpec{ Replicas: 0, diff --git a/pkg/kubectl/stop.go b/pkg/kubectl/stop.go index a90e12df92..4c02dbd154 100644 --- a/pkg/kubectl/stop.go +++ b/pkg/kubectl/stop.go @@ -161,7 +161,7 @@ func getOverlappingControllers(rcClient coreclient.ReplicationControllerInterfac func (reaper *ReplicationControllerReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error { rc := reaper.client.ReplicationControllers(namespace) scaler := &ReplicationControllerScaler{reaper.client} - ctrl, err := rc.Get(name) + ctrl, err := rc.Get(name, metav1.GetOptions{}) if err != nil { return err } @@ -230,7 +230,7 @@ func getOverlappingReplicaSets(c extensionsclient.ReplicaSetInterface, rs *exten func (reaper *ReplicaSetReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error { rsc := reaper.client.ReplicaSets(namespace) scaler := &ReplicaSetScaler{reaper.client} - rs, err := rsc.Get(name) + rs, err := rsc.Get(name, metav1.GetOptions{}) if err != nil { return err } @@ -292,7 +292,7 @@ func (reaper *ReplicaSetReaper) Stop(namespace, name string, timeout time.Durati } func (reaper *DaemonSetReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error { - ds, err := reaper.client.DaemonSets(namespace).Get(name) + ds, err := reaper.client.DaemonSets(namespace).Get(name, metav1.GetOptions{}) if err != nil { return err } @@ -313,7 +313,7 @@ func (reaper *DaemonSetReaper) Stop(namespace, name 
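The ErrorJobs and ErrorDeployments fakes above show the mechanical shape of this migration for hand-written test doubles: Get grows an options parameter even when the implementation ignores it. A minimal sketch of the same pattern, assuming a hypothetical fakePods double that is not part of this patch:

package kubectl

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
)

// fakePods is a hypothetical test double, not part of this patch;
// only the Get signature matters here.
type fakePods struct {
	pods map[string]*api.Pod
}

// Get takes the new options parameter to satisfy the updated client
// interface, and ignores it, exactly as ErrorJobs above does.
func (f *fakePods) Get(name string, options metav1.GetOptions) (*api.Pod, error) {
	if pod, ok := f.pods[name]; ok {
		return pod, nil
	}
	return nil, fmt.Errorf("%s does not exist", name)
}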
diff --git a/pkg/kubectl/stop.go b/pkg/kubectl/stop.go
index a90e12df92..4c02dbd154 100644
--- a/pkg/kubectl/stop.go
+++ b/pkg/kubectl/stop.go
@@ -161,7 +161,7 @@ func getOverlappingControllers(rcClient coreclient.ReplicationControllerInterfac
 func (reaper *ReplicationControllerReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error {
 	rc := reaper.client.ReplicationControllers(namespace)
 	scaler := &ReplicationControllerScaler{reaper.client}
-	ctrl, err := rc.Get(name)
+	ctrl, err := rc.Get(name, metav1.GetOptions{})
 	if err != nil {
 		return err
 	}
@@ -230,7 +230,7 @@ func getOverlappingReplicaSets(c extensionsclient.ReplicaSetInterface, rs *exten
 func (reaper *ReplicaSetReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error {
 	rsc := reaper.client.ReplicaSets(namespace)
 	scaler := &ReplicaSetScaler{reaper.client}
-	rs, err := rsc.Get(name)
+	rs, err := rsc.Get(name, metav1.GetOptions{})
 	if err != nil {
 		return err
 	}
@@ -292,7 +292,7 @@ func (reaper *ReplicaSetReaper) Stop(namespace, name string, timeout time.Durati
 }
 
 func (reaper *DaemonSetReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error {
-	ds, err := reaper.client.DaemonSets(namespace).Get(name)
+	ds, err := reaper.client.DaemonSets(namespace).Get(name, metav1.GetOptions{})
 	if err != nil {
 		return err
 	}
@@ -313,7 +313,7 @@ func (reaper *DaemonSetReaper) Stop(namespace, name string, timeout time.Duratio
 
 	// Wait for the daemon set controller to kill all the daemon pods.
 	if err := wait.Poll(reaper.pollInterval, reaper.timeout, func() (bool, error) {
-		updatedDS, err := reaper.client.DaemonSets(namespace).Get(name)
+		updatedDS, err := reaper.client.DaemonSets(namespace).Get(name, metav1.GetOptions{})
 		if err != nil {
 			return false, nil
 		}
@@ -329,7 +329,7 @@ func (reaper *DaemonSetReaper) Stop(namespace, name string, timeout time.Duratio
 func (reaper *StatefulSetReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error {
 	statefulsets := reaper.client.StatefulSets(namespace)
 	scaler := &StatefulSetScaler{reaper.client}
-	ps, err := statefulsets.Get(name)
+	ps, err := statefulsets.Get(name, metav1.GetOptions{})
 	if err != nil {
 		return err
 	}
@@ -374,7 +374,7 @@ func (reaper *JobReaper) Stop(namespace, name string, timeout time.Duration, gra
 	jobs := reaper.client.Jobs(namespace)
 	pods := reaper.podClient.Pods(namespace)
 	scaler := &JobScaler{reaper.client}
-	job, err := jobs.Get(name)
+	job, err := jobs.Get(name, metav1.GetOptions{})
 	if err != nil {
 		return err
 	}
@@ -431,7 +431,7 @@ func (reaper *DeploymentReaper) Stop(namespace, name string, timeout time.Durati
 
 	// Use observedGeneration to determine if the deployment controller noticed the pause.
 	if err := deploymentutil.WaitForObservedDeploymentInternal(func() (*extensions.Deployment, error) {
-		return deployments.Get(name)
+		return deployments.Get(name, metav1.GetOptions{})
 	}, deployment.Generation, 1*time.Second, 1*time.Minute); err != nil {
 		return err
 	}
@@ -476,7 +476,7 @@ type updateDeploymentFunc func(d *extensions.Deployment)
 func (reaper *DeploymentReaper) updateDeploymentWithRetries(namespace, name string, applyUpdate updateDeploymentFunc) (deployment *extensions.Deployment, err error) {
 	deployments := reaper.dClient.Deployments(namespace)
 	err = wait.Poll(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
-		if deployment, err = deployments.Get(name); err != nil {
+		if deployment, err = deployments.Get(name, metav1.GetOptions{}); err != nil {
 			return false, err
 		}
 		// Apply the update, then attempt to push it to the apiserver.
@@ -495,7 +495,7 @@ func (reaper *DeploymentReaper) updateDeploymentWithRetries(namespace, name stri
 
 func (reaper *PodReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error {
 	pods := reaper.client.Pods(namespace)
-	_, err := pods.Get(name)
+	_, err := pods.Get(name, metav1.GetOptions{})
 	if err != nil {
 		return err
 	}
@@ -504,7 +504,7 @@ func (reaper *PodReaper) Stop(namespace, name string, timeout time.Duration, gra
 
 func (reaper *ServiceReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error {
 	services := reaper.client.Services(namespace)
-	_, err := services.Get(name)
+	_, err := services.Get(name, metav1.GetOptions{})
 	if err != nil {
 		return err
 	}
diff --git a/pkg/kubectl/stop_test.go b/pkg/kubectl/stop_test.go
index 7d16fdbb90..91a2a26aa1 100644
--- a/pkg/kubectl/stop_test.go
+++ b/pkg/kubectl/stop_test.go
@@ -527,7 +527,7 @@ type noSuchPod struct {
 	coreclient.PodInterface
 }
 
-func (c *noSuchPod) Get(name string) (*api.Pod, error) {
+func (c *noSuchPod) Get(name string, options metav1.GetOptions) (*api.Pod, error) {
 	return nil, fmt.Errorf("%s does not exist", name)
 }
 
diff --git a/pkg/kubelet/client/kubelet_client.go b/pkg/kubelet/client/kubelet_client.go
index 0e593ccc9a..612dca57e9 100644
--- a/pkg/kubelet/client/kubelet_client.go
+++ b/pkg/kubelet/client/kubelet_client.go
@@ -24,6 +24,7 @@ import (
 
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/v1"
+	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 	"k8s.io/kubernetes/pkg/client/restclient"
 	"k8s.io/kubernetes/pkg/client/transport"
 	"k8s.io/kubernetes/pkg/types"
@@ -103,14 +104,14 @@ func (c *KubeletClientConfig) transportConfig() *transport.Config {
 
 // NodeGetter defines an interface for looking up a node by name
 type NodeGetter interface {
-	Get(name string) (*v1.Node, error)
+	Get(name string, options metav1.GetOptions) (*v1.Node, error)
 }
 
 // NodeGetterFunc allows implementing NodeGetter with a function
-type NodeGetterFunc func(name string) (*v1.Node, error)
+type NodeGetterFunc func(name string, options metav1.GetOptions) (*v1.Node, error)
 
-func (f NodeGetterFunc) Get(name string) (*v1.Node, error) {
-	return f(name)
+func (f NodeGetterFunc) Get(name string, options metav1.GetOptions) (*v1.Node, error) {
+	return f(name, options)
 }
 
 // NodeConnectionInfoGetter obtains connection info from the status of a Node API object
@@ -154,7 +155,7 @@ func NewNodeConnectionInfoGetter(nodes NodeGetter, config KubeletClientConfig) (
 }
 
 func (k *NodeConnectionInfoGetter) GetConnectionInfo(ctx api.Context, nodeName types.NodeName) (*ConnectionInfo, error) {
-	node, err := k.nodes.Get(string(nodeName))
+	node, err := k.nodes.Get(string(nodeName), metav1.GetOptions{})
 	if err != nil {
 		return nil, err
 	}
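The NodeGetter and NodeGetterFunc changes above are the interface-level piece of this migration: adapters must now accept and forward the options rather than drop them. A sketch of a clientset-backed getter under the new contract, assuming a hypothetical clientNodeGetter helper that is not part of this patch:

package example

import (
	"k8s.io/kubernetes/pkg/api/v1"
	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
	kubeletclient "k8s.io/kubernetes/pkg/kubelet/client"
)

// clientNodeGetter adapts a versioned clientset to the new NodeGetter
// contract; the options argument is forwarded to the API call unchanged
// rather than being replaced with the zero value.
func clientNodeGetter(c clientset.Interface) kubeletclient.NodeGetter {
	return kubeletclient.NodeGetterFunc(func(name string, options metav1.GetOptions) (*v1.Node, error) {
		return c.Core().Nodes().Get(name, options)
	})
}

Because NodeGetterFunc itself satisfies NodeGetter, such an adapter can back a NodeConnectionInfoGetter exactly as before; only the signature threading changes.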
diff --git a/pkg/kubelet/kubelet_node_status.go b/pkg/kubelet/kubelet_node_status.go
index 9b9de26d6e..0f6019f96e 100644
--- a/pkg/kubelet/kubelet_node_status.go
+++ b/pkg/kubelet/kubelet_node_status.go
@@ -100,7 +100,7 @@ func (kl *Kubelet) tryRegisterWithApiServer(node *v1.Node) bool {
 		return false
 	}
 
-	existingNode, err := kl.kubeClient.Core().Nodes().Get(string(kl.nodeName))
+	existingNode, err := kl.kubeClient.Core().Nodes().Get(string(kl.nodeName), metav1.GetOptions{})
 	if err != nil {
 		glog.Errorf("Unable to register node %q with API server: error getting existing node: %v", kl.nodeName, err)
 		return false
diff --git a/pkg/kubelet/kubelet_pods.go b/pkg/kubelet/kubelet_pods.go
index 32f3ca3506..4f2c007ef6 100644
--- a/pkg/kubelet/kubelet_pods.go
+++ b/pkg/kubelet/kubelet_pods.go
@@ -466,7 +466,7 @@ func (kl *Kubelet) makeEnvironmentVariables(pod *v1.Pod, container *v1.Container
 				if kl.kubeClient == nil {
 					return result, fmt.Errorf("Couldn't get configMap %v/%v, no kubeClient defined", pod.Namespace, name)
 				}
-				configMap, err = kl.kubeClient.Core().ConfigMaps(pod.Namespace).Get(name)
+				configMap, err = kl.kubeClient.Core().ConfigMaps(pod.Namespace).Get(name, metav1.GetOptions{})
 				if err != nil {
 					return result, err
 				}
@@ -484,7 +484,7 @@ func (kl *Kubelet) makeEnvironmentVariables(pod *v1.Pod, container *v1.Container
 				if kl.kubeClient == nil {
 					return result, fmt.Errorf("Couldn't get secret %v/%v, no kubeClient defined", pod.Namespace, name)
 				}
-				secret, err = kl.kubeClient.Core().Secrets(pod.Namespace).Get(name)
+				secret, err = kl.kubeClient.Core().Secrets(pod.Namespace).Get(name, metav1.GetOptions{})
 				if err != nil {
 					return result, err
 				}
@@ -606,7 +606,7 @@ func (kl *Kubelet) getPullSecretsForPod(pod *v1.Pod) ([]v1.Secret, error) {
 	pullSecrets := []v1.Secret{}
 
 	for _, secretRef := range pod.Spec.ImagePullSecrets {
-		secret, err := kl.kubeClient.Core().Secrets(pod.Namespace).Get(secretRef.Name)
+		secret, err := kl.kubeClient.Core().Secrets(pod.Namespace).Get(secretRef.Name, metav1.GetOptions{})
 		if err != nil {
 			glog.Warningf("Unable to retrieve pull secret %s/%s for %s/%s due to %v. The image pull may not succeed.", pod.Namespace, secretRef.Name, pod.Namespace, pod.Name, err)
 			continue
@@ -1497,13 +1497,13 @@ func hasHostNamespace(pod *v1.Pod) bool {
 func (kl *Kubelet) hasHostMountPVC(pod *v1.Pod) bool {
 	for _, volume := range pod.Spec.Volumes {
 		if volume.PersistentVolumeClaim != nil {
-			pvc, err := kl.kubeClient.Core().PersistentVolumeClaims(pod.Namespace).Get(volume.PersistentVolumeClaim.ClaimName)
+			pvc, err := kl.kubeClient.Core().PersistentVolumeClaims(pod.Namespace).Get(volume.PersistentVolumeClaim.ClaimName, metav1.GetOptions{})
 			if err != nil {
 				glog.Warningf("unable to retrieve pvc %s:%s - %v", pod.Namespace, volume.PersistentVolumeClaim.ClaimName, err)
 				continue
 			}
 			if pvc != nil {
-				referencedVolume, err := kl.kubeClient.Core().PersistentVolumes().Get(pvc.Spec.VolumeName)
+				referencedVolume, err := kl.kubeClient.Core().PersistentVolumes().Get(pvc.Spec.VolumeName, metav1.GetOptions{})
 				if err != nil {
 					glog.Warningf("unable to retrieve pvc %s - %v", pvc.Spec.VolumeName, err)
 					continue
diff --git a/pkg/kubelet/status/status_manager.go b/pkg/kubelet/status/status_manager.go
index c8cd617cf2..1b969c5b40 100644
--- a/pkg/kubelet/status/status_manager.go
+++ b/pkg/kubelet/status/status_manager.go
@@ -406,7 +406,7 @@ func (m *manager) syncPod(uid types.UID, status versionedPodStatus) {
 	}
 
 	// TODO: make me easier to express from client code
-	pod, err := m.kubeClient.Core().Pods(status.podNamespace).Get(status.podName)
+	pod, err := m.kubeClient.Core().Pods(status.podNamespace).Get(status.podName, metav1.GetOptions{})
 	if errors.IsNotFound(err) {
 		glog.V(3).Infof("Pod %q (%s) does not exist on the server", status.podName, uid)
 		// If the Pod is deleted the status will be cleared in
diff --git a/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go b/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go
index 84f10551c9..be53e320a8 100644
--- a/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go
+++ b/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go
@@ -29,6 +29,7 @@ import (
 
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/v1"
+	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 	"k8s.io/kubernetes/pkg/kubelet/pod"
@@ -366,7 +367,7 @@ func (dswp *desiredStateOfWorldPopulator) createVolumeSpec(
 func (dswp *desiredStateOfWorldPopulator) getPVCExtractPV(
 	namespace string, claimName string) (string, types.UID, error) {
 	pvc, err :=
-		dswp.kubeClient.Core().PersistentVolumeClaims(namespace).Get(claimName)
+		dswp.kubeClient.Core().PersistentVolumeClaims(namespace).Get(claimName, metav1.GetOptions{})
 	if err != nil || pvc == nil {
 		return "", "", fmt.Errorf(
 			"failed to fetch PVC %s/%s from API server. err=%v",
@@ -394,7 +395,7 @@ func (dswp *desiredStateOfWorldPopulator) getPVSpec(
 	name string,
 	pvcReadOnly bool,
 	expectedClaimUID types.UID) (*volume.Spec, string, error) {
-	pv, err := dswp.kubeClient.Core().PersistentVolumes().Get(name)
+	pv, err := dswp.kubeClient.Core().PersistentVolumes().Get(name, metav1.GetOptions{})
 	if err != nil || pv == nil {
 		return nil, "", fmt.Errorf(
 			"failed to fetch PV %q from API server. err=%v", name, err)
diff --git a/pkg/kubelet/volumemanager/reconciler/reconciler.go b/pkg/kubelet/volumemanager/reconciler/reconciler.go
index c2da681dfd..d50c642cba 100644
--- a/pkg/kubelet/volumemanager/reconciler/reconciler.go
+++ b/pkg/kubelet/volumemanager/reconciler/reconciler.go
@@ -28,6 +28,7 @@ import (
 	"github.com/golang/glog"
 	"k8s.io/kubernetes/cmd/kubelet/app/options"
 	"k8s.io/kubernetes/pkg/api/v1"
+	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
 	"k8s.io/kubernetes/pkg/kubelet/config"
 	"k8s.io/kubernetes/pkg/kubelet/volumemanager/cache"
@@ -548,7 +549,7 @@ func (rc *reconciler) reconstructVolume(volume podVolume) (*reconstructedVolume,
 
 func (rc *reconciler) updateStates(volumesNeedUpdate map[v1.UniqueVolumeName]*reconstructedVolume) error {
 	// Get the node status to retrieve volume device path information.
-	node, fetchErr := rc.kubeClient.Core().Nodes().Get(string(rc.nodeName))
+	node, fetchErr := rc.kubeClient.Core().Nodes().Get(string(rc.nodeName), metav1.GetOptions{})
 	if fetchErr != nil {
 		glog.Errorf("updateStates in reconciler: could not get node status with error %v", fetchErr)
 	} else {
diff --git a/pkg/master/controller.go b/pkg/master/controller.go
index 2552c49424..b9d9421c0f 100644
--- a/pkg/master/controller.go
+++ b/pkg/master/controller.go
@@ -25,6 +25,7 @@ import (
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/endpoints"
 	"k8s.io/kubernetes/pkg/api/errors"
+	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 	coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
 	"k8s.io/kubernetes/pkg/genericapiserver"
 	"k8s.io/kubernetes/pkg/registry/core/rangeallocation"
@@ -185,7 +186,7 @@ func (c *Controller) UpdateKubernetesService(reconcile bool) error {
 
 // CreateNamespaceIfNeeded will create a namespace if it doesn't already exist
 func (c *Controller) CreateNamespaceIfNeeded(ns string) error {
-	if _, err := c.NamespaceClient.Namespaces().Get(ns); err == nil {
+	if _, err := c.NamespaceClient.Namespaces().Get(ns, metav1.GetOptions{}); err == nil {
 		// the namespace already exists
 		return nil
 	}
@@ -237,7 +238,7 @@ func createEndpointPortSpec(endpointPort int, endpointPortName string, extraEndp
 // CreateMasterServiceIfNeeded will create the specified service if it
 // doesn't already exist.
 func (c *Controller) CreateOrUpdateMasterServiceIfNeeded(serviceName string, serviceIP net.IP, servicePorts []api.ServicePort, serviceType api.ServiceType, reconcile bool) error {
-	if s, err := c.ServiceClient.Services(api.NamespaceDefault).Get(serviceName); err == nil {
+	if s, err := c.ServiceClient.Services(api.NamespaceDefault).Get(serviceName, metav1.GetOptions{}); err == nil {
 		// The service already exists.
 		if reconcile {
 			if svc, updated := getMasterServiceUpdateIfNeeded(s, servicePorts, serviceType); updated {
@@ -317,7 +318,7 @@ func NewMasterCountEndpointReconciler(masterCount int, endpointClient coreclient
 //   to be running (c.masterCount).
 // * ReconcileEndpoints is called periodically from all apiservers.
 func (r *masterCountEndpointReconciler) ReconcileEndpoints(serviceName string, ip net.IP, endpointPorts []api.EndpointPort, reconcilePorts bool) error {
-	e, err := r.endpointClient.Endpoints(api.NamespaceDefault).Get(serviceName)
+	e, err := r.endpointClient.Endpoints(api.NamespaceDefault).Get(serviceName, metav1.GetOptions{})
 	if err != nil {
 		e = &api.Endpoints{
 			ObjectMeta: api.ObjectMeta{
diff --git a/pkg/quota/evaluator/core/pods.go b/pkg/quota/evaluator/core/pods.go
index bc7c2d0ca4..f0e23f396b 100644
--- a/pkg/quota/evaluator/core/pods.go
+++ b/pkg/quota/evaluator/core/pods.go
@@ -25,6 +25,7 @@ import (
 	"k8s.io/kubernetes/pkg/api/resource"
 	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/api/validation"
+	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
 	"k8s.io/kubernetes/pkg/controller/informers"
 	"k8s.io/kubernetes/pkg/kubelet/qos"
@@ -79,7 +80,7 @@ func NewPodEvaluator(kubeClient clientset.Interface, f informers.SharedInformerF
 			// admission.Update: computeResources,
 		},
 		GetFuncByNamespace: func(namespace, name string) (runtime.Object, error) {
-			return kubeClient.Core().Pods(namespace).Get(name)
+			return kubeClient.Core().Pods(namespace).Get(name, metav1.GetOptions{})
 		},
 		ConstraintsFunc:      PodConstraintsFunc,
 		MatchedResourceNames: allResources,
diff --git a/pkg/registry/core/node/etcd/etcd.go b/pkg/registry/core/node/etcd/etcd.go
index 535e951265..87a57a88e5 100644
--- a/pkg/registry/core/node/etcd/etcd.go
+++ b/pkg/registry/core/node/etcd/etcd.go
@@ -118,8 +118,8 @@ func NewStorage(opts generic.RESTOptions, kubeletClientConfig client.KubeletClie
 	proxyREST := &noderest.ProxyREST{Store: store, ProxyTransport: proxyTransport}
 
 	// Build a NodeGetter that looks up nodes using the REST handler
-	nodeGetter := client.NodeGetterFunc(func(nodeName string) (*v1.Node, error) {
-		obj, err := nodeREST.Get(api.NewContext(), nodeName, &metav1.GetOptions{})
+	nodeGetter := client.NodeGetterFunc(func(nodeName string, options metav1.GetOptions) (*v1.Node, error) {
+		obj, err := nodeREST.Get(api.NewContext(), nodeName, &options)
 		if err != nil {
 			return nil, err
 		}
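The etcd.go hunk above is the one getter in this section that forwards caller-supplied options (converted to the pointer form the REST handler takes) instead of passing the zero value. A sketch of what the parameter buys a caller, assuming a hypothetical getNodeCached helper; treating ResourceVersion "0" as a cached-read hint is an assumption about GetOptions, not something this patch establishes:

package example

import (
	"k8s.io/kubernetes/pkg/api/v1"
	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
)

// getNodeCached is a hypothetical helper: the same one-line migration
// seen throughout this patch, but with non-empty options to show why
// the parameter exists at all.
func getNodeCached(c clientset.Interface, name string) (*v1.Node, error) {
	// ResourceVersion "0" requests a potentially stale read; this is
	// an assumed use of the field, not one this patch makes anywhere.
	return c.Core().Nodes().Get(name, metav1.GetOptions{ResourceVersion: "0"})
}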
"", "", fmt.Errorf("Couldn't get secret %v/%v", nameSpace, secretName) } diff --git a/pkg/volume/cephfs/cephfs.go b/pkg/volume/cephfs/cephfs.go index b7243f669c..258f3afa6b 100644 --- a/pkg/volume/cephfs/cephfs.go +++ b/pkg/volume/cephfs/cephfs.go @@ -23,6 +23,7 @@ import ( "github.com/golang/glog" "k8s.io/kubernetes/pkg/api/v1" + metav1 "k8s.io/kubernetes/pkg/apis/meta/v1" "k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/util/mount" utilstrings "k8s.io/kubernetes/pkg/util/strings" @@ -90,7 +91,7 @@ func (plugin *cephfsPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume. return nil, fmt.Errorf("Cannot get kube client") } - secretName, err := kubeClient.Core().Secrets(pod.Namespace).Get(cephvs.SecretRef.Name) + secretName, err := kubeClient.Core().Secrets(pod.Namespace).Get(cephvs.SecretRef.Name, metav1.GetOptions{}) if err != nil { err = fmt.Errorf("Couldn't get secret %v/%v err: %v", pod.Namespace, cephvs.SecretRef, err) return nil, err diff --git a/pkg/volume/configmap/configmap.go b/pkg/volume/configmap/configmap.go index 9c3e5e2005..e1e39cf25f 100644 --- a/pkg/volume/configmap/configmap.go +++ b/pkg/volume/configmap/configmap.go @@ -21,6 +21,7 @@ import ( "github.com/golang/glog" "k8s.io/kubernetes/pkg/api/v1" + metav1 "k8s.io/kubernetes/pkg/apis/meta/v1" "k8s.io/kubernetes/pkg/types" ioutil "k8s.io/kubernetes/pkg/util/io" "k8s.io/kubernetes/pkg/util/mount" @@ -169,7 +170,7 @@ func (b *configMapVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { return fmt.Errorf("Cannot setup configMap volume %v because kube client is not configured", b.volName) } - configMap, err := kubeClient.Core().ConfigMaps(b.pod.Namespace).Get(b.source.Name) + configMap, err := kubeClient.Core().ConfigMaps(b.pod.Namespace).Get(b.source.Name, metav1.GetOptions{}) if err != nil { glog.Errorf("Couldn't get configMap %v/%v: %v", b.pod.Namespace, b.source.Name, err) return err diff --git a/pkg/volume/flexvolume/flexvolume.go b/pkg/volume/flexvolume/flexvolume.go index 73a84ebf77..4c468e4b40 100644 --- a/pkg/volume/flexvolume/flexvolume.go +++ b/pkg/volume/flexvolume/flexvolume.go @@ -26,6 +26,7 @@ import ( "github.com/golang/glog" "k8s.io/kubernetes/pkg/api/v1" + metav1 "k8s.io/kubernetes/pkg/apis/meta/v1" "k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/util/exec" "k8s.io/kubernetes/pkg/util/mount" @@ -117,7 +118,7 @@ func (plugin *flexVolumePlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ vol return nil, fmt.Errorf("Cannot get kube client") } - secretName, err := kubeClient.Core().Secrets(pod.Namespace).Get(fv.SecretRef.Name) + secretName, err := kubeClient.Core().Secrets(pod.Namespace).Get(fv.SecretRef.Name, metav1.GetOptions{}) if err != nil { err = fmt.Errorf("Couldn't get secret %v/%v err: %v", pod.Namespace, fv.SecretRef, err) return nil, err diff --git a/pkg/volume/glusterfs/glusterfs.go b/pkg/volume/glusterfs/glusterfs.go index cd30036645..1b46c4e713 100644 --- a/pkg/volume/glusterfs/glusterfs.go +++ b/pkg/volume/glusterfs/glusterfs.go @@ -32,6 +32,7 @@ import ( "k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/v1" + metav1 "k8s.io/kubernetes/pkg/apis/meta/v1" storageutil "k8s.io/kubernetes/pkg/apis/storage/v1beta1/util" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5" "k8s.io/kubernetes/pkg/labels" @@ -123,7 +124,7 @@ func (plugin *glusterfsPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volu ep_name := source.EndpointsName // PVC/POD is in same ns. 
ns := pod.Namespace - ep, err := plugin.host.GetKubeClient().Core().Endpoints(ns).Get(ep_name) + ep, err := plugin.host.GetKubeClient().Core().Endpoints(ns).Get(ep_name, metav1.GetOptions{}) if err != nil { glog.Errorf("glusterfs: failed to get endpoints %s[%v]", ep_name, err) return nil, err diff --git a/pkg/volume/secret/secret.go b/pkg/volume/secret/secret.go index 73aca3828e..a62a128377 100644 --- a/pkg/volume/secret/secret.go +++ b/pkg/volume/secret/secret.go @@ -23,6 +23,7 @@ import ( "github.com/golang/glog" "k8s.io/kubernetes/pkg/api/v1" + metav1 "k8s.io/kubernetes/pkg/apis/meta/v1" "k8s.io/kubernetes/pkg/types" ioutil "k8s.io/kubernetes/pkg/util/io" "k8s.io/kubernetes/pkg/util/mount" @@ -192,7 +193,7 @@ func (b *secretVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { return fmt.Errorf("Cannot setup secret volume %v because kube client is not configured", b.volName) } - secret, err := kubeClient.Core().Secrets(b.pod.Namespace).Get(b.source.SecretName) + secret, err := kubeClient.Core().Secrets(b.pod.Namespace).Get(b.source.SecretName, metav1.GetOptions{}) if err != nil { glog.Errorf("Couldn't get secret %v/%v", b.pod.Namespace, b.source.SecretName) return err diff --git a/pkg/volume/util.go b/pkg/volume/util.go index b9879b9280..7e70ad805f 100644 --- a/pkg/volume/util.go +++ b/pkg/volume/util.go @@ -21,6 +21,7 @@ import ( "reflect" "k8s.io/kubernetes/pkg/api/v1" + metav1 "k8s.io/kubernetes/pkg/apis/meta/v1" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/watch" @@ -162,7 +163,7 @@ func (c *realRecyclerClient) CreatePod(pod *v1.Pod) (*v1.Pod, error) { } func (c *realRecyclerClient) GetPod(name, namespace string) (*v1.Pod, error) { - return c.client.Core().Pods(namespace).Get(name) + return c.client.Core().Pods(namespace).Get(name, metav1.GetOptions{}) } func (c *realRecyclerClient) DeletePod(name, namespace string) error { diff --git a/pkg/volume/util/operationexecutor/operation_executor.go b/pkg/volume/util/operationexecutor/operation_executor.go index 0829226a17..9d1e2d24ce 100644 --- a/pkg/volume/util/operationexecutor/operation_executor.go +++ b/pkg/volume/util/operationexecutor/operation_executor.go @@ -28,6 +28,7 @@ import ( "github.com/golang/glog" "k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/v1" + metav1 "k8s.io/kubernetes/pkg/apis/meta/v1" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5" "k8s.io/kubernetes/pkg/client/record" kevents "k8s.io/kubernetes/pkg/kubelet/events" @@ -710,7 +711,7 @@ func (oe *operationExecutor) generateDetachVolumeFunc( func (oe *operationExecutor) verifyVolumeIsSafeToDetach( volumeToDetach AttachedVolume) error { // Fetch current node object - node, fetchErr := oe.kubeClient.Core().Nodes().Get(string(volumeToDetach.NodeName)) + node, fetchErr := oe.kubeClient.Core().Nodes().Get(string(volumeToDetach.NodeName), metav1.GetOptions{}) if fetchErr != nil { if errors.IsNotFound(fetchErr) { glog.Warningf("Node %q not found on API server. DetachVolume will skip safe to detach check.", @@ -1185,7 +1186,7 @@ func (oe *operationExecutor) generateVerifyControllerAttachedVolumeFunc( } // Fetch current node object - node, fetchErr := oe.kubeClient.Core().Nodes().Get(string(nodeName)) + node, fetchErr := oe.kubeClient.Core().Nodes().Get(string(nodeName), metav1.GetOptions{}) if fetchErr != nil { // On failure, return error. Caller will log and retry. 
return fmt.Errorf( diff --git a/pkg/volume/util/util.go b/pkg/volume/util/util.go index 8078869d6d..bb89ad7009 100644 --- a/pkg/volume/util/util.go +++ b/pkg/volume/util/util.go @@ -23,6 +23,7 @@ import ( "github.com/golang/glog" "k8s.io/kubernetes/pkg/api/v1" + metav1 "k8s.io/kubernetes/pkg/apis/meta/v1" storage "k8s.io/kubernetes/pkg/apis/storage/v1beta1" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5" "k8s.io/kubernetes/pkg/util/mount" @@ -118,7 +119,7 @@ func GetSecretForPod(pod *v1.Pod, secretName string, kubeClient clientset.Interf if kubeClient == nil { return secret, fmt.Errorf("Cannot get kube client") } - secrets, err := kubeClient.Core().Secrets(pod.Namespace).Get(secretName) + secrets, err := kubeClient.Core().Secrets(pod.Namespace).Get(secretName, metav1.GetOptions{}) if err != nil { return secret, err } @@ -134,7 +135,7 @@ func GetSecretForPV(secretNamespace, secretName, volumePluginName string, kubeCl if kubeClient == nil { return secret, fmt.Errorf("Cannot get kube client") } - secrets, err := kubeClient.Core().Secrets(secretNamespace).Get(secretName) + secrets, err := kubeClient.Core().Secrets(secretNamespace).Get(secretName, metav1.GetOptions{}) if err != nil { return secret, err } @@ -154,7 +155,7 @@ func GetClassForVolume(kubeClient clientset.Interface, pv *v1.PersistentVolume) return nil, fmt.Errorf("Volume has no class annotation") } - class, err := kubeClient.Storage().StorageClasses().Get(className) + class, err := kubeClient.Storage().StorageClasses().Get(className, metav1.GetOptions{}) if err != nil { return nil, err } diff --git a/plugin/pkg/admission/exec/admission.go b/plugin/pkg/admission/exec/admission.go index 05426c51b9..10fa862653 100644 --- a/plugin/pkg/admission/exec/admission.go +++ b/plugin/pkg/admission/exec/admission.go @@ -26,6 +26,7 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/rest" + metav1 "k8s.io/kubernetes/pkg/apis/meta/v1" ) func init() { @@ -86,7 +87,7 @@ func (d *denyExec) Admit(a admission.Attributes) (err error) { if connectRequest.ResourcePath != "pods/exec" && connectRequest.ResourcePath != "pods/attach" { return nil } - pod, err := d.client.Core().Pods(a.GetNamespace()).Get(connectRequest.Name) + pod, err := d.client.Core().Pods(a.GetNamespace()).Get(connectRequest.Name, metav1.GetOptions{}) if err != nil { return admission.NewForbidden(a, err) } diff --git a/plugin/pkg/admission/namespace/exists/admission.go b/plugin/pkg/admission/namespace/exists/admission.go index 41e31c20b5..490bc3e8d0 100644 --- a/plugin/pkg/admission/namespace/exists/admission.go +++ b/plugin/pkg/admission/namespace/exists/admission.go @@ -27,6 +27,7 @@ import ( "k8s.io/kubernetes/pkg/admission" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/errors" + metav1 "k8s.io/kubernetes/pkg/apis/meta/v1" "k8s.io/kubernetes/pkg/controller/informers" ) @@ -75,7 +76,7 @@ func (e *exists) Admit(a admission.Attributes) (err error) { } // in case of latency in our caches, make a call direct to storage to verify that it truly exists or not - _, err = e.client.Core().Namespaces().Get(a.GetNamespace()) + _, err = e.client.Core().Namespaces().Get(a.GetNamespace(), metav1.GetOptions{}) if err != nil { if errors.IsNotFound(err) { return err diff --git a/plugin/pkg/admission/namespace/lifecycle/admission.go b/plugin/pkg/admission/namespace/lifecycle/admission.go index 2547e36bc7..ee448c966d 100644 --- a/plugin/pkg/admission/namespace/lifecycle/admission.go +++ 
b/plugin/pkg/admission/namespace/lifecycle/admission.go @@ -30,6 +30,7 @@ import ( "k8s.io/kubernetes/pkg/admission" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/errors" + metav1 "k8s.io/kubernetes/pkg/apis/meta/v1" utilcache "k8s.io/kubernetes/pkg/util/cache" "k8s.io/kubernetes/pkg/util/clock" "k8s.io/kubernetes/pkg/util/sets" @@ -141,7 +142,7 @@ func (l *lifecycle) Admit(a admission.Attributes) error { // refuse to operate on non-existent namespaces if !exists || forceLiveLookup { // as a last resort, make a call directly to storage - namespaceObj, err = l.client.Core().Namespaces().Get(a.GetNamespace()) + namespaceObj, err = l.client.Core().Namespaces().Get(a.GetNamespace(), metav1.GetOptions{}) if err != nil { if errors.IsNotFound(err) { return err diff --git a/plugin/pkg/admission/podnodeselector/admission.go b/plugin/pkg/admission/podnodeselector/admission.go index 14be567144..680e0b903b 100644 --- a/plugin/pkg/admission/podnodeselector/admission.go +++ b/plugin/pkg/admission/podnodeselector/admission.go @@ -26,6 +26,7 @@ import ( "k8s.io/kubernetes/pkg/admission" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/errors" + metav1 "k8s.io/kubernetes/pkg/apis/meta/v1" "k8s.io/kubernetes/pkg/client/cache" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/controller/informers" @@ -177,7 +178,7 @@ func (p *podNodeSelector) Validate() error { } func (p *podNodeSelector) defaultGetNamespace(name string) (*api.Namespace, error) { - namespace, err := p.client.Core().Namespaces().Get(name) + namespace, err := p.client.Core().Namespaces().Get(name, metav1.GetOptions{}) if err != nil { return nil, fmt.Errorf("namespace %s does not exist", name) } diff --git a/plugin/pkg/admission/serviceaccount/admission.go b/plugin/pkg/admission/serviceaccount/admission.go index 980973accd..09652eb404 100644 --- a/plugin/pkg/admission/serviceaccount/admission.go +++ b/plugin/pkg/admission/serviceaccount/admission.go @@ -30,6 +30,7 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/v1" + metav1 "k8s.io/kubernetes/pkg/apis/meta/v1" "k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/fields" kubelet "k8s.io/kubernetes/pkg/kubelet/types" @@ -265,7 +266,7 @@ func (s *serviceAccount) getServiceAccount(namespace string, name string) (*api. if i != 0 { time.Sleep(retryInterval) } - serviceAccount, err := s.client.Core().ServiceAccounts(namespace).Get(name) + serviceAccount, err := s.client.Core().ServiceAccounts(namespace).Get(name, metav1.GetOptions{}) if err == nil { return serviceAccount, nil } diff --git a/plugin/pkg/scheduler/factory/factory.go b/plugin/pkg/scheduler/factory/factory.go index d47434442e..c67958a7f7 100644 --- a/plugin/pkg/scheduler/factory/factory.go +++ b/plugin/pkg/scheduler/factory/factory.go @@ -28,6 +28,7 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/v1" + metav1 "k8s.io/kubernetes/pkg/apis/meta/v1" "k8s.io/kubernetes/pkg/client/cache" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5" "k8s.io/kubernetes/pkg/controller/informers" @@ -601,7 +602,7 @@ func (factory *ConfigFactory) makeDefaultErrorFunc(backoff *podBackoff, podQueue // Get the pod again; it may have changed/been scheduled already. 
getBackoff := initialGetBackoff for { - pod, err := factory.Client.Core().Pods(podID.Namespace).Get(podID.Name) + pod, err := factory.Client.Core().Pods(podID.Namespace).Get(podID.Name, metav1.GetOptions{}) if err == nil { if len(pod.Spec.NodeName) == 0 { podQueue.AddIfNotPresent(pod)