Convert core code to metav1.ListOptions

Clayton Coleman 2017-01-21 19:05:19 -05:00
parent a35be4e02e
commit 245b592fac
12 changed files with 40 additions and 37 deletions


@@ -20,6 +20,7 @@ import (
 	"time"
 
 	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/watch"
@@ -32,16 +33,16 @@ import (
 type ListerWatcher interface {
 	// List should return a list type object; the Items field will be extracted, and the
 	// ResourceVersion field will be used to start the watch in the right place.
-	List(options v1.ListOptions) (runtime.Object, error)
+	List(options metav1.ListOptions) (runtime.Object, error)
 	// Watch should begin a watch at the specified version.
-	Watch(options v1.ListOptions) (watch.Interface, error)
+	Watch(options metav1.ListOptions) (watch.Interface, error)
 }
 
 // ListFunc knows how to list resources
-type ListFunc func(options v1.ListOptions) (runtime.Object, error)
+type ListFunc func(options metav1.ListOptions) (runtime.Object, error)
 
 // WatchFunc knows how to watch resources
-type WatchFunc func(options v1.ListOptions) (watch.Interface, error)
+type WatchFunc func(options metav1.ListOptions) (watch.Interface, error)
 
 // ListWatch knows how to list and watch a set of apiserver resources. It satisfies the ListerWatcher interface.
 // It is a convenience function for users of NewReflector, etc.
@@ -58,28 +59,28 @@ type Getter interface {
 // NewListWatchFromClient creates a new ListWatch from the specified client, resource, namespace and field selector.
 func NewListWatchFromClient(c Getter, resource string, namespace string, fieldSelector fields.Selector) *ListWatch {
-	listFunc := func(options v1.ListOptions) (runtime.Object, error) {
+	listFunc := func(options metav1.ListOptions) (runtime.Object, error) {
 		return c.Get().
 			Namespace(namespace).
 			Resource(resource).
-			VersionedParams(&options, api.ParameterCodec).
+			VersionedParams(&options, metav1.ParameterCodec).
 			FieldsSelectorParam(fieldSelector).
 			Do().
 			Get()
 	}
-	watchFunc := func(options v1.ListOptions) (watch.Interface, error) {
+	watchFunc := func(options metav1.ListOptions) (watch.Interface, error) {
 		return c.Get().
 			Prefix("watch").
 			Namespace(namespace).
 			Resource(resource).
-			VersionedParams(&options, api.ParameterCodec).
+			VersionedParams(&options, metav1.ParameterCodec).
 			FieldsSelectorParam(fieldSelector).
 			Watch()
 	}
 	return &ListWatch{ListFunc: listFunc, WatchFunc: watchFunc}
 }
 
-func timeoutFromListOptions(options v1.ListOptions) time.Duration {
+func timeoutFromListOptions(options metav1.ListOptions) time.Duration {
 	if options.TimeoutSeconds != nil {
 		return time.Duration(*options.TimeoutSeconds) * time.Second
 	}
@@ -87,12 +88,12 @@ func timeoutFromListOptions(options v1.ListOptions) time.Duration {
 }
 
 // List a set of apiserver resources
-func (lw *ListWatch) List(options v1.ListOptions) (runtime.Object, error) {
+func (lw *ListWatch) List(options metav1.ListOptions) (runtime.Object, error) {
 	return lw.ListFunc(options)
 }
 
 // Watch a set of apiserver resources
-func (lw *ListWatch) Watch(options v1.ListOptions) (watch.Interface, error) {
+func (lw *ListWatch) Watch(options metav1.ListOptions) (watch.Interface, error) {
 	return lw.WatchFunc(options)
 }
@@ -102,7 +103,7 @@ func ListWatchUntil(timeout time.Duration, lw ListerWatcher, conditions ...watch
 		return nil, nil
 	}
 
-	list, err := lw.List(v1.ListOptions{})
+	list, err := lw.List(metav1.ListOptions{})
 	if err != nil {
 		return nil, err
 	}
@@ -154,7 +155,7 @@ func ListWatchUntil(timeout time.Duration, lw ListerWatcher, conditions ...watch
 	}
 	currResourceVersion := metaObj.GetResourceVersion()
 
-	watchInterface, err := lw.Watch(v1.ListOptions{ResourceVersion: currResourceVersion})
+	watchInterface, err := lw.Watch(metav1.ListOptions{ResourceVersion: currResourceVersion})
 	if err != nil {
 		return nil, err
 	}
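
For callers outside this package, the visible change is the option type in the List/Watch closures. Below is a minimal sketch of building a ListWatch against the new signatures; the k8s.io/client-go import paths, the CoreV1() accessor, and the context-free List/Watch calls are assumptions from the later client-go layout, not part of this commit.

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// newPodListWatch returns a ListWatch over pods in one namespace. Both
// closures now accept metav1.ListOptions rather than the core v1 type.
func newPodListWatch(client kubernetes.Interface, namespace string) *cache.ListWatch {
	return &cache.ListWatch{
		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
			return client.CoreV1().Pods(namespace).List(options)
		},
		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
			return client.CoreV1().Pods(namespace).Watch(options)
		},
	}
}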


@@ -36,11 +36,11 @@ import (
 	"github.com/golang/glog"
 
 	apierrs "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/apimachinery/pkg/watch"
-	"k8s.io/kubernetes/pkg/api/v1"
 )
 
 // Reflector watches a specified resource and causes all changes to be reflected in the given store.
@@ -239,7 +239,7 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
 	// Explicitly set "0" as resource version - it's fine for the List()
 	// to be served from cache and potentially be delayed relative to
 	// etcd contents. Reflector framework will catch up via Watch() eventually.
-	options := v1.ListOptions{ResourceVersion: "0"}
+	options := metav1.ListOptions{ResourceVersion: "0"}
 	list, err := r.listerWatcher.List(options)
 	if err != nil {
 		return fmt.Errorf("%s: Failed to list %v: %v", r.name, r.expectedType, err)
@@ -282,7 +282,7 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
 	for {
 		timeoutSeconds := int64(minWatchTimeout.Seconds() * (rand.Float64() + 1.0))
-		options = v1.ListOptions{
+		options = metav1.ListOptions{
 			ResourceVersion: resourceVersion,
 			// We want to avoid situations of hanging watchers. Stop any watchers that do not
 			// receive any events within the timeout window.
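
The two hunks above are the heart of the Reflector's relist/rewatch cycle. A condensed sketch of that pattern under the new option type follows; it is illustrative only (the real ListAndWatch also syncs the store, randomizes the timeout, and loops on the watch), and the import paths assume the later client-go layout.

package example

import (
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/tools/cache"
)

// listThenWatch lists at ResourceVersion "0" (cache-served, possibly stale),
// then opens a watch from the returned resource version with a server-side
// timeout so a dead connection cannot hang forever.
func listThenWatch(lw cache.ListerWatcher) (watch.Interface, error) {
	list, err := lw.List(metav1.ListOptions{ResourceVersion: "0"})
	if err != nil {
		return nil, err
	}
	listMeta, err := meta.ListAccessor(list)
	if err != nil {
		return nil, err
	}
	timeoutSeconds := int64(300) // illustrative; the Reflector randomizes this
	return lw.Watch(metav1.ListOptions{
		ResourceVersion: listMeta.GetResourceVersion(),
		TimeoutSeconds:  &timeoutSeconds,
	})
}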


@@ -122,11 +122,11 @@ func (b SAControllerClientBuilder) Config(name string) (*restclient.Config, erro
 	}
 
 	lw := &cache.ListWatch{
-		ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
+		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
			options.FieldSelector = fields.SelectorFromSet(map[string]string{api.SecretTypeField: string(v1.SecretTypeServiceAccountToken)}).String()
			return b.CoreClient.Secrets(b.Namespace).List(options)
		},
-		WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
+		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
			options.FieldSelector = fields.SelectorFromSet(map[string]string{api.SecretTypeField: string(v1.SecretTypeServiceAccountToken)}).String()
			return b.CoreClient.Secrets(b.Namespace).Watch(options)
		},
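
The closures above force a server-side field selector no matter what options the caller passed in. Outside the kubernetes tree, the same selector string can be built as sketched below; the field name "type" and the secret type value are copied from the hunk, the rest is an assumption-laden sketch.

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
)

// tokenSecretOptions restricts a list or watch to service-account token
// secrets via a server-side field selector on the secret "type" field.
func tokenSecretOptions() metav1.ListOptions {
	sel := fields.SelectorFromSet(fields.Set{"type": "kubernetes.io/service-account-token"})
	return metav1.ListOptions{FieldSelector: sel.String()}
}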


@@ -97,7 +97,7 @@ func (jm *CronJobController) Run(stopCh <-chan struct{}) {
 // SyncAll lists all the CronJobs and Jobs and reconciles them.
 func (jm *CronJobController) SyncAll() {
-	sjl, err := jm.kubeClient.BatchV2alpha1().CronJobs(v1.NamespaceAll).List(v1.ListOptions{})
+	sjl, err := jm.kubeClient.BatchV2alpha1().CronJobs(metav1.NamespaceAll).List(metav1.ListOptions{})
 	if err != nil {
 		glog.Errorf("Error listing cronjobs: %v", err)
 		return
@@ -105,7 +105,7 @@ func (jm *CronJobController) SyncAll() {
 	sjs := sjl.Items
 	glog.V(4).Infof("Found %d cronjobs", len(sjs))
 
-	jl, err := jm.kubeClient.BatchV2alpha1().Jobs(v1.NamespaceAll).List(v1.ListOptions{})
+	jl, err := jm.kubeClient.BatchV2alpha1().Jobs(metav1.NamespaceAll).List(metav1.ListOptions{})
 	if err != nil {
 		glog.Errorf("Error listing jobs: %v", err)
 		return
@@ -238,7 +238,7 @@ func SyncOne(sj batch.CronJob, js []batch.Job, now time.Time, jc jobControlInter
 	}
 
 	// remove all pods...
 	selector, _ := metav1.LabelSelectorAsSelector(job.Spec.Selector)
-	options := v1.ListOptions{LabelSelector: selector.String()}
+	options := metav1.ListOptions{LabelSelector: selector.String()}
 	podList, err := pc.ListPods(job.Namespace, options)
 	if err != nil {
 		recorder.Eventf(&sj, v1.EventTypeWarning, "FailedList", "List job-pods: %v", err)
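
The selector conversion in that last hunk is the standard recipe for turning an API LabelSelector into the string form metav1.ListOptions carries. A hedged sketch of the recipe in isolation, assuming the later k8s.io/api and client-go layout with context-free signatures:

package example

import (
	batchv1 "k8s.io/api/batch/v1"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// podsForJob lists the pods a Job's selector matches: convert the
// LabelSelector to a labels.Selector, then stringify it into ListOptions.
func podsForJob(client kubernetes.Interface, job *batchv1.Job) (*v1.PodList, error) {
	selector, err := metav1.LabelSelectorAsSelector(job.Spec.Selector)
	if err != nil {
		return nil, err
	}
	return client.CoreV1().Pods(job.Namespace).List(metav1.ListOptions{
		LabelSelector: selector.String(),
	})
}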


@@ -177,7 +177,7 @@ func (f *fakeJobControl) Clear() {
 // created as an interface to allow testing.
 type podControlInterface interface {
 	// ListPods lists pods
-	ListPods(namespace string, opts v1.ListOptions) (*v1.PodList, error)
+	ListPods(namespace string, opts metav1.ListOptions) (*v1.PodList, error)
 	// DeletePod deletes the pod identified by name.
 	// TODO: delete by UID?
 	DeletePod(namespace string, name string) error
@@ -191,7 +191,7 @@ type realPodControl struct {
 var _ podControlInterface = &realPodControl{}
 
-func (r realPodControl) ListPods(namespace string, opts v1.ListOptions) (*v1.PodList, error) {
+func (r realPodControl) ListPods(namespace string, opts metav1.ListOptions) (*v1.PodList, error) {
 	return r.KubeClient.Core().Pods(namespace).List(opts)
 }
@@ -208,7 +208,7 @@ type fakePodControl struct {
 var _ podControlInterface = &fakePodControl{}
 
-func (f *fakePodControl) ListPods(namespace string, opts v1.ListOptions) (*v1.PodList, error) {
+func (f *fakePodControl) ListPods(namespace string, opts metav1.ListOptions) (*v1.PodList, error) {
 	f.Lock()
 	defer f.Unlock()
 	return &v1.PodList{Items: f.Pods}, nil


@@ -23,13 +23,14 @@ import (
 	"time"
 
 	"github.com/golang/glog"
-	heapster "k8s.io/heapster/metrics/api/v1/types"
-	metricsapi "k8s.io/heapster/metrics/apis/metrics/v1alpha1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
 	v1core "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1"
+	heapster "k8s.io/heapster/metrics/api/v1/types"
+	metricsapi "k8s.io/heapster/metrics/apis/metrics/v1alpha1"
 )
 
 // PodResourceInfo contains pod resource metric values as a map from pod names to
@@ -128,7 +129,7 @@ func (h *HeapsterMetricsClient) GetResourceMetric(resource v1.ResourceName, name
 }
 
 func (h *HeapsterMetricsClient) GetRawMetric(metricName string, namespace string, selector labels.Selector) (PodMetricsInfo, time.Time, error) {
-	podList, err := h.podsGetter.Pods(namespace).List(v1.ListOptions{LabelSelector: selector.String()})
+	podList, err := h.podsGetter.Pods(namespace).List(metav1.ListOptions{LabelSelector: selector.String()})
 	if err != nil {
 		return nil, time.Time{}, fmt.Errorf("failed to get pod list while fetching metrics: %v", err)
 	}


@@ -70,7 +70,7 @@ func RequestNodeCertificate(client certificatesclient.CertificateSigningRequestI
 	// Make a default timeout = 3600s.
 	var defaultTimeoutSeconds int64 = 3600
-	resultCh, err := client.Watch(v1.ListOptions{
+	resultCh, err := client.Watch(metav1.ListOptions{
 		Watch:           true,
 		TimeoutSeconds:  &defaultTimeoutSeconds,
 		FieldSelector:   fields.OneTermEqualSelector("metadata.name", req.Name).String(),
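
The same shape works for any single-object watch: select by metadata.name and bound the watch server-side. A minimal sketch against a typed pod client, where the typed-client import path and context-free Watch signature are assumptions from the later client-go layout:

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/watch"
	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
)

// watchOnePod opens a server-side-bounded watch scoped to a single pod.
func watchOnePod(pods v1core.PodInterface, name string) (watch.Interface, error) {
	timeoutSeconds := int64(3600) // mirrors the 3600s default in the hunk above
	return pods.Watch(metav1.ListOptions{
		Watch:          true,
		TimeoutSeconds: &timeoutSeconds,
		FieldSelector:  fields.OneTermEqualSelector("metadata.name", name).String(),
	})
}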


@@ -181,7 +181,7 @@ func (r *EvictionREST) getPodDisruptionBudgets(ctx genericapirequest.Context, po
 		return nil, nil
 	}
 
-	pdbList, err := r.podDisruptionBudgetClient.PodDisruptionBudgets(pod.Namespace).List(api.ListOptions{})
+	pdbList, err := r.podDisruptionBudgetClient.PodDisruptionBudgets(pod.Namespace).List(metav1.ListOptions{})
 	if err != nil {
 		return nil, err
 	}


@@ -22,6 +22,7 @@ import (
 	"time"
 
 	"k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/kubernetes/pkg/api"
@@ -119,7 +120,7 @@ func (c *Repair) runOnce() error {
 	// the service collection. The caching layer keeps per-collection RVs,
 	// and this is proper, since in theory the collections could be hosted
 	// in separate etcd (or even non-etcd) instances.
-	list, err := c.serviceClient.Services(api.NamespaceAll).List(api.ListOptions{})
+	list, err := c.serviceClient.Services(metav1.NamespaceAll).List(metav1.ListOptions{})
 	if err != nil {
 		return fmt.Errorf("unable to refresh the service IP block: %v", err)
 	}


@@ -23,11 +23,11 @@ import (
 	"github.com/golang/glog"
 
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/apiserver/pkg/authorization/authorizer"
-	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/apis/rbac"
 	rbacapiv1alpha1 "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1"
 	rbacapiv1beta1 "k8s.io/kubernetes/pkg/apis/rbac/v1beta1"
@@ -132,7 +132,7 @@ func PostStartHook(hookContext genericapiserver.PostStartHookContext) error {
 			return false, nil
 		}
 
-		existingClusterRoles, err := clientset.ClusterRoles().List(api.ListOptions{})
+		existingClusterRoles, err := clientset.ClusterRoles().List(metav1.ListOptions{})
 		if err != nil {
 			utilruntime.HandleError(fmt.Errorf("unable to initialize clusterroles: %v", err))
 			return false, nil
@@ -149,7 +149,7 @@ func PostStartHook(hookContext genericapiserver.PostStartHookContext) error {
 			}
 		}
 
-		existingClusterRoleBindings, err := clientset.ClusterRoleBindings().List(api.ListOptions{})
+		existingClusterRoleBindings, err := clientset.ClusterRoleBindings().List(metav1.ListOptions{})
 		if err != nil {
 			utilruntime.HandleError(fmt.Errorf("unable to initialize clusterrolebindings: %v", err))
 			return false, nil
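
The return false, nil / return true, nil shape in these hunks is a poll condition: the post-start hook retries the List until the RBAC API answers. A hedged sketch of that retry shape using wait.PollImmediate, with an illustrative typed client and illustrative intervals:

package example

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/wait"
	rbacv1 "k8s.io/client-go/kubernetes/typed/rbac/v1"
)

// waitForClusterRoles polls until ClusterRoles can be listed, swallowing
// transient errors the way the post-start hook above does.
func waitForClusterRoles(client rbacv1.ClusterRolesGetter) error {
	return wait.PollImmediate(time.Second, 30*time.Second, func() (bool, error) {
		if _, err := client.ClusterRoles().List(metav1.ListOptions{}); err != nil {
			utilruntime.HandleError(fmt.Errorf("unable to list clusterroles: %v", err))
			return false, nil // retry on transient errors
		}
		return true, nil
	})
}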


@@ -456,7 +456,7 @@ func (p *glusterfsPlugin) collectGids(className string, gidTable *MinMaxAllocato
 	if kubeClient == nil {
 		return fmt.Errorf("glusterfs: failed to get kube client when collecting gids")
 	}
-	pvList, err := kubeClient.Core().PersistentVolumes().List(v1.ListOptions{LabelSelector: labels.Everything().String()})
+	pvList, err := kubeClient.Core().PersistentVolumes().List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
 	if err != nil {
 		glog.Errorf("glusterfs: failed to get existing persistent volumes")
 		return err


@@ -178,7 +178,7 @@ func (c *realRecyclerClient) Event(eventtype, message string) {
 func (c *realRecyclerClient) WatchPod(name, namespace string, stopChannel chan struct{}) (<-chan watch.Event, error) {
 	podSelector, _ := fields.ParseSelector("metadata.name=" + name)
-	options := v1.ListOptions{
+	options := metav1.ListOptions{
 		FieldSelector: podSelector.String(),
 		Watch:         true,
 	}
@@ -189,7 +189,7 @@ func (c *realRecyclerClient) WatchPod(name, namespace string, stopChannel chan s
 	}
 
 	eventSelector, _ := fields.ParseSelector("involvedObject.name=" + name)
-	eventWatch, err := c.client.Core().Events(namespace).Watch(v1.ListOptions{
+	eventWatch, err := c.client.Core().Events(namespace).Watch(metav1.ListOptions{
 		FieldSelector: eventSelector.String(),
 		Watch:         true,
 	})
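
WatchPod returns a receive channel fed from watches like the two above. A minimal sketch of the forwarding goroutine such a method needs; the names are illustrative, not the recycler's actual code.

package example

import "k8s.io/apimachinery/pkg/watch"

// forwardEvents copies events from a watch into a caller-owned channel,
// shutting down when the watch closes or the stop channel fires.
func forwardEvents(w watch.Interface, stopCh <-chan struct{}) <-chan watch.Event {
	out := make(chan watch.Event)
	go func() {
		defer close(out)
		defer w.Stop()
		for {
			select {
			case ev, ok := <-w.ResultChan():
				if !ok {
					return // watch closed by the server or by Stop
				}
				select {
				case out <- ev:
				case <-stopCh:
					return
				}
			case <-stopCh:
				return
			}
		}
	}()
	return out
}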