mirror of https://github.com/k3s-io/k3s

commit adc0eef43e
parent d368afd845

    remove duplicated import and wrong alias name of api package
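Every hunk below follows the same pattern: `k8s.io/api/core/v1` had been imported twice, or only under the redundant alias `clientv1`, and event-recorder and object-reference code referred to the aliased name. The commit keeps a single un-aliased import, whose package name is already `v1`, and rewrites `clientv1.*` references to `v1.*`. A minimal sketch of the resulting shape is below; the helper function and its name are illustrative, not part of the commit:

    package example

    import (
        "k8s.io/api/core/v1" // package name is v1; no alias needed
        "k8s.io/client-go/kubernetes/scheme"
        "k8s.io/client-go/tools/record"
    )

    // newRecorder builds an event recorder the way the touched controllers do,
    // using v1.EventSource directly instead of a clientv1 alias.
    func newRecorder(component string) record.EventRecorder {
        broadcaster := record.NewBroadcaster()
        return broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: component})
    }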
@@ -27,7 +27,7 @@ import (
 "strings"
 "time"

-clientv1 "k8s.io/api/core/v1"
+"k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/util/wait"
 "k8s.io/apiserver/pkg/server/healthz"
@@ -132,7 +132,7 @@ func Run(s *options.CloudControllerManagerServer, cloud cloudprovider.Interface)
 eventBroadcaster := record.NewBroadcaster()
 eventBroadcaster.StartLogging(glog.Infof)
 eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.Core().RESTClient()).Events("")})
-recorder := eventBroadcaster.NewRecorder(api.Scheme, clientv1.EventSource{Component: "cloud-controller-manager"})
+recorder := eventBroadcaster.NewRecorder(api.Scheme, v1.EventSource{Component: "cloud-controller-manager"})

 run := func(stop <-chan struct{}) {
 rootClientBuilder := controller.SimpleControllerClientBuilder{
@@ -21,7 +21,7 @@ package app
 import (
 "time"

-clientv1 "k8s.io/api/core/v1"
+"k8s.io/api/core/v1"
 "k8s.io/client-go/informers"
 clientset "k8s.io/client-go/kubernetes"
 v1core "k8s.io/client-go/kubernetes/typed/core/v1"
@@ -65,7 +65,7 @@ func Run(s *GKECertificatesController) error {
 eventBroadcaster := record.NewBroadcaster()
 eventBroadcaster.StartLogging(glog.Infof)
 eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.Core().RESTClient()).Events("")})
-recorder := eventBroadcaster.NewRecorder(api.Scheme, clientv1.EventSource{Component: "gke-certificates-controller"})
+recorder := eventBroadcaster.NewRecorder(api.Scheme, v1.EventSource{Component: "gke-certificates-controller"})

 clientBuilder := controller.SimpleControllerClientBuilder{ClientConfig: kubeconfig}
 client := clientBuilder.ClientOrDie("certificate-controller")
@@ -38,7 +38,7 @@ import (

 "k8s.io/apiserver/pkg/server/healthz"

-clientv1 "k8s.io/api/core/v1"
+"k8s.io/api/core/v1"
 "k8s.io/client-go/discovery"
 "k8s.io/client-go/kubernetes"
 v1core "k8s.io/client-go/kubernetes/typed/core/v1"
@@ -155,7 +155,7 @@ func Run(s *options.CMServer) error {
 eventBroadcaster := record.NewBroadcaster()
 eventBroadcaster.StartLogging(glog.Infof)
 eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.Core().RESTClient()).Events("")})
-recorder := eventBroadcaster.NewRecorder(api.Scheme, clientv1.EventSource{Component: "controller-manager"})
+recorder := eventBroadcaster.NewRecorder(api.Scheme, v1.EventSource{Component: "controller-manager"})

 run := func(stop <-chan struct{}) {
 rootClientBuilder := controller.SimpleControllerClientBuilder{
@@ -30,7 +30,7 @@ import (
 "strings"
 "time"

-clientv1 "k8s.io/api/core/v1"
+"k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/runtime"
 "k8s.io/apimachinery/pkg/runtime/serializer"
@@ -369,7 +369,7 @@ type ProxyServer struct {
 ConntrackConfiguration componentconfig.KubeProxyConntrackConfiguration
 Conntracker Conntracker // if nil, ignored
 ProxyMode string
-NodeRef *clientv1.ObjectReference
+NodeRef *v1.ObjectReference
 CleanupAndExit bool
 MetricsBindAddress string
 EnableProfiling bool
@@ -460,7 +460,7 @@ func NewProxyServer(config *componentconfig.KubeProxyConfiguration, cleanupAndEx
 // Create event recorder
 hostname := utilnode.GetHostname(config.HostnameOverride)
 eventBroadcaster := record.NewBroadcaster()
-recorder := eventBroadcaster.NewRecorder(scheme, clientv1.EventSource{Component: "kube-proxy", Host: hostname})
+recorder := eventBroadcaster.NewRecorder(scheme, v1.EventSource{Component: "kube-proxy", Host: hostname})

 var healthzServer *healthcheck.HealthzServer
 var healthzUpdater healthcheck.HealthzUpdater
@@ -572,7 +572,7 @@ func NewProxyServer(config *componentconfig.KubeProxyConfiguration, cleanupAndEx
 iptInterface.AddReloadFunc(proxier.Sync)
 }

-nodeRef := &clientv1.ObjectReference{
+nodeRef := &v1.ObjectReference{
 Kind: "Node",
 Name: hostname,
 UID: types.UID(hostname),
@@ -36,7 +36,6 @@ import (
 "github.com/spf13/pflag"

 "k8s.io/api/core/v1"
-clientv1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/resource"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/runtime"
@@ -344,7 +343,7 @@ func makeEventRecorder(s *componentconfig.KubeletConfiguration, kubeDeps *kubele
 return
 }
 eventBroadcaster := record.NewBroadcaster()
-kubeDeps.Recorder = eventBroadcaster.NewRecorder(api.Scheme, clientv1.EventSource{Component: componentKubelet, Host: string(nodeName)})
+kubeDeps.Recorder = eventBroadcaster.NewRecorder(api.Scheme, v1.EventSource{Component: componentKubelet, Host: string(nodeName)})
 eventBroadcaster.StartLogging(glog.V(3).Infof)
 if kubeDeps.EventClient != nil {
 glog.V(4).Infof("Sending events to api server.")
@@ -19,7 +19,7 @@ package main
 import (
 "fmt"

-clientv1 "k8s.io/api/core/v1"
+"k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/util/sets"
 "k8s.io/apiserver/pkg/util/flag"
 clientgoclientset "k8s.io/client-go/kubernetes"
@@ -142,7 +142,7 @@ func main() {
 sysctl := fakesysctl.NewFake()
 execer := &fakeexec.FakeExec{}
 eventBroadcaster := record.NewBroadcaster()
-recorder := eventBroadcaster.NewRecorder(api.Scheme, clientv1.EventSource{Component: "kube-proxy", Host: config.NodeName})
+recorder := eventBroadcaster.NewRecorder(api.Scheme, v1.EventSource{Component: "kube-proxy", Host: config.NodeName})

 hollowProxy, err := kubemark.NewHollowProxyOrDie(
 config.NodeName,
@@ -23,7 +23,6 @@ import (
 "time"

 "k8s.io/api/core/v1"
-clientv1 "k8s.io/api/core/v1"
 extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
 "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -124,7 +123,7 @@ func NewIngressController(client federationclientset.Interface) *IngressControll
 glog.V(4).Infof("->NewIngressController V(4)")
 broadcaster := record.NewBroadcaster()
 broadcaster.StartRecordingToSink(eventsink.NewFederatedEventSink(client))
-recorder := broadcaster.NewRecorder(api.Scheme, clientv1.EventSource{Component: UserAgentName})
+recorder := broadcaster.NewRecorder(api.Scheme, v1.EventSource{Component: UserAgentName})
 ic := &IngressController{
 federatedApiClient: client,
 ingressReviewDelay: time.Second * 10,
@@ -25,7 +25,6 @@ import (

 "github.com/golang/glog"
 "k8s.io/api/core/v1"
-clientv1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/labels"
@@ -102,7 +101,7 @@ type ServiceController struct {
 func New(federationClient fedclientset.Interface) *ServiceController {
 broadcaster := record.NewBroadcaster()
 broadcaster.StartRecordingToSink(eventsink.NewFederatedEventSink(federationClient))
-recorder := broadcaster.NewRecorder(api.Scheme, clientv1.EventSource{Component: UserAgentName})
+recorder := broadcaster.NewRecorder(api.Scheme, v1.EventSource{Component: UserAgentName})

 s := &ServiceController{
 federationClient: federationClient,
@@ -20,7 +20,7 @@ import (
 "fmt"
 "time"

-clientv1 "k8s.io/api/core/v1"
+"k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 pkgruntime "k8s.io/apimachinery/pkg/runtime"
@@ -109,7 +109,7 @@ func StartFederationSyncController(kind string, adapterFactory federatedtypes.Ad
 func newFederationSyncController(client federationclientset.Interface, adapter federatedtypes.FederatedTypeAdapter) *FederationSyncController {
 broadcaster := record.NewBroadcaster()
 broadcaster.StartRecordingToSink(eventsink.NewFederatedEventSink(client))
-recorder := broadcaster.NewRecorder(api.Scheme, clientv1.EventSource{Component: fmt.Sprintf("federation-%v-controller", adapter.Kind())})
+recorder := broadcaster.NewRecorder(api.Scheme, v1.EventSource{Component: fmt.Sprintf("federation-%v-controller", adapter.Kind())})

 s := &FederationSyncController{
 reviewDelay: time.Second * 10,
@@ -19,8 +19,7 @@ package eventsink
 import (
 "reflect"

-clientv1 "k8s.io/api/core/v1"
-kubev1 "k8s.io/api/core/v1"
+"k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/conversion"
 "k8s.io/apimachinery/pkg/runtime"
@@ -51,8 +50,8 @@ var scheme = runtime.NewScheme()
 func init() {
 // register client-go's and kube's Event type under two different GroupVersions
 // TODO: switch to client-go client for events
-scheme.AddKnownTypes(clientv1.SchemeGroupVersion, &clientv1.Event{})
-scheme.AddKnownTypes(schema.GroupVersion{Group: "fake-kube-" + kubev1.SchemeGroupVersion.Group, Version: kubev1.SchemeGroupVersion.Version}, &kubev1.Event{})
+scheme.AddKnownTypes(v1.SchemeGroupVersion, &v1.Event{})
+scheme.AddKnownTypes(schema.GroupVersion{Group: "fake-kube-" + v1.SchemeGroupVersion.Group, Version: v1.SchemeGroupVersion.Version}, &v1.Event{})

 if err := scheme.AddConversionFuncs(
 metav1.Convert_unversioned_Time_To_unversioned_Time,
@@ -72,8 +71,8 @@ func init() {
 }
 }

-func (fes *FederatedEventSink) Create(event *clientv1.Event) (*clientv1.Event, error) {
-kubeEvent := &kubev1.Event{}
+func (fes *FederatedEventSink) Create(event *v1.Event) (*v1.Event, error) {
+kubeEvent := &v1.Event{}
 if err := scheme.Convert(event, kubeEvent, nil); err != nil {
 return nil, err
 }
@@ -83,15 +82,15 @@ func (fes *FederatedEventSink) Create(event *clientv1.Event) (*clientv1.Event, e
 return nil, err
 }

-retEvent := &clientv1.Event{}
+retEvent := &v1.Event{}
 if err := scheme.Convert(ret, retEvent, nil); err != nil {
 return nil, err
 }
 return retEvent, nil
 }

-func (fes *FederatedEventSink) Update(event *clientv1.Event) (*clientv1.Event, error) {
-kubeEvent := &kubev1.Event{}
+func (fes *FederatedEventSink) Update(event *v1.Event) (*v1.Event, error) {
+kubeEvent := &v1.Event{}
 if err := scheme.Convert(event, kubeEvent, nil); err != nil {
 return nil, err
 }
@@ -101,15 +100,15 @@ func (fes *FederatedEventSink) Update(event *clientv1.Event) (*clientv1.Event, e
 return nil, err
 }

-retEvent := &clientv1.Event{}
+retEvent := &v1.Event{}
 if err := scheme.Convert(ret, retEvent, nil); err != nil {
 return nil, err
 }
 return retEvent, nil
 }

-func (fes *FederatedEventSink) Patch(event *clientv1.Event, data []byte) (*clientv1.Event, error) {
-kubeEvent := &kubev1.Event{}
+func (fes *FederatedEventSink) Patch(event *v1.Event, data []byte) (*v1.Event, error) {
+kubeEvent := &v1.Event{}
 if err := scheme.Convert(event, kubeEvent, nil); err != nil {
 return nil, err
 }
@@ -119,7 +118,7 @@ func (fes *FederatedEventSink) Patch(event *clientv1.Event, data []byte) (*clien
 return nil, err
 }

-retEvent := &clientv1.Event{}
+retEvent := &v1.Event{}
 if err := scheme.Convert(ret, retEvent, nil); err != nil {
 return nil, err
 }
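The eventsink change above collapses the two aliases (`clientv1`, `kubev1`) that pointed at the same package into one, while keeping the underlying trick: the same Event type is registered under its real GroupVersion and under a "fake-kube-" prefixed one so scheme.Convert can translate between them. A hedged sketch of that registration, with illustrative names rather than the federation package's actual identifiers:

    package eventsinksketch

    import (
        "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/runtime"
        "k8s.io/apimachinery/pkg/runtime/schema"
    )

    var sketchScheme = runtime.NewScheme()

    func init() {
        // One Go type registered under two GroupVersions; Convert can then copy between them.
        sketchScheme.AddKnownTypes(v1.SchemeGroupVersion, &v1.Event{})
        sketchScheme.AddKnownTypes(schema.GroupVersion{
            Group:   "fake-kube-" + v1.SchemeGroupVersion.Group,
            Version: v1.SchemeGroupVersion.Version,
        }, &v1.Event{})
    }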
@@ -19,8 +19,7 @@ package eventsink
 import (
 "testing"

-clientv1 "k8s.io/api/core/v1"
-kubev1 "k8s.io/api/core/v1"
+"k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/runtime"
 core "k8s.io/client-go/testing"
@@ -47,7 +46,7 @@ func TestEventSink(t *testing.T) {
 return true, obj, nil
 })

-event := clientv1.Event{
+event := v1.Event{
 ObjectMeta: metav1.ObjectMeta{
 Name: "bzium",
 Namespace: "ns",
@@ -56,7 +55,7 @@ func TestEventSink(t *testing.T) {
 sink := NewFederatedEventSink(fakeFederationClient)
 eventUpdated, err := sink.Create(&event)
 assert.NoError(t, err)
-eventV1 := GetObjectFromChan(createdChan).(*kubev1.Event)
+eventV1 := GetObjectFromChan(createdChan).(*v1.Event)
 assert.NotNil(t, eventV1)
 // Just some simple sanity checks.
 assert.Equal(t, event.Name, eventV1.Name)
@@ -64,7 +63,7 @@ func TestEventSink(t *testing.T) {

 eventUpdated, err = sink.Update(&event)
 assert.NoError(t, err)
-eventV1 = GetObjectFromChan(updateChan).(*kubev1.Event)
+eventV1 = GetObjectFromChan(updateChan).(*v1.Event)
 assert.NotNil(t, eventV1)
 // Just some simple sanity checks.
 assert.Equal(t, event.Name, eventV1.Name)
@@ -23,7 +23,6 @@ import (
 "github.com/golang/glog"

 "k8s.io/api/core/v1"
-clientv1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/types"
 utilruntime "k8s.io/apimachinery/pkg/util/runtime"
@@ -80,7 +79,7 @@ func NewCloudNodeController(
 nodeStatusUpdateFrequency time.Duration) *CloudNodeController {

 eventBroadcaster := record.NewBroadcaster()
-recorder := eventBroadcaster.NewRecorder(scheme.Scheme, clientv1.EventSource{Component: "cloudcontrollermanager"})
+recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloudcontrollermanager"})
 eventBroadcaster.StartLogging(glog.Infof)
 if kubeClient != nil {
 glog.V(0).Infof("Sending events to api server.")
@@ -26,7 +26,6 @@ import (
 "k8s.io/client-go/kubernetes/fake"
 "k8s.io/client-go/kubernetes/scheme"

-clientv1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/types"
 "k8s.io/apimachinery/pkg/util/wait"
@@ -110,7 +109,7 @@ func TestNodeDeleted(t *testing.T) {
 nodeInformer: factory.Core().V1().Nodes(),
 cloud: &fakecloud.FakeCloud{Err: cloudprovider.InstanceNotFound},
 nodeMonitorPeriod: 1 * time.Second,
-recorder: eventBroadcaster.NewRecorder(scheme.Scheme, clientv1.EventSource{Component: "cloud-controller-manager"}),
+recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloud-controller-manager"}),
 nodeStatusUpdateFrequency: 1 * time.Second,
 }
 eventBroadcaster.StartLogging(glog.Infof)
@@ -190,7 +189,7 @@ func TestNodeInitialized(t *testing.T) {
 nodeInformer: factory.Core().V1().Nodes(),
 cloud: fakeCloud,
 nodeMonitorPeriod: 1 * time.Second,
-recorder: eventBroadcaster.NewRecorder(scheme.Scheme, clientv1.EventSource{Component: "cloud-controller-manager"}),
+recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloud-controller-manager"}),
 nodeStatusUpdateFrequency: 1 * time.Second,
 }
 eventBroadcaster.StartLogging(glog.Infof)
@@ -261,7 +260,7 @@ func TestNodeIgnored(t *testing.T) {
 nodeInformer: factory.Core().V1().Nodes(),
 cloud: fakeCloud,
 nodeMonitorPeriod: 5 * time.Second,
-recorder: eventBroadcaster.NewRecorder(scheme.Scheme, clientv1.EventSource{Component: "cloud-controller-manager"}),
+recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloud-controller-manager"}),
 }
 eventBroadcaster.StartLogging(glog.Infof)

@@ -338,7 +337,7 @@ func TestGCECondition(t *testing.T) {
 nodeInformer: factory.Core().V1().Nodes(),
 cloud: fakeCloud,
 nodeMonitorPeriod: 1 * time.Second,
-recorder: eventBroadcaster.NewRecorder(scheme.Scheme, clientv1.EventSource{Component: "cloud-controller-manager"}),
+recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloud-controller-manager"}),
 }
 eventBroadcaster.StartLogging(glog.Infof)

@@ -434,7 +433,7 @@ func TestZoneInitialized(t *testing.T) {
 nodeInformer: factory.Core().V1().Nodes(),
 cloud: fakeCloud,
 nodeMonitorPeriod: 5 * time.Second,
-recorder: eventBroadcaster.NewRecorder(scheme.Scheme, clientv1.EventSource{Component: "cloud-controller-manager"}),
+recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloud-controller-manager"}),
 }
 eventBroadcaster.StartLogging(glog.Infof)

@@ -531,7 +530,7 @@ func TestNodeAddresses(t *testing.T) {
 cloud: fakeCloud,
 nodeMonitorPeriod: 5 * time.Second,
 nodeStatusUpdateFrequency: 1 * time.Second,
-recorder: eventBroadcaster.NewRecorder(scheme.Scheme, clientv1.EventSource{Component: "cloud-controller-manager"}),
+recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloud-controller-manager"}),
 }
 eventBroadcaster.StartLogging(glog.Infof)

@@ -650,7 +649,7 @@ func TestNodeProvidedIPAddresses(t *testing.T) {
 cloud: fakeCloud,
 nodeMonitorPeriod: 5 * time.Second,
 nodeStatusUpdateFrequency: 1 * time.Second,
-recorder: eventBroadcaster.NewRecorder(scheme.Scheme, clientv1.EventSource{Component: "cloud-controller-manager"}),
+recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloud-controller-manager"}),
 }
 eventBroadcaster.StartLogging(glog.Infof)
@@ -38,7 +38,6 @@ import (
 batchv1 "k8s.io/api/batch/v1"
 batchv2alpha1 "k8s.io/api/batch/v2alpha1"
 "k8s.io/api/core/v1"
-clientv1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/runtime"
@@ -82,7 +81,7 @@ func NewCronJobController(kubeClient clientset.Interface) *CronJobController {
 jobControl: realJobControl{KubeClient: kubeClient},
 sjControl: &realSJControl{KubeClient: kubeClient},
 podControl: &realPodControl{KubeClient: kubeClient},
-recorder: eventBroadcaster.NewRecorder(scheme.Scheme, clientv1.EventSource{Component: "cronjob-controller"}),
+recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cronjob-controller"}),
 }

 return jm
@@ -25,7 +25,6 @@ import (

 apps "k8s.io/api/apps/v1beta1"
 "k8s.io/api/core/v1"
-clientv1 "k8s.io/api/core/v1"
 extensions "k8s.io/api/extensions/v1beta1"
 "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -134,10 +133,10 @@ func NewDaemonSetsController(daemonSetInformer extensionsinformers.DaemonSetInfo
 }
 dsc := &DaemonSetsController{
 kubeClient: kubeClient,
-eventRecorder: eventBroadcaster.NewRecorder(scheme.Scheme, clientv1.EventSource{Component: "daemonset-controller"}),
+eventRecorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "daemonset-controller"}),
 podControl: controller.RealPodControl{
 KubeClient: kubeClient,
-Recorder: eventBroadcaster.NewRecorder(scheme.Scheme, clientv1.EventSource{Component: "daemon-set"}),
+Recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "daemon-set"}),
 },
 crControl: controller.RealControllerRevisionControl{
 KubeClient: kubeClient,
@@ -28,7 +28,6 @@ import (
 "github.com/golang/glog"

 "k8s.io/api/core/v1"
-clientv1 "k8s.io/api/core/v1"
 extensions "k8s.io/api/extensions/v1beta1"
 "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -109,7 +108,7 @@ func NewDeploymentController(dInformer extensionsinformers.DeploymentInformer, r
 }
 dc := &DeploymentController{
 client: client,
-eventRecorder: eventBroadcaster.NewRecorder(scheme.Scheme, clientv1.EventSource{Component: "deployment-controller"}),
+eventRecorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "deployment-controller"}),
 queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "deployment"),
 }
 dc.rsControl = controller.RealRSControl{
@@ -23,7 +23,6 @@ import (

 apps "k8s.io/api/apps/v1beta1"
 "k8s.io/api/core/v1"
-clientv1 "k8s.io/api/core/v1"
 "k8s.io/api/extensions/v1beta1"
 policy "k8s.io/api/policy/v1beta1"
 "k8s.io/apimachinery/pkg/api/errors"
@@ -125,7 +124,7 @@ func NewDisruptionController(
 recheckQueue: workqueue.NewNamedDelayingQueue("disruption-recheck"),
 broadcaster: record.NewBroadcaster(),
 }
-dc.recorder = dc.broadcaster.NewRecorder(scheme.Scheme, clientv1.EventSource{Component: "controllermanager"})
+dc.recorder = dc.broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "controllermanager"})

 dc.getUpdater = func() updater { return dc.writePdbStatus }
@@ -25,7 +25,6 @@ import (

 batch "k8s.io/api/batch/v1"
 "k8s.io/api/core/v1"
-clientv1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/labels"
@@ -93,11 +92,11 @@ func NewJobController(podInformer coreinformers.PodInformer, jobInformer batchin
 kubeClient: kubeClient,
 podControl: controller.RealPodControl{
 KubeClient: kubeClient,
-Recorder: eventBroadcaster.NewRecorder(scheme.Scheme, clientv1.EventSource{Component: "job-controller"}),
+Recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "job-controller"}),
 },
 expectations: controller.NewControllerExpectations(),
 queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "job"),
-recorder: eventBroadcaster.NewRecorder(scheme.Scheme, clientv1.EventSource{Component: "job-controller"}),
+recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "job-controller"}),
 }

 jobInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
@@ -25,7 +25,6 @@ import (
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/types"

-clientv1 "k8s.io/api/core/v1"
 "k8s.io/client-go/tools/record"

 "k8s.io/api/core/v1"
@@ -66,7 +65,7 @@ func NewCloudCIDRAllocator(
 cloud: gceCloud,
 recorder: record.NewBroadcaster().NewRecorder(
 api.Scheme,
-clientv1.EventSource{Component: "cidrAllocator"}),
+v1.EventSource{Component: "cidrAllocator"}),
 }

 glog.V(0).Infof("Using cloud CIDR allocator (provider: %v)", cloud.ProviderName())
@@ -27,7 +27,6 @@ import (
 utilerrors "k8s.io/apimachinery/pkg/util/errors"
 utilruntime "k8s.io/apimachinery/pkg/util/runtime"

-clientv1 "k8s.io/api/core/v1"
 "k8s.io/client-go/kubernetes/scheme"
 "k8s.io/client-go/tools/cache"
 "k8s.io/client-go/tools/record"
@@ -266,7 +265,7 @@ func nodeExistsInCloudProvider(cloud cloudprovider.Interface, nodeName types.Nod
 }

 func recordNodeEvent(recorder record.EventRecorder, nodeName, nodeUID, eventtype, reason, event string) {
-ref := &clientv1.ObjectReference{
+ref := &v1.ObjectReference{
 Kind: "Node",
 Name: nodeName,
 UID: types.UID(nodeUID),
@@ -277,7 +276,7 @@ func recordNodeEvent(recorder record.EventRecorder, nodeName, nodeUID, eventtype
 }

 func recordNodeStatusChange(recorder record.EventRecorder, node *v1.Node, new_status string) {
-ref := &clientv1.ObjectReference{
+ref := &v1.ObjectReference{
 Kind: "Node",
 Name: node.Name,
 UID: node.UID,
@@ -32,7 +32,6 @@ import (
 utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 "k8s.io/apimachinery/pkg/util/wait"

-clientv1 "k8s.io/api/core/v1"
 "k8s.io/client-go/kubernetes/scheme"
 v1core "k8s.io/client-go/kubernetes/typed/core/v1"
 "k8s.io/client-go/tools/cache"
@@ -218,7 +217,7 @@ func NewNodeController(
 runTaintManager bool,
 useTaintBasedEvictions bool) (*NodeController, error) {
 eventBroadcaster := record.NewBroadcaster()
-recorder := eventBroadcaster.NewRecorder(scheme.Scheme, clientv1.EventSource{Component: "controllermanager"})
+recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "controllermanager"})
 eventBroadcaster.StartLogging(glog.Infof)
 if kubeClient != nil {
 glog.V(0).Infof("Sending events to api server.")
@@ -22,7 +22,6 @@ import (
 "sync"

 "k8s.io/api/core/v1"
-clientv1 "k8s.io/api/core/v1"
 apierrors "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/util/sets"
@@ -64,7 +63,7 @@ type rangeAllocator struct {
 // can initialize its CIDR map. NodeList is only nil in testing.
 func NewCIDRRangeAllocator(client clientset.Interface, clusterCIDR *net.IPNet, serviceCIDR *net.IPNet, subNetMaskSize int, nodeList *v1.NodeList) (CIDRAllocator, error) {
 eventBroadcaster := record.NewBroadcaster()
-recorder := eventBroadcaster.NewRecorder(scheme.Scheme, clientv1.EventSource{Component: "cidrAllocator"})
+recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cidrAllocator"})
 eventBroadcaster.StartLogging(glog.Infof)
 if client != nil {
 glog.V(0).Infof("Sending events to api server.")
@@ -31,7 +31,6 @@ import (
 "k8s.io/apimachinery/pkg/labels"
 "k8s.io/apimachinery/pkg/types"

-clientv1 "k8s.io/api/core/v1"
 "k8s.io/client-go/kubernetes/scheme"
 v1core "k8s.io/client-go/kubernetes/typed/core/v1"
 "k8s.io/client-go/tools/record"
@@ -153,7 +152,7 @@ func getMinTolerationTime(tolerations []v1.Toleration) time.Duration {
 // communicate with the API server.
 func NewNoExecuteTaintManager(c clientset.Interface) *NoExecuteTaintManager {
 eventBroadcaster := record.NewBroadcaster()
-recorder := eventBroadcaster.NewRecorder(scheme.Scheme, clientv1.EventSource{Component: "controllermanager"})
+recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "controllermanager"})
 eventBroadcaster.StartLogging(glog.Infof)
 if c != nil {
 glog.V(0).Infof("Sending events to api server.")
@@ -417,22 +416,22 @@ func (tc *NoExecuteTaintManager) emitPodDeletionEvent(nsName types.NamespacedNam
 if tc.recorder == nil {
 return
 }
-ref := &clientv1.ObjectReference{
+ref := &v1.ObjectReference{
 Kind: "Pod",
 Name: nsName.Name,
 Namespace: nsName.Namespace,
 }
-tc.recorder.Eventf(ref, clientv1.EventTypeNormal, "TaintManagerEviction", "Marking for deletion Pod %s", nsName.String())
+tc.recorder.Eventf(ref, v1.EventTypeNormal, "TaintManagerEviction", "Marking for deletion Pod %s", nsName.String())
 }

 func (tc *NoExecuteTaintManager) emitCancelPodDeletionEvent(nsName types.NamespacedName) {
 if tc.recorder == nil {
 return
 }
-ref := &clientv1.ObjectReference{
+ref := &v1.ObjectReference{
 Kind: "Pod",
 Name: nsName.Name,
 Namespace: nsName.Namespace,
 }
-tc.recorder.Eventf(ref, clientv1.EventTypeNormal, "TaintManagerEviction", "Cancelling deletion of Pod %s", nsName.String())
+tc.recorder.Eventf(ref, v1.EventTypeNormal, "TaintManagerEviction", "Cancelling deletion of Pod %s", nsName.String())
 }
@@ -32,7 +32,6 @@ import (
 "k8s.io/apimachinery/pkg/util/strategicpatch"
 "k8s.io/apimachinery/pkg/watch"

-clientv1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/util/clock"
 ref "k8s.io/client-go/tools/reference"

@@ -342,8 +341,8 @@ func (m *FakeNodeHandler) Patch(name string, pt types.PatchType, data []byte, su
 // FakeRecorder is used as a fake during testing.
 type FakeRecorder struct {
 sync.Mutex
-source clientv1.EventSource
-Events []*clientv1.Event
+source v1.EventSource
+Events []*v1.Event
 clock clock.Clock
 }

@@ -376,14 +375,14 @@ func (f *FakeRecorder) generateEvent(obj runtime.Object, timestamp metav1.Time,
 }
 }

-func (f *FakeRecorder) makeEvent(ref *clientv1.ObjectReference, eventtype, reason, message string) *clientv1.Event {
+func (f *FakeRecorder) makeEvent(ref *v1.ObjectReference, eventtype, reason, message string) *v1.Event {
 t := metav1.Time{Time: f.clock.Now()}
 namespace := ref.Namespace
 if namespace == "" {
 namespace = metav1.NamespaceDefault
 }

-clientref := clientv1.ObjectReference{
+clientref := v1.ObjectReference{
 Kind: ref.Kind,
 Namespace: ref.Namespace,
 Name: ref.Name,
@@ -393,7 +392,7 @@ func (f *FakeRecorder) makeEvent(ref *clientv1.ObjectReference, eventtype, reaso
 FieldPath: ref.FieldPath,
 }

-return &clientv1.Event{
+return &v1.Event{
 ObjectMeta: metav1.ObjectMeta{
 Name: fmt.Sprintf("%v.%x", ref.Name, t.UnixNano()),
 Namespace: namespace,
@@ -411,8 +410,8 @@ func (f *FakeRecorder) makeEvent(ref *clientv1.ObjectReference, eventtype, reaso
 // NewFakeRecorder returns a pointer to a newly constructed FakeRecorder.
 func NewFakeRecorder() *FakeRecorder {
 return &FakeRecorder{
-source: clientv1.EventSource{Component: "nodeControllerTest"},
-Events: []*clientv1.Event{},
+source: v1.EventSource{Component: "nodeControllerTest"},
+Events: []*v1.Event{},
 clock: clock.NewFakeClock(time.Now()),
 }
 }
@@ -25,7 +25,6 @@ import (
 autoscalingv1 "k8s.io/api/autoscaling/v1"
 autoscalingv2 "k8s.io/api/autoscaling/v2alpha1"
 "k8s.io/api/core/v1"
-clientv1 "k8s.io/api/core/v1"
 extensions "k8s.io/api/extensions/v1beta1"
 apiequality "k8s.io/apimachinery/pkg/api/equality"
 "k8s.io/apimachinery/pkg/api/errors"
@@ -113,7 +112,7 @@ func NewHorizontalController(
 broadcaster := record.NewBroadcaster()
 // TODO: remove the wrapper when every clients have moved to use the clientset.
 broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: evtNamespacer.Events("")})
-recorder := broadcaster.NewRecorder(scheme.Scheme, clientv1.EventSource{Component: "horizontal-pod-autoscaler"})
+recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "horizontal-pod-autoscaler"})

 hpaController := &HorizontalController{
 replicaCalc: replicaCalc,
@@ -27,7 +27,6 @@ import (
 autoscalingv1 "k8s.io/api/autoscaling/v1"
 autoscalingv2 "k8s.io/api/autoscaling/v2alpha1"
 "k8s.io/api/core/v1"
-clientv1 "k8s.io/api/core/v1"
 extensions "k8s.io/api/extensions/v1beta1"
 "k8s.io/apimachinery/pkg/api/resource"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -435,11 +434,11 @@ func (tc *testCase) prepareTestClient(t *testing.T) (*fake.Clientset, *metricsfa
 Containers: []metricsapi.ContainerMetrics{
 {
 Name: "container",
-Usage: clientv1.ResourceList{
-clientv1.ResourceCPU: *resource.NewMilliQuantity(
+Usage: v1.ResourceList{
+v1.ResourceCPU: *resource.NewMilliQuantity(
 int64(cpu),
 resource.DecimalSI),
-clientv1.ResourceMemory: *resource.NewQuantity(
+v1.ResourceMemory: *resource.NewQuantity(
 int64(1024*1024),
 resource.BinarySI),
 },
@@ -471,7 +470,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) (*fake.Clientset, *metricsfa

 for i, level := range tc.reportedLevels {
 podMetric := cmapi.MetricValue{
-DescribedObject: clientv1.ObjectReference{
+DescribedObject: v1.ObjectReference{
 Kind: "Pod",
 Name: fmt.Sprintf("%s-%d", podNamePrefix, i),
 Namespace: namespace,
@@ -509,7 +508,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) (*fake.Clientset, *metricsfa

 metrics.Items = []cmapi.MetricValue{
 {
-DescribedObject: clientv1.ObjectReference{
+DescribedObject: v1.ObjectReference{
 Kind: matchedTarget.Object.Target.Kind,
 APIVersion: matchedTarget.Object.Target.APIVersion,
 Name: name,
@@ -559,7 +558,7 @@ func (tc *testCase) setupController(t *testing.T) (*HorizontalController, inform
 tc.Lock()
 defer tc.Unlock()

-obj := action.(core.CreateAction).GetObject().(*clientv1.Event)
+obj := action.(core.CreateAction).GetObject().(*v1.Event)
 if tc.verifyEvents {
 switch obj.Reason {
 case "SuccessfulRescale":
@@ -30,7 +30,6 @@ import (
 autoscalingv1 "k8s.io/api/autoscaling/v1"
 autoscalingv2 "k8s.io/api/autoscaling/v2alpha1"
 "k8s.io/api/core/v1"
-clientv1 "k8s.io/api/core/v1"
 extensions "k8s.io/api/extensions/v1beta1"
 "k8s.io/apimachinery/pkg/api/resource"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -332,11 +331,11 @@ func (tc *legacyTestCase) prepareTestClient(t *testing.T) *fake.Clientset {
 Containers: []metricsapi.ContainerMetrics{
 {
 Name: "container",
-Usage: clientv1.ResourceList{
-clientv1.ResourceCPU: *resource.NewMilliQuantity(
+Usage: v1.ResourceList{
+v1.ResourceCPU: *resource.NewMilliQuantity(
 int64(cpu),
 resource.DecimalSI),
-clientv1.ResourceMemory: *resource.NewQuantity(
+v1.ResourceMemory: *resource.NewQuantity(
 int64(1024*1024),
 resource.BinarySI),
 },
@@ -464,7 +463,7 @@ func (tc *legacyTestCase) runTest(t *testing.T) {
 tc.Lock()
 defer tc.Unlock()

-obj := action.(core.CreateAction).GetObject().(*clientv1.Event)
+obj := action.(core.CreateAction).GetObject().(*v1.Event)
 if tc.verifyEvents {
 switch obj.Reason {
 case "SuccessfulRescale":
@@ -26,7 +26,6 @@ import (
 "time"

 "k8s.io/api/core/v1"
-clientv1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/resource"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/runtime"
@@ -127,8 +126,8 @@ func (tc *legacyReplicaCalcTestCase) prepareTestClient(t *testing.T) *fake.Clien
 for i := 0; i < numContainersPerPod; i++ {
 podMetric.Containers[i] = metricsapi.ContainerMetrics{
 Name: fmt.Sprintf("container%v", i),
-Usage: clientv1.ResourceList{
-clientv1.ResourceName(tc.resource.name): *resource.NewMilliQuantity(
+Usage: v1.ResourceList{
+v1.ResourceName(tc.resource.name): *resource.NewMilliQuantity(
 int64(resValue),
 resource.DecimalSI),
 },
@@ -24,7 +24,6 @@ import (
 "time"

 "k8s.io/api/core/v1"
-clientv1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/resource"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/labels"
@@ -115,11 +114,11 @@ func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
 for j, cpu := range containers {
 cm := metricsapi.ContainerMetrics{
 Name: fmt.Sprintf("%s-%d-container-%d", podNamePrefix, i, j),
-Usage: clientv1.ResourceList{
-clientv1.ResourceCPU: *resource.NewMilliQuantity(
+Usage: v1.ResourceList{
+v1.ResourceCPU: *resource.NewMilliQuantity(
 cpu,
 resource.DecimalSI),
-clientv1.ResourceMemory: *resource.NewQuantity(
+v1.ResourceMemory: *resource.NewQuantity(
 int64(1024*1024),
 resource.BinarySI),
 },
@@ -24,7 +24,6 @@ import (

 autoscaling "k8s.io/api/autoscaling/v2alpha1"
 "k8s.io/api/core/v1"
-clientv1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/labels"
 "k8s.io/apimachinery/pkg/runtime/schema"
@@ -72,7 +71,7 @@ func (c *resourceMetricsClient) GetResourceMetric(resource v1.ResourceName, name
 podSum := int64(0)
 missing := len(m.Containers) == 0
 for _, c := range m.Containers {
-resValue, found := c.Usage[clientv1.ResourceName(resource)]
+resValue, found := c.Usage[v1.ResourceName(resource)]
 if !found {
 missing = true
 glog.V(2).Infof("missing resource metric %v for container %s in pod %s/%s", resource, c.Name, namespace, m.Name)
@@ -24,7 +24,6 @@ import (

 autoscalingv2 "k8s.io/api/autoscaling/v2alpha1"
 "k8s.io/api/core/v1"
-clientv1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/resource"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/runtime"
@@ -158,8 +157,8 @@ func (tc *replicaCalcTestCase) prepareTestClient(t *testing.T) (*fake.Clientset,
 for i := 0; i < numContainersPerPod; i++ {
 podMetric.Containers[i] = metricsapi.ContainerMetrics{
 Name: fmt.Sprintf("container%v", i),
-Usage: clientv1.ResourceList{
-clientv1.ResourceName(tc.resource.name): *resource.NewMilliQuantity(
+Usage: v1.ResourceList{
+v1.ResourceName(tc.resource.name): *resource.NewMilliQuantity(
 int64(resValue),
 resource.DecimalSI),
 },
@@ -194,7 +193,7 @@ func (tc *replicaCalcTestCase) prepareTestClient(t *testing.T) (*fake.Clientset,

 for i, level := range tc.metric.levels {
 podMetric := cmapi.MetricValue{
-DescribedObject: clientv1.ObjectReference{
+DescribedObject: v1.ObjectReference{
 Kind: "Pod",
 Name: fmt.Sprintf("%s-%d", podNamePrefix, i),
 Namespace: testNamespace,
@@ -224,7 +223,7 @@ func (tc *replicaCalcTestCase) prepareTestClient(t *testing.T) (*fake.Clientset,

 metrics.Items = []cmapi.MetricValue{
 {
-DescribedObject: clientv1.ObjectReference{
+DescribedObject: v1.ObjectReference{
 Kind: tc.metric.singleObject.Kind,
 APIVersion: tc.metric.singleObject.APIVersion,
 Name: name,
@@ -27,7 +27,6 @@ import (

 "github.com/golang/glog"
 "k8s.io/api/core/v1"
-clientv1 "k8s.io/api/core/v1"
 "k8s.io/api/extensions/v1beta1"
 extensions "k8s.io/api/extensions/v1beta1"
 "k8s.io/apimachinery/pkg/api/errors"
@@ -106,7 +105,7 @@ func NewReplicaSetController(rsInformer extensionsinformers.ReplicaSetInformer,
 kubeClient: kubeClient,
 podControl: controller.RealPodControl{
 KubeClient: kubeClient,
-Recorder: eventBroadcaster.NewRecorder(scheme.Scheme, clientv1.EventSource{Component: "replicaset-controller"}),
+Recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "replicaset-controller"}),
 },
 burstReplicas: burstReplicas,
 expectations: controller.NewUIDTrackingControllerExpectations(controller.NewControllerExpectations()),
@@ -27,7 +27,6 @@ import (

 "github.com/golang/glog"
 "k8s.io/api/core/v1"
-clientv1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/labels"
@@ -102,7 +101,7 @@ func NewReplicationManager(podInformer coreinformers.PodInformer, rcInformer cor
 kubeClient: kubeClient,
 podControl: controller.RealPodControl{
 KubeClient: kubeClient,
-Recorder: eventBroadcaster.NewRecorder(scheme.Scheme, clientv1.EventSource{Component: "replication-controller"}),
+Recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "replication-controller"}),
 },
 burstReplicas: burstReplicas,
 expectations: controller.NewUIDTrackingControllerExpectations(controller.NewControllerExpectations()),
@@ -26,7 +26,6 @@ import (

 "github.com/golang/glog"
 "k8s.io/api/core/v1"
-clientv1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/errors"
 "k8s.io/apimachinery/pkg/util/runtime"
 "k8s.io/apimachinery/pkg/util/wait"
@@ -107,7 +106,7 @@ func New(
 ) (*ServiceController, error) {
 broadcaster := record.NewBroadcaster()
 broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.Core().RESTClient()).Events("")})
-recorder := broadcaster.NewRecorder(scheme.Scheme, clientv1.EventSource{Component: "service-controller"})
+recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "service-controller"})

 if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil {
 metrics.RegisterMetricAndTrackRateLimiterUsage("service_controller", kubeClient.Core().RESTClient().GetRateLimiter())
@@ -23,7 +23,6 @@ import (

 apps "k8s.io/api/apps/v1beta1"
 "k8s.io/api/core/v1"
-clientv1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/labels"
@@ -88,7 +87,7 @@ func NewStatefulSetController(
 eventBroadcaster := record.NewBroadcaster()
 eventBroadcaster.StartLogging(glog.Infof)
 eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.Core().RESTClient()).Events("")})
-recorder := eventBroadcaster.NewRecorder(scheme.Scheme, clientv1.EventSource{Component: "statefulset"})
+recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "statefulset"})

 ssc := &StatefulSetController{
 kubeClient: kubeClient,
@@ -25,7 +25,6 @@ import (

 "github.com/golang/glog"
 "k8s.io/api/core/v1"
-clientv1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/labels"
 "k8s.io/apimachinery/pkg/types"
 "k8s.io/apimachinery/pkg/util/runtime"
@@ -135,7 +134,7 @@ func NewAttachDetachController(
 eventBroadcaster := record.NewBroadcaster()
 eventBroadcaster.StartLogging(glog.Infof)
 eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.Core().RESTClient()).Events("")})
-recorder := eventBroadcaster.NewRecorder(scheme.Scheme, clientv1.EventSource{Component: "attachdetach"})
+recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "attachdetach"})

 adc.desiredStateOfWorld = cache.NewDesiredStateOfWorld(&adc.volumePluginMgr)
 adc.actualStateOfWorld = cache.NewActualStateOfWorld(&adc.volumePluginMgr)
@@ -22,7 +22,6 @@ import (
 "time"

 "k8s.io/api/core/v1"
-clientv1 "k8s.io/api/core/v1"
 storage "k8s.io/api/storage/v1"
 "k8s.io/apimachinery/pkg/api/errors"
 "k8s.io/apimachinery/pkg/api/meta"
@@ -72,7 +71,7 @@ func NewController(p ControllerParameters) (*PersistentVolumeController, error)
 if eventRecorder == nil {
 broadcaster := record.NewBroadcaster()
 broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(p.KubeClient.Core().RESTClient()).Events("")})
-eventRecorder = broadcaster.NewRecorder(scheme.Scheme, clientv1.EventSource{Component: "persistentvolume-controller"})
+eventRecorder = broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "persistentvolume-controller"})
 }

 controller := &PersistentVolumeController{
@@ -25,7 +25,7 @@ import (

 "net/url"

-clientv1 "k8s.io/api/core/v1"
+"k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/resource"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/client-go/rest/fake"
@@ -178,18 +178,18 @@ func testPodMetricsData() []metricsapi.PodMetrics {
 Containers: []metricsapi.ContainerMetrics{
 {
 Name: "container1-1",
-Usage: clientv1.ResourceList{
-clientv1.ResourceCPU: *resource.NewMilliQuantity(1, resource.DecimalSI),
-clientv1.ResourceMemory: *resource.NewQuantity(2*(1024*1024), resource.DecimalSI),
-clientv1.ResourceStorage: *resource.NewQuantity(3*(1024*1024), resource.DecimalSI),
+Usage: v1.ResourceList{
+v1.ResourceCPU: *resource.NewMilliQuantity(1, resource.DecimalSI),
+v1.ResourceMemory: *resource.NewQuantity(2*(1024*1024), resource.DecimalSI),
+v1.ResourceStorage: *resource.NewQuantity(3*(1024*1024), resource.DecimalSI),
 },
 },
 {
 Name: "container1-2",
-Usage: clientv1.ResourceList{
-clientv1.ResourceCPU: *resource.NewMilliQuantity(4, resource.DecimalSI),
-clientv1.ResourceMemory: *resource.NewQuantity(5*(1024*1024), resource.DecimalSI),
-clientv1.ResourceStorage: *resource.NewQuantity(6*(1024*1024), resource.DecimalSI),
+Usage: v1.ResourceList{
+v1.ResourceCPU: *resource.NewMilliQuantity(4, resource.DecimalSI),
+v1.ResourceMemory: *resource.NewQuantity(5*(1024*1024), resource.DecimalSI),
+v1.ResourceStorage: *resource.NewQuantity(6*(1024*1024), resource.DecimalSI),
 },
 },
 },
@@ -200,26 +200,26 @@ func testPodMetricsData() []metricsapi.PodMetrics {
 Containers: []metricsapi.ContainerMetrics{
 {
 Name: "container2-1",
-Usage: clientv1.ResourceList{
-clientv1.ResourceCPU: *resource.NewMilliQuantity(7, resource.DecimalSI),
-clientv1.ResourceMemory: *resource.NewQuantity(8*(1024*1024), resource.DecimalSI),
-clientv1.ResourceStorage: *resource.NewQuantity(9*(1024*1024), resource.DecimalSI),
+Usage: v1.ResourceList{
+v1.ResourceCPU: *resource.NewMilliQuantity(7, resource.DecimalSI),
+v1.ResourceMemory: *resource.NewQuantity(8*(1024*1024), resource.DecimalSI),
+v1.ResourceStorage: *resource.NewQuantity(9*(1024*1024), resource.DecimalSI),
 },
 },
 {
 Name: "container2-2",
-Usage: clientv1.ResourceList{
-clientv1.ResourceCPU: *resource.NewMilliQuantity(10, resource.DecimalSI),
-clientv1.ResourceMemory: *resource.NewQuantity(11*(1024*1024), resource.DecimalSI),
-clientv1.ResourceStorage: *resource.NewQuantity(12*(1024*1024), resource.DecimalSI),
+Usage: v1.ResourceList{
+v1.ResourceCPU: *resource.NewMilliQuantity(10, resource.DecimalSI),
+v1.ResourceMemory: *resource.NewQuantity(11*(1024*1024), resource.DecimalSI),
+v1.ResourceStorage: *resource.NewQuantity(12*(1024*1024), resource.DecimalSI),
 },
 },
 {
 Name: "container2-3",
-Usage: clientv1.ResourceList{
-clientv1.ResourceCPU: *resource.NewMilliQuantity(13, resource.DecimalSI),
-clientv1.ResourceMemory: *resource.NewQuantity(14*(1024*1024), resource.DecimalSI),
-clientv1.ResourceStorage: *resource.NewQuantity(15*(1024*1024), resource.DecimalSI),
+Usage: v1.ResourceList{
+v1.ResourceCPU: *resource.NewMilliQuantity(13, resource.DecimalSI),
+v1.ResourceMemory: *resource.NewQuantity(14*(1024*1024), resource.DecimalSI),
+v1.ResourceStorage: *resource.NewQuantity(15*(1024*1024), resource.DecimalSI),
 },
 },
 },
@@ -230,10 +230,10 @@ func testPodMetricsData() []metricsapi.PodMetrics {
 Containers: []metricsapi.ContainerMetrics{
 {
 Name: "container3-1",
-Usage: clientv1.ResourceList{
-clientv1.ResourceCPU: *resource.NewMilliQuantity(7, resource.DecimalSI),
-clientv1.ResourceMemory: *resource.NewQuantity(8*(1024*1024), resource.DecimalSI),
-clientv1.ResourceStorage: *resource.NewQuantity(9*(1024*1024), resource.DecimalSI),
+Usage: v1.ResourceList{
+v1.ResourceCPU: *resource.NewMilliQuantity(7, resource.DecimalSI),
+v1.ResourceMemory: *resource.NewQuantity(8*(1024*1024), resource.DecimalSI),
+v1.ResourceStorage: *resource.NewQuantity(9*(1024*1024), resource.DecimalSI),
 },
 },
 },
@@ -25,7 +25,7 @@ import (

 "testing"

-clientv1 "k8s.io/api/core/v1"
+"k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/resource"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/kubernetes/pkg/api"
@@ -68,19 +68,19 @@ func testNodeMetricsData() (*metricsapi.NodeMetricsList, *api.NodeList) {
 {
 ObjectMeta: metav1.ObjectMeta{Name: "node1", ResourceVersion: "10"},
 Window: metav1.Duration{Duration: time.Minute},
-Usage: clientv1.ResourceList{
-clientv1.ResourceCPU: *resource.NewMilliQuantity(1, resource.DecimalSI),
-clientv1.ResourceMemory: *resource.NewQuantity(2*(1024*1024), resource.DecimalSI),
-clientv1.ResourceStorage: *resource.NewQuantity(3*(1024*1024), resource.DecimalSI),
+Usage: v1.ResourceList{
+v1.ResourceCPU: *resource.NewMilliQuantity(1, resource.DecimalSI),
+v1.ResourceMemory: *resource.NewQuantity(2*(1024*1024), resource.DecimalSI),
+v1.ResourceStorage: *resource.NewQuantity(3*(1024*1024), resource.DecimalSI),
 },
 },
 {
 ObjectMeta: metav1.ObjectMeta{Name: "node2", ResourceVersion: "11"},
 Window: metav1.Duration{Duration: time.Minute},
-Usage: clientv1.ResourceList{
-clientv1.ResourceCPU: *resource.NewMilliQuantity(5, resource.DecimalSI),
-clientv1.ResourceMemory: *resource.NewQuantity(6*(1024*1024), resource.DecimalSI),
-clientv1.ResourceStorage: *resource.NewQuantity(7*(1024*1024), resource.DecimalSI),
+Usage: v1.ResourceList{
+v1.ResourceCPU: *resource.NewMilliQuantity(5, resource.DecimalSI),
+v1.ResourceMemory: *resource.NewQuantity(6*(1024*1024), resource.DecimalSI),
+v1.ResourceStorage: *resource.NewQuantity(7*(1024*1024), resource.DecimalSI),
 },
 },
 },
@@ -26,7 +26,6 @@ import (
 "github.com/golang/glog"

 "k8s.io/api/core/v1"
-clientv1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/resource"
 "k8s.io/apimachinery/pkg/types"
 "k8s.io/kubernetes/pkg/kubelet/events"
@@ -73,7 +72,7 @@ func (cm *containerManagerImpl) enforceNodeAllocatableCgroups() error {
 }

 // Using ObjectReference for events as the node maybe not cached; refer to #42701 for detail.
-nodeRef := &clientv1.ObjectReference{
+nodeRef := &v1.ObjectReference{
 Kind: "Node",
 Name: cm.nodeInfo.Name,
 UID: types.UID(cm.nodeInfo.Name),
@@ -25,7 +25,6 @@ import (
 "time"

 "k8s.io/api/core/v1"
-clientv1 "k8s.io/api/core/v1"
 apiequality "k8s.io/apimachinery/pkg/api/equality"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/types"
@@ -88,7 +87,7 @@ func CreatePodUpdate(op kubetypes.PodOperation, source string, pods ...*v1.Pod)

 func createPodConfigTester(mode PodConfigNotificationMode) (chan<- interface{}, <-chan kubetypes.PodUpdate, *PodConfig) {
 eventBroadcaster := record.NewBroadcaster()
-config := NewPodConfig(mode, eventBroadcaster.NewRecorder(scheme.Scheme, clientv1.EventSource{Component: "kubelet"}))
+config := NewPodConfig(mode, eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "kubelet"}))
 channel := config.Channel(TestSource)
 ch := config.Updates()
 return channel, ch, config
@@ -25,7 +25,6 @@ import (
 "github.com/golang/glog"

 "k8s.io/api/core/v1"
-clientv1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/resource"
 "k8s.io/apimachinery/pkg/util/clock"
 "k8s.io/apimachinery/pkg/util/wait"
@@ -68,7 +67,7 @@ type managerImpl struct {
 // captures when a node condition was last observed based on a threshold being met
 nodeConditionsLastObservedAt nodeConditionsObservedAt
 // nodeRef is a reference to the node
-nodeRef *clientv1.ObjectReference
+nodeRef *v1.ObjectReference
 // used to record events about the node
 recorder record.EventRecorder
 // used to measure usage stats on system
@@ -100,7 +99,7 @@ func NewManager(
 imageGC ImageGC,
 containerGC ContainerGC,
 recorder record.EventRecorder,
-nodeRef *clientv1.ObjectReference,
+nodeRef *v1.ObjectReference,
 clock clock.Clock) (Manager, lifecycle.PodAdmitHandler) {
 manager := &managerImpl{
 clock: clock,
@@ -21,7 +21,6 @@ import (
 "time"

 "k8s.io/api/core/v1"
-clientv1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/resource"
 "k8s.io/apimachinery/pkg/types"
 "k8s.io/apimachinery/pkg/util/clock"
@@ -202,7 +201,7 @@ func TestMemoryPressure(t *testing.T) {
 diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false}
 nodeProvider := newMockNodeProvider(v1.ResourceList{v1.ResourceMemory: *quantityMustParse("2Gi")})
 imageGC := &mockDiskGC{imageBytesFreed: int64(0), err: nil}
-nodeRef := &clientv1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
+nodeRef := &v1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}

 config := Config{
 MaxPodGracePeriodSeconds: 5,
@@ -420,7 +419,7 @@ func TestDiskPressureNodeFs(t *testing.T) {
 diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false}
 nodeProvider := newMockNodeProvider(v1.ResourceList{v1.ResourceMemory: *quantityMustParse("2Gi")})
 diskGC := &mockDiskGC{imageBytesFreed: int64(0), err: nil}
-nodeRef := &clientv1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
+nodeRef := &v1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}

 config := Config{
 MaxPodGracePeriodSeconds: 5,
@@ -619,7 +618,7 @@ func TestMinReclaim(t *testing.T) {
 diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false}
 nodeProvider := newMockNodeProvider(v1.ResourceList{v1.ResourceMemory: *quantityMustParse("2Gi")})
 diskGC := &mockDiskGC{imageBytesFreed: int64(0), err: nil}
-nodeRef := &clientv1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
+nodeRef := &v1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}

 config := Config{
 MaxPodGracePeriodSeconds: 5,
@@ -760,7 +759,7 @@ func TestNodeReclaimFuncs(t *testing.T) {
 nodeProvider := newMockNodeProvider(v1.ResourceList{v1.ResourceMemory: *quantityMustParse("2Gi")})
 imageGcFree := resource.MustParse("700Mi")
 diskGC := &mockDiskGC{imageBytesFreed: imageGcFree.Value(), err: nil}
-nodeRef := &clientv1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
+nodeRef := &v1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}

 config := Config{
 MaxPodGracePeriodSeconds: 5,
@@ -957,7 +956,7 @@ func TestInodePressureNodeFsInodes(t *testing.T) {
 diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false}
 nodeProvider := newMockNodeProvider(v1.ResourceList{v1.ResourceMemory: *quantityMustParse("2Gi")})
 diskGC := &mockDiskGC{imageBytesFreed: int64(0), err: nil}
-nodeRef := &clientv1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
+nodeRef := &v1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}

 config := Config{
 MaxPodGracePeriodSeconds: 5,
@@ -1159,7 +1158,7 @@ func TestCriticalPodsAreNotEvicted(t *testing.T) {
 diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false}
 nodeProvider := newMockNodeProvider(v1.ResourceList{v1.ResourceMemory: *quantityMustParse("2Gi")})
 diskGC := &mockDiskGC{imageBytesFreed: int64(0), err: nil}
-nodeRef := &clientv1.ObjectReference{
+nodeRef := &v1.ObjectReference{
 Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: "",
 }

@@ -1292,7 +1291,7 @@ func TestAllocatableMemoryPressure(t *testing.T) {
 diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false}
 nodeProvider := newMockNodeProvider(v1.ResourceList{v1.ResourceMemory: *quantityMustParse("2Gi")})
 diskGC := &mockDiskGC{imageBytesFreed: int64(0), err: nil}
-nodeRef := &clientv1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
+nodeRef := &v1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}

 config := Config{
 MaxPodGracePeriodSeconds: 5,
@@ -25,7 +25,6 @@ import (

 	"github.com/golang/glog"
 	"k8s.io/api/core/v1"
-	clientv1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/util/errors"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/wait"
@@ -86,7 +85,7 @@ type realImageGCManager struct {
 	recorder record.EventRecorder

 	// Reference to this node.
-	nodeRef *clientv1.ObjectReference
+	nodeRef *v1.ObjectReference

 	// Track initialization
 	initialized bool
@@ -129,7 +128,7 @@ type imageRecord struct {
 	size int64
 }

-func NewImageGCManager(runtime container.Runtime, cadvisorInterface cadvisor.Interface, recorder record.EventRecorder, nodeRef *clientv1.ObjectReference, policy ImageGCPolicy) (ImageGCManager, error) {
+func NewImageGCManager(runtime container.Runtime, cadvisorInterface cadvisor.Interface, recorder record.EventRecorder, nodeRef *v1.ObjectReference, policy ImageGCPolicy) (ImageGCManager, error) {
 	// Validate policy.
 	if policy.HighThresholdPercent < 0 || policy.HighThresholdPercent > 100 {
 		return nil, fmt.Errorf("invalid HighThresholdPercent %d, must be in range [0-100]", policy.HighThresholdPercent)

@@ -37,7 +37,6 @@ import (
 	cadvisorapi "github.com/google/cadvisor/info/v1"
 	cadvisorapiv2 "github.com/google/cadvisor/info/v2"
 	"k8s.io/api/core/v1"
-	clientv1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/apimachinery/pkg/labels"
@@ -409,7 +408,7 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Dep
 	// TODO: get the real node object of ourself,
 	// and use the real node name and UID.
 	// TODO: what is namespace for node?
-	nodeRef := &clientv1.ObjectReference{
+	nodeRef := &v1.ObjectReference{
 		Kind: "Node",
 		Name: string(nodeName),
 		UID: types.UID(nodeName),
@@ -956,7 +955,7 @@ type Kubelet struct {
 	// Indicates that the node initialization happens in an external cloud controller
 	externalCloudProvider bool
 	// Reference to this node.
-	nodeRef *clientv1.ObjectReference
+	nodeRef *v1.ObjectReference

 	// Container runtime.
 	containerRuntime kubecontainer.Runtime

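For orientation, here is a minimal, self-contained sketch of the pattern these hunks converge on: a single unaliased import of k8s.io/api/core/v1 provides both ObjectReference and EventSource, so the clientv1 alias becomes unnecessary. The use of api.Scheme matches the surrounding hunks, but the helper function and the "kubelet" component name are illustrative assumptions, not code from this commit.

// Illustrative sketch only; not code from this commit.
package example

import (
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/tools/record"
	"k8s.io/kubernetes/pkg/api" // scheme source used elsewhere in this commit
)

// newNodeRefAndRecorder builds a node reference and an event recorder using
// only the unaliased core/v1 package.
func newNodeRefAndRecorder(nodeName string) (*v1.ObjectReference, record.EventRecorder) {
	nodeRef := &v1.ObjectReference{
		Kind:      "Node",
		Name:      nodeName,
		UID:       types.UID(nodeName),
		Namespace: "",
	}
	eventBroadcaster := record.NewBroadcaster()
	// Component name is an assumption for this sketch.
	recorder := eventBroadcaster.NewRecorder(api.Scheme, v1.EventSource{Component: "kubelet"})
	return nodeRef, recorder
}
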
@@ -29,7 +29,6 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	"k8s.io/api/core/v1"
-	clientv1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
@@ -233,7 +232,7 @@ func newTestKubeletWithImageList(
 	kubelet.livenessManager = proberesults.NewManager()

 	kubelet.containerManager = cm.NewStubContainerManager()
-	fakeNodeRef := &clientv1.ObjectReference{
+	fakeNodeRef := &v1.ObjectReference{
 		Kind: "Node",
 		Name: testKubeletHostname,
 		UID: types.UID(testKubeletHostname),
@@ -263,7 +262,7 @@ func newTestKubeletWithImageList(
 	// TODO: Factor out "StatsProvider" from Kubelet so we don't have a cyclic dependency
 	volumeStatsAggPeriod := time.Second * 10
 	kubelet.resourceAnalyzer = stats.NewResourceAnalyzer(kubelet, volumeStatsAggPeriod, kubelet.containerRuntime)
-	nodeRef := &clientv1.ObjectReference{
+	nodeRef := &v1.ObjectReference{
 		Kind: "Node",
 		Name: string(kubelet.nodeName),
 		UID: types.UID(kubelet.nodeName),

@@ -24,7 +24,6 @@ import (
 	cadvisorapi "github.com/google/cadvisor/info/v1"
 	cadvisorapiv2 "github.com/google/cadvisor/info/v2"
 	"k8s.io/api/core/v1"
-	clientv1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/clock"
@@ -117,7 +116,7 @@ func TestRunOnce(t *testing.T) {
 	// TODO: Factor out "StatsProvider" from Kubelet so we don't have a cyclic dependency
 	volumeStatsAggPeriod := time.Second * 10
 	kb.resourceAnalyzer = stats.NewResourceAnalyzer(kb, volumeStatsAggPeriod, kb.containerRuntime)
-	nodeRef := &clientv1.ObjectReference{
+	nodeRef := &v1.ObjectReference{
 		Kind: "Node",
 		Name: string(kb.nodeName),
 		UID: types.UID(kb.nodeName),

@@ -21,7 +21,7 @@ import (
 	"net"
 	"time"

-	clientv1 "k8s.io/api/core/v1"
+	"k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
@@ -106,7 +106,7 @@ func NewHollowProxyOrDie(
 	}

 	// Create a Hollow Proxy instance.
-	nodeRef := &clientv1.ObjectReference{
+	nodeRef := &v1.ObjectReference{
 		Kind: "Node",
 		Name: nodeName,
 		UID: types.UID(nodeName),

@@ -28,7 +28,7 @@ import (
 	"github.com/golang/glog"
 	"github.com/renstrom/dedent"

-	clientv1 "k8s.io/api/core/v1"
+	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/clock"
 	"k8s.io/client-go/tools/record"
@@ -156,7 +156,7 @@ func (hcs *server) SyncServices(newServices map[types.NamespacedName]uint16) err

 	if hcs.recorder != nil {
 		hcs.recorder.Eventf(
-			&clientv1.ObjectReference{
+			&v1.ObjectReference{
 				Kind: "Service",
 				Namespace: nsn.Namespace,
 				Name: nsn.Name,

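As a hedged illustration of the call above, an event can be recorded against a Service reference built from the same unaliased v1 package. The event type, reason, message, and the UID field below are placeholders chosen for the sketch, not values taken from this file.

// Illustrative sketch only; event type, reason, and message are assumed placeholders.
package example

import (
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/tools/record"
)

// recordServiceEvent emits a warning event against a Service object reference.
func recordServiceEvent(recorder record.EventRecorder, nsn types.NamespacedName, port int) {
	if recorder == nil {
		return
	}
	recorder.Eventf(
		&v1.ObjectReference{
			Kind:      "Service",
			Namespace: nsn.Namespace,
			Name:      nsn.Name,
			UID:       types.UID(nsn.String()), // assumed
		},
		v1.EventTypeWarning, "FailedToStartServiceHealthcheck", "could not open healthcheck port %d", port)
}
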
@@ -35,7 +35,7 @@ import (

 	"github.com/golang/glog"

-	clientv1 "k8s.io/api/core/v1"
+	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/wait"
@@ -1235,7 +1235,7 @@ func (proxier *Proxier) syncProxyRules() {
 		msg := fmt.Sprintf("can't open %s, skipping this externalIP: %v", lp.String(), err)

 		proxier.recorder.Eventf(
-			&clientv1.ObjectReference{
+			&v1.ObjectReference{
 				Kind: "Node",
 				Name: proxier.hostname,
 				UID: types.UID(proxier.hostname),

@@ -37,7 +37,7 @@ import (
 	"k8s.io/client-go/tools/record"
 	"k8s.io/kubernetes/pkg/api"

-	clientv1 "k8s.io/api/core/v1"
+	"k8s.io/api/core/v1"

 	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/plugin/pkg/scheduler"
@@ -53,7 +53,7 @@ func createRecorder(kubecli *clientset.Clientset, s *options.SchedulerServer) re
 	eventBroadcaster := record.NewBroadcaster()
 	eventBroadcaster.StartLogging(glog.Infof)
 	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubecli.Core().RESTClient()).Events("")})
-	return eventBroadcaster.NewRecorder(api.Scheme, clientv1.EventSource{Component: s.SchedulerName})
+	return eventBroadcaster.NewRecorder(api.Scheme, v1.EventSource{Component: s.SchedulerName})
 }

 func createClient(s *options.SchedulerServer) (*clientset.Clientset, error) {

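For reference, the post-change createRecorder reads as below when assembled in one place. The function body mirrors the hunk above; the import list (and in particular the options package path) is inferred from the surrounding hunks and should be treated as an approximation rather than the file's exact header.

// Assembled for illustration; the function body mirrors the hunk above.
package app

import (
	"github.com/golang/glog"

	"k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/tools/record"
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/plugin/cmd/kube-scheduler/app/options" // assumed import path
)

func createRecorder(kubecli *clientset.Clientset, s *options.SchedulerServer) record.EventRecorder {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubecli.Core().RESTClient()).Events("")})
	// EventSource now comes from the single, unaliased core/v1 import.
	return eventBroadcaster.NewRecorder(api.Scheme, v1.EventSource{Component: s.SchedulerName})
}
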
@@ -24,7 +24,6 @@ import (
 	"time"

 	"k8s.io/api/core/v1"
-	clientv1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
@@ -177,13 +176,13 @@ func TestScheduler(t *testing.T) {
 			NextPod: func() *v1.Pod {
 				return item.sendPod
 			},
-			Recorder: eventBroadcaster.NewRecorder(api.Scheme, clientv1.EventSource{Component: "scheduler"}),
+			Recorder: eventBroadcaster.NewRecorder(api.Scheme, v1.EventSource{Component: "scheduler"}),
 		},
 	}

 	s, _ := NewFromConfigurator(configurator, nil...)
 	called := make(chan struct{})
-	events := eventBroadcaster.StartEventWatcher(func(e *clientv1.Event) {
+	events := eventBroadcaster.StartEventWatcher(func(e *v1.Event) {
 		if e, a := item.eventReason, e.Reason; e != a {
 			t.Errorf("%v: expected %v, got %v", i, e, a)
 		}

@@ -23,7 +23,6 @@ import (
 	batchv1 "k8s.io/api/batch/v1"
 	batchv2alpha1 "k8s.io/api/batch/v2alpha1"
 	"k8s.io/api/core/v1"
-	clientv1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -37,8 +36,8 @@ import (
 	. "github.com/onsi/gomega"
 )

-func stagingClientPod(name, value string) clientv1.Pod {
-	return clientv1.Pod{
+func stagingClientPod(name, value string) v1.Pod {
+	return v1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: name,
 			Labels: map[string]string{
@@ -46,12 +45,12 @@ func stagingClientPod(name, value string) clientv1.Pod {
 				"time": value,
 			},
 		},
-		Spec: clientv1.PodSpec{
-			Containers: []clientv1.Container{
+		Spec: v1.PodSpec{
+			Containers: []v1.Container{
 				{
 					Name: "nginx",
 					Image: "gcr.io/google_containers/nginx-slim:0.7",
-					Ports: []clientv1.ContainerPort{{ContainerPort: 80}},
+					Ports: []v1.ContainerPort{{ContainerPort: 80}},
 				},
 			},
 		},

@@ -28,7 +28,6 @@ import (
 	"time"

 	"k8s.io/api/core/v1"
-	clientv1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
@@ -377,7 +376,7 @@ func TestSchedulerExtender(t *testing.T) {
 		t.Fatalf("Couldn't create scheduler config: %v", err)
 	}
 	eventBroadcaster := record.NewBroadcaster()
-	schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.Scheme, clientv1.EventSource{Component: v1.DefaultSchedulerName})
+	schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.Scheme, v1.EventSource{Component: v1.DefaultSchedulerName})
 	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(clientSet.Core().RESTClient()).Events("")})
 	scheduler, _ := scheduler.NewFromConfigurator(&scheduler.FakeConfigurator{Config: schedulerConfig}, nil...)
 	informerFactory.Start(schedulerConfig.StopEverything)

@@ -24,7 +24,6 @@ import (
 	"time"

 	"k8s.io/api/core/v1"
-	clientv1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
@@ -130,7 +129,7 @@ func TestSchedulerCreationFromConfigMap(t *testing.T) {
 		informerFactory.Extensions().V1beta1().ReplicaSets(),
 		informerFactory.Apps().V1beta1().StatefulSets(),
 		informerFactory.Core().V1().Services(),
-		eventBroadcaster.NewRecorder(api.Scheme, clientv1.EventSource{Component: v1.DefaultSchedulerName}),
+		eventBroadcaster.NewRecorder(api.Scheme, v1.EventSource{Component: v1.DefaultSchedulerName}),
 	)
 	if err != nil {
 		t.Fatalf("Error creating scheduler: %v", err)
@@ -181,7 +180,7 @@ func TestSchedulerCreationFromNonExistentConfigMap(t *testing.T) {
 		informerFactory.Extensions().V1beta1().ReplicaSets(),
 		informerFactory.Apps().V1beta1().StatefulSets(),
 		informerFactory.Core().V1().Services(),
-		eventBroadcaster.NewRecorder(api.Scheme, clientv1.EventSource{Component: v1.DefaultSchedulerName}),
+		eventBroadcaster.NewRecorder(api.Scheme, v1.EventSource{Component: v1.DefaultSchedulerName}),
 	)

 	if err == nil {
@@ -219,7 +218,7 @@ func TestSchedulerCreationInLegacyMode(t *testing.T) {
 		informerFactory.Extensions().V1beta1().ReplicaSets(),
 		informerFactory.Apps().V1beta1().StatefulSets(),
 		informerFactory.Core().V1().Services(),
-		eventBroadcaster.NewRecorder(api.Scheme, clientv1.EventSource{Component: v1.DefaultSchedulerName}),
+		eventBroadcaster.NewRecorder(api.Scheme, v1.EventSource{Component: v1.DefaultSchedulerName}),
 	)
 	if err != nil {
 		t.Fatalf("Creation of scheduler in legacy mode failed: %v", err)
@@ -512,7 +511,7 @@ func TestMultiScheduler(t *testing.T) {
 		t.Errorf("Couldn't create scheduler config: %v", err)
 	}
 	eventBroadcaster2 := record.NewBroadcaster()
-	schedulerConfig2.Recorder = eventBroadcaster2.NewRecorder(api.Scheme, clientv1.EventSource{Component: fooScheduler})
+	schedulerConfig2.Recorder = eventBroadcaster2.NewRecorder(api.Scheme, v1.EventSource{Component: fooScheduler})
 	eventBroadcaster2.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientv1core.New(clientSet2.CoreV1().RESTClient()).Events("")})
 	informerFactory2.Start(schedulerConfig2.StopEverything)

@@ -22,7 +22,6 @@ import (
 	"time"

 	"k8s.io/api/core/v1"
-	clientv1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -86,7 +85,7 @@ func initTest(t *testing.T, nsPrefix string) *TestContext {
 		t.Fatalf("Couldn't create scheduler config: %v", err)
 	}
 	eventBroadcaster := record.NewBroadcaster()
-	context.schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.Scheme, clientv1.EventSource{Component: v1.DefaultSchedulerName})
+	context.schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.Scheme, v1.EventSource{Component: v1.DefaultSchedulerName})
 	eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientv1core.New(context.clientSet.CoreV1().RESTClient()).Events("")})
 	context.informerFactory.Start(context.schedulerConfig.StopEverything)
 	context.scheduler, err = scheduler.NewFromConfigurator(&scheduler.FakeConfigurator{Config: context.schedulerConfig}, nil...)

@@ -22,7 +22,6 @@ import (

 	"github.com/golang/glog"
 	"k8s.io/api/core/v1"
-	clientv1 "k8s.io/api/core/v1"
 	"k8s.io/client-go/informers"
 	clientset "k8s.io/client-go/kubernetes"
 	clientv1core "k8s.io/client-go/kubernetes/typed/core/v1"
@@ -83,7 +82,7 @@ func mustSetupScheduler() (schedulerConfigurator scheduler.Configurator, destroy
 	eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientv1core.New(clientSet.Core().RESTClient()).Events("")})

 	sched, err := scheduler.NewFromConfigurator(schedulerConfigurator, func(conf *scheduler.Config) {
-		conf.Recorder = eventBroadcaster.NewRecorder(api.Scheme, clientv1.EventSource{Component: "scheduler"})
+		conf.Recorder = eventBroadcaster.NewRecorder(api.Scheme, v1.EventSource{Component: "scheduler"})
 	})
 	if err != nil {
 		glog.Fatalf("Error creating scheduler: %v", err)