mirror of https://github.com/k3s-io/k3s

Merge pull request #20452 from caesarxuchao/replace-client-kubelet

Auto commit by PR queue bot

commit 843c11e06a
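The change applied throughout this diff is mechanical: call sites that built the monolithic unversioned client.Client switch to the generated release_1_1 clientset, and resources are reached through typed group clients. A minimal sketch of the two styles, assuming a populated config named cfg; the function and variable names here are illustrative, not part of the diff:

```go
package main

import (
	"github.com/golang/glog"

	"k8s.io/kubernetes/pkg/api"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
	client "k8s.io/kubernetes/pkg/client/unversioned"
)

func buildClients(cfg *client.Config) {
	// Old style: one flat client object for every API group.
	oldClient, err := client.New(cfg)
	if err != nil {
		glog.Fatalf("failed to create unversioned client: %v", err)
	}
	_ = oldClient.Pods(api.NamespaceDefault) // all resources hang off the same object

	// New style: a generated clientset; resources live behind typed group clients.
	cs, err := clientset.NewForConfig(cfg)
	if err != nil {
		glog.Fatalf("failed to create clientset: %v", err)
	}
	_ = cs.Legacy().Pods(api.NamespaceDefault) // core ("legacy") group
	_ = cs.Extensions()                        // extensions group, as used by the HPA wiring below
}
```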
@@ -187,14 +187,14 @@ func startComponents(firstManifestURL, secondManifestURL string) (string, string
 scheduler.New(schedulerConfig).Run()

 // ensure the service endpoints are sync'd several times within the window that the integration tests wait
-go endpointcontroller.NewEndpointController(cl, controller.NoResyncPeriodFunc).
+go endpointcontroller.NewEndpointController(clientset, controller.NoResyncPeriodFunc).
 Run(3, util.NeverStop)

 // TODO: Write an integration test for the replication controllers watch.
 go replicationcontroller.NewReplicationManager(clientset, controller.NoResyncPeriodFunc, replicationcontroller.BurstReplicas).
 Run(3, util.NeverStop)

-nodeController := nodecontroller.NewNodeController(nil, cl, 5*time.Minute, util.NewFakeRateLimiter(), util.NewFakeRateLimiter(),
+nodeController := nodecontroller.NewNodeController(nil, clientset, 5*time.Minute, util.NewFakeRateLimiter(), util.NewFakeRateLimiter(),
 40*time.Second, 60*time.Second, 5*time.Second, nil, false)
 nodeController.Run(5 * time.Second)
 cadvisorInterface := new(cadvisor.Fake)
@@ -205,7 +205,7 @@ func startComponents(firstManifestURL, secondManifestURL string) (string, string
 glog.Infof("Using %s as root dir for kubelet #1", testRootDir)
 cm := cm.NewStubContainerManager()
 kcfg := kubeletapp.SimpleKubelet(
-cl,
+clientset,
 fakeDocker1,
 "localhost",
 testRootDir,
@@ -237,7 +237,7 @@ func startComponents(firstManifestURL, secondManifestURL string) (string, string
 glog.Infof("Using %s as root dir for kubelet #2", testRootDir)

 kcfg = kubeletapp.SimpleKubelet(
-cl,
+clientset,
 fakeDocker2,
 "127.0.0.1",
 testRootDir,

@@ -42,6 +42,7 @@ import (
 "k8s.io/kubernetes/pkg/apiserver"
 "k8s.io/kubernetes/pkg/apiserver/authenticator"
 "k8s.io/kubernetes/pkg/capabilities"
+clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
 client "k8s.io/kubernetes/pkg/client/unversioned"
 "k8s.io/kubernetes/pkg/cloudprovider"
 serviceaccountcontroller "k8s.io/kubernetes/pkg/controller/serviceaccount"
@@ -255,9 +256,9 @@ func Run(s *options.APIServer) error {
 clientConfig.GroupVersion = &gv
 }

-client, err := client.New(clientConfig)
+client, err := clientset.NewForConfig(clientConfig)
 if err != nil {
-glog.Fatalf("Invalid server address: %v", err)
+glog.Errorf("Failed to create clientset: %v", err)
 }

 legacyV1Group, err := registered.Group(api.GroupName)

@@ -173,7 +173,7 @@ func Run(s *options.CMServer) error {
 }

 func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig *client.Config, stop <-chan struct{}) error {
-go endpointcontroller.NewEndpointController(client.NewOrDie(client.AddUserAgent(kubeconfig, "endpoint-controller")), ResyncPeriod(s)).
+go endpointcontroller.NewEndpointController(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "endpoint-controller")), ResyncPeriod(s)).
 Run(s.ConcurrentEndpointSyncs, util.NeverStop)

 go replicationcontroller.NewReplicationManager(
@@ -183,7 +183,7 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
 ).Run(s.ConcurrentRCSyncs, util.NeverStop)

 if s.TerminatedPodGCThreshold > 0 {
-go gc.New(client.NewOrDie(client.AddUserAgent(kubeconfig, "garbage-collector")), ResyncPeriod(s), s.TerminatedPodGCThreshold).
+go gc.New(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "garbage-collector")), ResyncPeriod(s), s.TerminatedPodGCThreshold).
 Run(util.NeverStop)
 }

@@ -192,13 +192,13 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
 glog.Fatalf("Cloud provider could not be initialized: %v", err)
 }

-nodeController := nodecontroller.NewNodeController(cloud, client.NewOrDie(client.AddUserAgent(kubeconfig, "node-controller")),
+nodeController := nodecontroller.NewNodeController(cloud, clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "node-controller")),
 s.PodEvictionTimeout, util.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst),
 util.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst),
 s.NodeMonitorGracePeriod, s.NodeStartupGracePeriod, s.NodeMonitorPeriod, &s.ClusterCIDR, s.AllocateNodeCIDRs)
 nodeController.Run(s.NodeSyncPeriod)

-serviceController := servicecontroller.New(cloud, client.NewOrDie(client.AddUserAgent(kubeconfig, "service-controller")), s.ClusterName)
+serviceController := servicecontroller.New(cloud, clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "service-controller")), s.ClusterName)
 if err := serviceController.Run(s.ServiceSyncPeriod, s.NodeSyncPeriod); err != nil {
 glog.Errorf("Failed to start service controller: %v", err)
 }
@@ -209,7 +209,7 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
 } else if routes, ok := cloud.Routes(); !ok {
 glog.Warning("allocate-node-cidrs is set, but cloud provider does not support routes. Will not manage routes.")
 } else {
-routeController := routecontroller.New(routes, client.NewOrDie(client.AddUserAgent(kubeconfig, "route-controller")), s.ClusterName, &s.ClusterCIDR)
+routeController := routecontroller.New(routes, clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "route-controller")), s.ClusterName, &s.ClusterCIDR)
 routeController.Run(s.NodeSyncPeriod)
 }
 } else {
@@ -217,7 +217,7 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
 }

 go resourcequotacontroller.NewResourceQuotaController(
-client.NewOrDie(client.AddUserAgent(kubeconfig, "resourcequota-controller")),
+clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "resourcequota-controller")),
 controller.StaticResyncPeriodFunc(s.ResourceQuotaSyncPeriod)).Run(s.ConcurrentResourceQuotaSyncs, util.NeverStop)

 // If apiserver is not running we should wait for some time and fail only then. This is particularly
@@ -240,7 +240,7 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
 glog.Fatalf("Failed to get supported resources from server: %v", err)
 }

-namespacecontroller.NewNamespaceController(client.NewOrDie(client.AddUserAgent(kubeconfig, "namespace-controller")), versions, s.NamespaceSyncPeriod).Run()
+namespacecontroller.NewNamespaceController(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "namespace-controller")), versions, s.NamespaceSyncPeriod).Run()

 groupVersion := "extensions/v1beta1"
 resources, found := resourceMap[groupVersion]
@@ -249,7 +249,7 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
 glog.Infof("Starting %s apis", groupVersion)
 if containsResource(resources, "horizontalpodautoscalers") {
 glog.Infof("Starting horizontal pod controller.")
-hpaClient := client.NewOrDie(client.AddUserAgent(kubeconfig, "horizontal-pod-autoscaler"))
+hpaClient := clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "horizontal-pod-autoscaler"))
 metricsClient := metrics.NewHeapsterMetricsClient(
 hpaClient,
 metrics.DefaultHeapsterNamespace,
@@ -257,7 +257,7 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
 metrics.DefaultHeapsterService,
 metrics.DefaultHeapsterPort,
 )
-podautoscaler.NewHorizontalController(hpaClient, hpaClient, hpaClient, metricsClient).
+podautoscaler.NewHorizontalController(hpaClient.Legacy(), hpaClient.Extensions(), hpaClient, metricsClient).
 Run(s.HorizontalPodAutoscalerSyncPeriod)
 }

@@ -323,7 +323,7 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
 glog.Errorf("Error reading key for service account token controller: %v", err)
 } else {
 serviceaccountcontroller.NewTokensController(
-client.NewOrDie(client.AddUserAgent(kubeconfig, "tokens-controller")),
+clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "tokens-controller")),
 serviceaccountcontroller.TokensControllerOptions{
 TokenGenerator: serviceaccount.JWTTokenGenerator(privateKey),
 RootCA: rootCA,
@@ -333,7 +333,7 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
 }

 serviceaccountcontroller.NewServiceAccountsController(
-client.NewOrDie(client.AddUserAgent(kubeconfig, "service-account-controller")),
+clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "service-account-controller")),
 serviceaccountcontroller.DefaultServiceAccountsControllerOptions(),
 ).Run()

@@ -40,6 +40,7 @@ import (
 "k8s.io/kubernetes/pkg/client/chaosclient"
+clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
 "k8s.io/kubernetes/pkg/client/record"
+unversioned_legacy "k8s.io/kubernetes/pkg/client/typed/generated/legacy/unversioned"
 client "k8s.io/kubernetes/pkg/client/unversioned"
 clientauth "k8s.io/kubernetes/pkg/client/unversioned/auth"
 "k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
@@ -266,13 +267,13 @@ func Run(s *options.KubeletServer, kcfg *KubeletConfig) error {

 clientConfig, err := CreateAPIServerClientConfig(s)
 if err == nil {
-kcfg.KubeClient, err = client.New(clientConfig)
+kcfg.KubeClient, err = clientset.NewForConfig(clientConfig)

 // make a separate client for events
 eventClientConfig := *clientConfig
 eventClientConfig.QPS = s.EventRecordQPS
 eventClientConfig.Burst = s.EventBurst
-kcfg.EventClient, err = client.New(&eventClientConfig)
+kcfg.EventClient, err = clientset.NewForConfig(&eventClientConfig)
 }
 if err != nil && len(s.APIServerList) > 0 {
 glog.Warningf("No API client: %v", err)
@@ -452,7 +453,7 @@ func addChaosToClientConfig(s *options.KubeletServer, config *client.Config) {

 // SimpleRunKubelet is a simple way to start a Kubelet talking to dockerEndpoint, using an API Client.
 // Under the hood it calls RunKubelet (below)
-func SimpleKubelet(client *client.Client,
+func SimpleKubelet(client *clientset.Clientset,
 dockerClient dockertools.DockerInterface,
 hostname, rootDir, manifestURL, address string,
 port uint,
@@ -563,7 +564,7 @@ func RunKubelet(kcfg *KubeletConfig) error {
 eventBroadcaster.StartLogging(glog.V(3).Infof)
 if kcfg.EventClient != nil {
 glog.V(4).Infof("Sending events to api server.")
-eventBroadcaster.StartRecordingToSink(kcfg.EventClient.Events(""))
+eventBroadcaster.StartRecordingToSink(&unversioned_legacy.EventSinkImpl{kcfg.EventClient.Events("")})
 } else {
 glog.Warning("No api server defined - no events will be sent to API server.")
 }
@@ -666,7 +667,7 @@ type KubeletConfig struct {
 DockerExecHandler dockertools.ExecHandler
 EnableDebuggingHandlers bool
 EnableServer bool
-EventClient *client.Client
+EventClient *clientset.Clientset
 EventBurst int
 EventRecordQPS float32
 FileCheckFrequency time.Duration
@@ -677,7 +678,7 @@ type KubeletConfig struct {
 HostIPCSources []string
 HTTPCheckFrequency time.Duration
 ImageGCPolicy kubelet.ImageGCPolicy
-KubeClient *client.Client
+KubeClient *clientset.Clientset
 ManifestURL string
 ManifestURLHeader http.Header
 MasterServiceNamespace string
@@ -733,12 +734,10 @@ func CreateAndInitKubelet(kc *KubeletConfig) (k KubeletBootstrap, pc *config.Pod
 // TODO: KubeletConfig.KubeClient should be a client interface, but client interface misses certain methods
 // used by kubelet. Since NewMainKubelet expects a client interface, we need to make sure we are not passing
 // a nil pointer to it when what we really want is a nil interface.
-var kubeClient client.Interface
-var c clientset.Interface
+var kubeClient clientset.Interface
 if kc.KubeClient != nil {
 kubeClient = kc.KubeClient
-// TODO: remove this when we've refactored kubelet to only use clientset.
-c = clientset.FromUnversionedClient(kc.KubeClient)
 }

 gcPolicy := kubecontainer.ContainerGCPolicy{
@@ -760,7 +759,6 @@ func CreateAndInitKubelet(kc *KubeletConfig) (k KubeletBootstrap, pc *config.Pod
 kc.NodeName,
 kc.DockerClient,
 kubeClient,
-c,
 kc.RootDirectory,
 kc.PodInfraContainerImage,
 kc.SyncFrequency,

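The kubelet wiring above builds two clientsets from one base config, with the event client getting its own QPS/Burst limits. A compressed sketch of that pattern under the new types; the surrounding function and its name are illustrative, only the calls and field types come from the diff:

```go
package main

import (
	"github.com/golang/glog"

	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
	client "k8s.io/kubernetes/pkg/client/unversioned"
)

// buildKubeletClients mirrors the Run() wiring above: a main clientset plus a
// separate, rate-limited clientset used only for event recording.
func buildKubeletClients(cfg *client.Config, eventQPS float32, eventBurst int) (kube, events *clientset.Clientset) {
	kube, err := clientset.NewForConfig(cfg)
	if err != nil {
		glog.Warningf("No API client: %v", err)
		return nil, nil
	}
	eventCfg := *cfg
	eventCfg.QPS = eventQPS
	eventCfg.Burst = eventBurst
	events, err = clientset.NewForConfig(&eventCfg)
	if err != nil {
		glog.Warningf("No event client: %v", err)
	}
	return kube, events
}
```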
@@ -23,6 +23,7 @@ import (
 docker "github.com/fsouza/go-dockerclient"

 "k8s.io/kubernetes/pkg/api"
+clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
 "k8s.io/kubernetes/pkg/client/record"
 client "k8s.io/kubernetes/pkg/client/unversioned"
 "k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
@@ -88,6 +89,7 @@ func main() {

 // create a client to communicate with API server.
 cl, err := createClientFromFile(config.KubeconfigPath)
+clientset := clientset.FromUnversionedClient(cl)
 if err != nil {
 glog.Fatal("Failed to create a Client. Exiting.")
 }
@@ -102,7 +104,7 @@ func main() {

 hollowKubelet := kubemark.NewHollowKubelet(
 config.NodeName,
-cl,
+clientset,
 cadvisorInterface,
 fakeDockerClient,
 config.KubeletPort,

@@ -60,6 +60,7 @@ func (g *genClientset) Imports(c *generator.Context) (imports []string) {
 version := normalization.Version(gv.Version)
 typedClientPath := filepath.Join(g.typedClientPath, group, version)
 imports = append(imports, fmt.Sprintf("%s_%s \"%s\"", group, version, typedClientPath))
+imports = append(imports, "github.com/golang/glog")
 }
 return
 }
@@ -144,14 +145,14 @@ func NewForConfig(c *$.Config|raw$) (*Clientset, error) {
 var err error
 $range .allGroups$ clientset.$.Group$Client, err =$.PackageName$.NewForConfig(c)
 if err!=nil {
-return nil, err
+return &clientset, err
 }
 $end$
 clientset.DiscoveryClient, err = $.NewDiscoveryClientForConfig|raw$(c)
 if err!=nil {
-return nil, err
+glog.Errorf("failed to create the DiscoveryClient: %v", err)
 }
-return &clientset, nil
+return &clientset, err
 }
 `

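The template change above shifts the generated constructor's error contract: on a per-group or discovery failure it now returns the partially constructed clientset together with the error instead of nil, and a DiscoveryClient failure is additionally logged. A minimal caller-side sketch under that contract; the helper name and the fatal-on-error policy are illustrative, not part of the diff:

```go
package main

import (
	"github.com/golang/glog"

	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
	unversioned "k8s.io/kubernetes/pkg/client/unversioned"
)

// mustClientset treats any construction error as fatal. With the generated
// code above, cs can be non-nil even when err is non-nil, so the error check
// must come first rather than a nil check on cs.
func mustClientset(cfg *unversioned.Config) *clientset.Clientset {
	cs, err := clientset.NewForConfig(cfg)
	if err != nil {
		glog.Fatalf("failed to create clientset: %v", err)
	}
	return cs
}
```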
@@ -17,6 +17,7 @@ limitations under the License.
 package test_release_1_1

 import (
+"github.com/golang/glog"
 testgroup_unversioned "k8s.io/kubernetes/cmd/libs/go2idl/client-gen/testoutput/testgroup/unversioned"
 unversioned "k8s.io/kubernetes/pkg/client/unversioned"
 )
@@ -49,14 +50,14 @@ func NewForConfig(c *unversioned.Config) (*Clientset, error) {
 var err error
 clientset.TestgroupClient, err = testgroup_unversioned.NewForConfig(c)
 if err != nil {
-return nil, err
+return &clientset, err
 }

 clientset.DiscoveryClient, err = unversioned.NewDiscoveryClientForConfig(c)
 if err != nil {
-return nil, err
+glog.Errorf("failed to create the DiscoveryClient: %v", err)
 }
-return &clientset, nil
+return &clientset, err
 }

 // NewForConfigOrDie creates a new Clientset for the given config and

@ -127,14 +127,14 @@ func (s *CMServer) Run(_ []string) error {
|
|||
glog.Fatal(server.ListenAndServe())
|
||||
}()
|
||||
|
||||
endpoints := s.createEndpointController(client.NewOrDie(client.AddUserAgent(kubeconfig, "endpoint-controller")))
|
||||
endpoints := s.createEndpointController(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "endpoint-controller")))
|
||||
go endpoints.Run(s.ConcurrentEndpointSyncs, util.NeverStop)
|
||||
|
||||
go replicationcontroller.NewReplicationManager(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "replication-controller")), s.resyncPeriod, replicationcontroller.BurstReplicas).
|
||||
Run(s.ConcurrentRCSyncs, util.NeverStop)
|
||||
|
||||
if s.TerminatedPodGCThreshold > 0 {
|
||||
go gc.New(client.NewOrDie(client.AddUserAgent(kubeconfig, "garbage-collector")), s.resyncPeriod, s.TerminatedPodGCThreshold).
|
||||
go gc.New(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "garbage-collector")), s.resyncPeriod, s.TerminatedPodGCThreshold).
|
||||
Run(util.NeverStop)
|
||||
}
|
||||
|
||||
|
@ -147,18 +147,18 @@ func (s *CMServer) Run(_ []string) error {
|
|||
glog.Fatalf("Cloud provider could not be initialized: %v", err)
|
||||
}
|
||||
|
||||
nodeController := nodecontroller.NewNodeController(cloud, client.NewOrDie(client.AddUserAgent(kubeconfig, "node-controller")),
|
||||
nodeController := nodecontroller.NewNodeController(cloud, clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "node-controller")),
|
||||
s.PodEvictionTimeout, util.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst),
|
||||
util.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst),
|
||||
s.NodeMonitorGracePeriod, s.NodeStartupGracePeriod, s.NodeMonitorPeriod, (*net.IPNet)(&s.ClusterCIDR), s.AllocateNodeCIDRs)
|
||||
nodeController.Run(s.NodeSyncPeriod)
|
||||
|
||||
nodeStatusUpdaterController := node.NewStatusUpdater(client.NewOrDie(client.AddUserAgent(kubeconfig, "node-status-controller")), s.NodeMonitorPeriod, time.Now)
|
||||
nodeStatusUpdaterController := node.NewStatusUpdater(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "node-status-controller")), s.NodeMonitorPeriod, time.Now)
|
||||
if err := nodeStatusUpdaterController.Run(util.NeverStop); err != nil {
|
||||
glog.Fatalf("Failed to start node status update controller: %v", err)
|
||||
}
|
||||
|
||||
serviceController := servicecontroller.New(cloud, client.NewOrDie(client.AddUserAgent(kubeconfig, "service-controller")), s.ClusterName)
|
||||
serviceController := servicecontroller.New(cloud, clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "service-controller")), s.ClusterName)
|
||||
if err := serviceController.Run(s.ServiceSyncPeriod, s.NodeSyncPeriod); err != nil {
|
||||
glog.Errorf("Failed to start service controller: %v", err)
|
||||
}
|
||||
|
@ -168,12 +168,12 @@ func (s *CMServer) Run(_ []string) error {
|
|||
if !ok {
|
||||
glog.Fatal("Cloud provider must support routes if allocate-node-cidrs is set")
|
||||
}
|
||||
routeController := routecontroller.New(routes, client.NewOrDie(client.AddUserAgent(kubeconfig, "route-controller")), s.ClusterName, (*net.IPNet)(&s.ClusterCIDR))
|
||||
routeController := routecontroller.New(routes, clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "route-controller")), s.ClusterName, (*net.IPNet)(&s.ClusterCIDR))
|
||||
routeController.Run(s.NodeSyncPeriod)
|
||||
}
|
||||
|
||||
go resourcequotacontroller.NewResourceQuotaController(
|
||||
client.NewOrDie(client.AddUserAgent(kubeconfig, "resource-quota-controller")), controller.StaticResyncPeriodFunc(s.ResourceQuotaSyncPeriod)).Run(s.ConcurrentResourceQuotaSyncs, util.NeverStop)
|
||||
clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "resource-quota-controller")), controller.StaticResyncPeriodFunc(s.ResourceQuotaSyncPeriod)).Run(s.ConcurrentResourceQuotaSyncs, util.NeverStop)
|
||||
|
||||
// If apiserver is not running we should wait for some time and fail only then. This is particularly
|
||||
// important when we start apiserver and controller manager at the same time.
|
||||
|
@ -195,7 +195,7 @@ func (s *CMServer) Run(_ []string) error {
|
|||
glog.Fatalf("Failed to get supported resources from server: %v", err)
|
||||
}
|
||||
|
||||
namespaceController := namespacecontroller.NewNamespaceController(client.NewOrDie(client.AddUserAgent(kubeconfig, "namespace-controller")), &unversioned.APIVersions{}, s.NamespaceSyncPeriod)
|
||||
namespaceController := namespacecontroller.NewNamespaceController(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "namespace-controller")), &unversioned.APIVersions{}, s.NamespaceSyncPeriod)
|
||||
namespaceController.Run()
|
||||
|
||||
groupVersion := "extensions/v1beta1"
|
||||
|
@ -205,7 +205,7 @@ func (s *CMServer) Run(_ []string) error {
|
|||
glog.Infof("Starting %s apis", groupVersion)
|
||||
if containsResource(resources, "horizontalpodautoscalers") {
|
||||
glog.Infof("Starting horizontal pod controller.")
|
||||
hpaClient := client.NewOrDie(client.AddUserAgent(kubeconfig, "horizontal-pod-autoscaler"))
|
||||
hpaClient := clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "horizontal-pod-autoscaler"))
|
||||
metricsClient := metrics.NewHeapsterMetricsClient(
|
||||
hpaClient,
|
||||
metrics.DefaultHeapsterNamespace,
|
||||
|
@ -213,7 +213,7 @@ func (s *CMServer) Run(_ []string) error {
|
|||
metrics.DefaultHeapsterService,
|
||||
metrics.DefaultHeapsterPort,
|
||||
)
|
||||
podautoscaler.NewHorizontalController(hpaClient, hpaClient, hpaClient, metricsClient).
|
||||
podautoscaler.NewHorizontalController(hpaClient.Legacy(), hpaClient.Extensions(), hpaClient, metricsClient).
|
||||
Run(s.HorizontalPodAutoscalerSyncPeriod)
|
||||
}
|
||||
|
||||
|
@ -279,7 +279,7 @@ func (s *CMServer) Run(_ []string) error {
|
|||
glog.Errorf("Error reading key for service account token controller: %v", err)
|
||||
} else {
|
||||
serviceaccountcontroller.NewTokensController(
|
||||
client.NewOrDie(client.AddUserAgent(kubeconfig, "tokens-controller")),
|
||||
clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "tokens-controller")),
|
||||
serviceaccountcontroller.TokensControllerOptions{
|
||||
TokenGenerator: serviceaccount.JWTTokenGenerator(privateKey),
|
||||
RootCA: rootCA,
|
||||
|
@ -289,14 +289,14 @@ func (s *CMServer) Run(_ []string) error {
|
|||
}
|
||||
|
||||
serviceaccountcontroller.NewServiceAccountsController(
|
||||
client.NewOrDie(client.AddUserAgent(kubeconfig, "service-account-controller")),
|
||||
clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "service-account-controller")),
|
||||
serviceaccountcontroller.DefaultServiceAccountsControllerOptions(),
|
||||
).Run()
|
||||
|
||||
select {}
|
||||
}
|
||||
|
||||
func (s *CMServer) createEndpointController(client *client.Client) kmendpoint.EndpointController {
|
||||
func (s *CMServer) createEndpointController(client *clientset.Clientset) kmendpoint.EndpointController {
|
||||
if s.UseHostPortEndpoints {
|
||||
glog.V(2).Infof("Creating hostIP:hostPort endpoint controller")
|
||||
return kmendpoint.NewEndpointController(client)
|
||||
|
|
|
@ -19,7 +19,7 @@ package executor
|
|||
import (
|
||||
"k8s.io/kubernetes/contrib/mesos/pkg/node"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
client "k8s.io/kubernetes/pkg/client/unversioned"
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
|
||||
)
|
||||
|
||||
type kubeAPI interface {
|
||||
|
@ -33,11 +33,11 @@ type nodeAPI interface {
|
|||
// clientAPIWrapper implements kubeAPI and node API, which serve to isolate external dependencies
|
||||
// such that they're easier to mock in unit test.
|
||||
type clientAPIWrapper struct {
|
||||
client *client.Client
|
||||
client *clientset.Clientset
|
||||
}
|
||||
|
||||
func (cw *clientAPIWrapper) killPod(ns, name string) error {
|
||||
return cw.client.Pods(ns).Delete(name, api.NewDeleteOptions(0))
|
||||
return cw.client.Legacy().Pods(ns).Delete(name, api.NewDeleteOptions(0))
|
||||
}
|
||||
|
||||
func (cw *clientAPIWrapper) createOrUpdate(hostname string, slaveAttrLabels, annotations map[string]string) (*api.Node, error) {
|
||||
|
|
|
@ -26,6 +26,8 @@ import (
|
|||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
|
||||
|
||||
"github.com/fsouza/go-dockerclient"
|
||||
"github.com/gogo/protobuf/proto"
|
||||
log "github.com/golang/glog"
|
||||
|
@ -40,7 +42,6 @@ import (
|
|||
"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/meta"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
apierrors "k8s.io/kubernetes/pkg/api/errors"
|
||||
client "k8s.io/kubernetes/pkg/client/unversioned"
|
||||
"k8s.io/kubernetes/pkg/kubelet/container"
|
||||
"k8s.io/kubernetes/pkg/kubelet/dockertools"
|
||||
kruntime "k8s.io/kubernetes/pkg/runtime"
|
||||
|
@ -112,7 +113,7 @@ type Executor struct {
|
|||
}
|
||||
|
||||
type Config struct {
|
||||
APIClient *client.Client
|
||||
APIClient *clientset.Clientset
|
||||
Docker dockertools.DockerInterface
|
||||
ShutdownAlert func()
|
||||
SuicideTimeout time.Duration
|
||||
|
|
|
@ -16,9 +16,7 @@ limitations under the License.
|
|||
|
||||
package executor
|
||||
|
||||
import (
|
||||
mesos "github.com/mesos/mesos-go/mesosproto"
|
||||
)
|
||||
import mesos "github.com/mesos/mesos-go/mesosproto"
|
||||
|
||||
type NodeInfo struct {
|
||||
Cores int
|
||||
|
|
|
@ -21,10 +21,11 @@ import (
|
|||
"errors"
|
||||
"sync"
|
||||
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
|
||||
|
||||
"k8s.io/kubernetes/contrib/mesos/pkg/executor/messages"
|
||||
"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/meta"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
client "k8s.io/kubernetes/pkg/client/unversioned"
|
||||
|
||||
log "github.com/golang/glog"
|
||||
)
|
||||
|
@ -69,7 +70,7 @@ type (
|
|||
}
|
||||
|
||||
registryImpl struct {
|
||||
client *client.Client
|
||||
client *clientset.Clientset
|
||||
updates chan *PodEvent
|
||||
m sync.RWMutex
|
||||
boundTasks map[string]*api.Pod
|
||||
|
@ -108,7 +109,7 @@ func (rp *PodEvent) FormatShort() string {
|
|||
return "task '" + rp.taskID + "' pod '" + rp.pod.Namespace + "/" + rp.pod.Name + "'"
|
||||
}
|
||||
|
||||
func NewRegistry(client *client.Client) Registry {
|
||||
func NewRegistry(client *clientset.Clientset) Registry {
|
||||
r := &registryImpl{
|
||||
client: client,
|
||||
updates: make(chan *PodEvent, updatesBacklogSize),
|
||||
|
@ -305,7 +306,7 @@ func (r registryImpl) bind(taskID string, pod *api.Pod) error {
|
|||
log.Infof("Binding task %v pod '%v/%v' to '%v' with annotations %+v...",
|
||||
taskID, pod.Namespace, pod.Name, binding.Target.Name, binding.Annotations)
|
||||
ctx := api.WithNamespace(api.NewContext(), binding.Namespace)
|
||||
err := r.client.Post().Namespace(api.NamespaceValue(ctx)).Resource("bindings").Body(binding).Do().Error()
|
||||
err := r.client.LegacyClient.Post().Namespace(api.NamespaceValue(ctx)).Resource("bindings").Body(binding).Do().Error()
|
||||
if err != nil {
|
||||
log.Warningf("failed to bind task %v pod %v/%v: %v", taskID, pod.Namespace, pod.Name, err)
|
||||
return errCreateBindingFailed
|
||||
|
@ -320,7 +321,7 @@ func (r registryImpl) bind(taskID string, pod *api.Pod) error {
|
|||
patch.Metadata.Annotations = pod.Annotations
|
||||
patchJson, _ := json.Marshal(patch)
|
||||
log.V(4).Infof("Patching annotations %v of task %v pod %v/%v: %v", pod.Annotations, taskID, pod.Namespace, pod.Name, string(patchJson))
|
||||
err := r.client.Patch(api.MergePatchType).RequestURI(pod.SelfLink).Body(patchJson).Do().Error()
|
||||
err := r.client.LegacyClient.Patch(api.MergePatchType).RequestURI(pod.SelfLink).Body(patchJson).Do().Error()
|
||||
if err != nil {
|
||||
log.Errorf("Error updating annotations of ready-to-launch task %v pod %v/%v: %v", taskID, pod.Namespace, pod.Name, err)
|
||||
return errAnnotationUpdateFailure
|
||||
|
|
|
@ -33,6 +33,7 @@ import (
|
|||
"k8s.io/kubernetes/contrib/mesos/pkg/hyperkube"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/client/cache"
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
|
||||
client "k8s.io/kubernetes/pkg/client/unversioned"
|
||||
"k8s.io/kubernetes/pkg/fields"
|
||||
"k8s.io/kubernetes/pkg/kubelet"
|
||||
|
@ -74,7 +75,7 @@ func (s *KubeletExecutorServer) runExecutor(
|
|||
nodeInfos chan<- executor.NodeInfo,
|
||||
kubeletFinished <-chan struct{},
|
||||
staticPodsConfigPath string,
|
||||
apiclient *client.Client,
|
||||
apiclient *clientset.Clientset,
|
||||
registry executor.Registry,
|
||||
) (<-chan struct{}, error) {
|
||||
exec := executor.New(executor.Config{
|
||||
|
@ -116,7 +117,7 @@ func (s *KubeletExecutorServer) runKubelet(
|
|||
nodeInfos <-chan executor.NodeInfo,
|
||||
kubeletDone chan<- struct{},
|
||||
staticPodsConfigPath string,
|
||||
apiclient *client.Client,
|
||||
apiclient *clientset.Clientset,
|
||||
podLW *cache.ListWatch,
|
||||
registry executor.Registry,
|
||||
executorDone <-chan struct{},
|
||||
|
@ -163,7 +164,7 @@ func (s *KubeletExecutorServer) runKubelet(
|
|||
// make a separate client for events
|
||||
eventClientConfig.QPS = s.EventRecordQPS
|
||||
eventClientConfig.Burst = s.EventBurst
|
||||
kcfg.EventClient, err = client.New(eventClientConfig)
|
||||
kcfg.EventClient, err = clientset.NewForConfig(eventClientConfig)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -229,10 +230,10 @@ func (s *KubeletExecutorServer) Run(hks hyperkube.Interface, _ []string) error {
|
|||
}
|
||||
|
||||
// create apiserver client
|
||||
var apiclient *client.Client
|
||||
var apiclient *clientset.Clientset
|
||||
clientConfig, err := kubeletapp.CreateAPIServerClientConfig(s.KubeletServer)
|
||||
if err == nil {
|
||||
apiclient, err = client.New(clientConfig)
|
||||
apiclient, err = clientset.NewForConfig(clientConfig)
|
||||
}
|
||||
if err != nil {
|
||||
// required for k8sm since we need to send api.Binding information back to the apiserver
|
||||
|
@ -240,7 +241,7 @@ func (s *KubeletExecutorServer) Run(hks hyperkube.Interface, _ []string) error {
|
|||
}
|
||||
|
||||
var (
|
||||
pw = cache.NewListWatchFromClient(apiclient, "pods", api.NamespaceAll,
|
||||
pw = cache.NewListWatchFromClient(apiclient.LegacyClient, "pods", api.NamespaceAll,
|
||||
fields.OneTermEqualSelector(client.PodHost, s.HostnameOverride),
|
||||
)
|
||||
reg = executor.NewRegistry(apiclient)
|
||||
|
|
|
@ -23,12 +23,13 @@ import (
|
|||
"strings"
|
||||
"time"
|
||||
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
|
||||
|
||||
log "github.com/golang/glog"
|
||||
mesos "github.com/mesos/mesos-go/mesosproto"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/errors"
|
||||
"k8s.io/kubernetes/pkg/api/unversioned"
|
||||
client "k8s.io/kubernetes/pkg/client/unversioned"
|
||||
"k8s.io/kubernetes/pkg/util/validation"
|
||||
)
|
||||
|
||||
|
@ -41,7 +42,7 @@ const (
|
|||
// Create creates a new node api object with the given hostname,
|
||||
// slave attribute labels and annotations
|
||||
func Create(
|
||||
client *client.Client,
|
||||
client *clientset.Clientset,
|
||||
hostName string,
|
||||
slaveAttrLabels,
|
||||
annotations map[string]string,
|
||||
|
@ -87,7 +88,7 @@ func Create(
|
|||
// The updated node merges the given slave attribute labels
|
||||
// and annotations with the found api object.
|
||||
func Update(
|
||||
client *client.Client,
|
||||
client *clientset.Clientset,
|
||||
hostname string,
|
||||
slaveAttrLabels,
|
||||
annotations map[string]string,
|
||||
|
@ -122,7 +123,7 @@ func Update(
|
|||
|
||||
// CreateOrUpdate creates a node api object or updates an existing one
|
||||
func CreateOrUpdate(
|
||||
client *client.Client,
|
||||
client *clientset.Clientset,
|
||||
hostname string,
|
||||
slaveAttrLabels,
|
||||
annotations map[string]string,
|
||||
|
|
|
@ -20,12 +20,13 @@ import (
|
|||
"fmt"
|
||||
"time"
|
||||
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
|
||||
|
||||
log "github.com/golang/glog"
|
||||
"k8s.io/kubernetes/contrib/mesos/pkg/queue"
|
||||
"k8s.io/kubernetes/contrib/mesos/pkg/runtime"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/errors"
|
||||
client "k8s.io/kubernetes/pkg/client/unversioned"
|
||||
)
|
||||
|
||||
type Registrator interface {
|
||||
|
@ -62,11 +63,11 @@ type LookupFunc func(hostName string) *api.Node
|
|||
|
||||
type clientRegistrator struct {
|
||||
lookupNode LookupFunc
|
||||
client *client.Client
|
||||
client *clientset.Clientset
|
||||
queue *queue.HistoricalFIFO
|
||||
}
|
||||
|
||||
func NewRegistrator(client *client.Client, lookupNode LookupFunc) *clientRegistrator {
|
||||
func NewRegistrator(client *clientset.Clientset, lookupNode LookupFunc) *clientRegistrator {
|
||||
return &clientRegistrator{
|
||||
lookupNode: lookupNode,
|
||||
client: client,
|
||||
|
|
|
@ -20,6 +20,8 @@ import (
|
|||
"fmt"
|
||||
"time"
|
||||
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
|
||||
|
||||
log "github.com/golang/glog"
|
||||
"k8s.io/kubernetes/cmd/kubelet/app/options"
|
||||
"k8s.io/kubernetes/contrib/mesos/pkg/runtime"
|
||||
|
@ -27,7 +29,6 @@ import (
|
|||
"k8s.io/kubernetes/pkg/api/errors"
|
||||
"k8s.io/kubernetes/pkg/api/unversioned"
|
||||
"k8s.io/kubernetes/pkg/client/cache"
|
||||
client "k8s.io/kubernetes/pkg/client/unversioned"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/mesos"
|
||||
"k8s.io/kubernetes/pkg/fields"
|
||||
"k8s.io/kubernetes/pkg/util/sets"
|
||||
|
@ -40,13 +41,13 @@ const (
|
|||
)
|
||||
|
||||
type StatusUpdater struct {
|
||||
client *client.Client
|
||||
client *clientset.Clientset
|
||||
relistPeriod time.Duration
|
||||
heartBeatPeriod time.Duration
|
||||
nowFunc func() time.Time
|
||||
}
|
||||
|
||||
func NewStatusUpdater(client *client.Client, relistPeriod time.Duration, nowFunc func() time.Time) *StatusUpdater {
|
||||
func NewStatusUpdater(client *clientset.Clientset, relistPeriod time.Duration, nowFunc func() time.Time) *StatusUpdater {
|
||||
kubecfg := options.NewKubeletServer() // only create to get the config, this is without side-effects
|
||||
return &StatusUpdater{
|
||||
client: client,
|
||||
|
@ -58,7 +59,7 @@ func NewStatusUpdater(client *client.Client, relistPeriod time.Duration, nowFunc
|
|||
|
||||
func (u *StatusUpdater) Run(terminate <-chan struct{}) error {
|
||||
nodeStore := cache.NewStore(cache.MetaNamespaceKeyFunc)
|
||||
nodeLW := cache.NewListWatchFromClient(u.client, "nodes", api.NamespaceAll, fields.Everything())
|
||||
nodeLW := cache.NewListWatchFromClient(u.client.LegacyClient, "nodes", api.NamespaceAll, fields.Everything())
|
||||
cache.NewReflector(nodeLW, &api.Node{}, nodeStore, u.relistPeriod).Run()
|
||||
|
||||
monitor := func() {
|
||||
|
|
|
@ -19,13 +19,14 @@ package controller
|
|||
import (
|
||||
"time"
|
||||
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
|
||||
|
||||
log "github.com/golang/glog"
|
||||
"k8s.io/kubernetes/contrib/mesos/pkg/runtime"
|
||||
"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/components/algorithm"
|
||||
"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/components/binder"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/client/record"
|
||||
client "k8s.io/kubernetes/pkg/client/unversioned"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -45,11 +46,11 @@ type controller struct {
|
|||
nextPod func() *api.Pod
|
||||
error func(*api.Pod, error)
|
||||
recorder record.EventRecorder
|
||||
client *client.Client
|
||||
client *clientset.Clientset
|
||||
started chan<- struct{} // startup latch
|
||||
}
|
||||
|
||||
func New(client *client.Client, algorithm algorithm.SchedulerAlgorithm,
|
||||
func New(client *clientset.Clientset, algorithm algorithm.SchedulerAlgorithm,
|
||||
recorder record.EventRecorder, nextPod func() *api.Pod, error func(pod *api.Pod, schedulingErr error),
|
||||
binder binder.Binder, started chan<- struct{}) Controller {
|
||||
return &controller{
|
||||
|
@ -78,7 +79,7 @@ func (s *controller) scheduleOne() {
|
|||
// the scheduler has to take care of this:
|
||||
if pod.Spec.NodeName != "" && pod.DeletionTimestamp != nil {
|
||||
log.V(3).Infof("deleting pre-scheduled, not yet running pod: %s/%s", pod.Namespace, pod.Name)
|
||||
s.client.Pods(pod.Namespace).Delete(pod.Name, api.NewDeleteOptions(0))
|
||||
s.client.Legacy().Pods(pod.Namespace).Delete(pod.Name, api.NewDeleteOptions(0))
|
||||
return
|
||||
}
|
||||
|
||||
|
|
|
@ -45,7 +45,7 @@ import (
|
|||
"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/podtask"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/errors"
|
||||
client "k8s.io/kubernetes/pkg/client/unversioned"
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
|
||||
"k8s.io/kubernetes/pkg/kubelet/container"
|
||||
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
|
||||
"k8s.io/kubernetes/pkg/util/sets"
|
||||
|
@ -69,7 +69,7 @@ type framework struct {
|
|||
// Config related, write-once
|
||||
sched scheduler.Scheduler
|
||||
schedulerConfig *schedcfg.Config
|
||||
client *client.Client
|
||||
client *clientset.Clientset
|
||||
failoverTimeout float64 // in seconds
|
||||
reconcileInterval int64
|
||||
nodeRegistrator node.Registrator
|
||||
|
@ -98,7 +98,7 @@ type framework struct {
|
|||
type Config struct {
|
||||
SchedulerConfig schedcfg.Config
|
||||
ExecutorId *mesos.ExecutorID
|
||||
Client *client.Client
|
||||
Client *clientset.Clientset
|
||||
StoreFrameworkId frameworkid.StoreFunc
|
||||
FailoverTimeout float64
|
||||
ReconcileInterval int64
|
||||
|
@ -526,7 +526,7 @@ func (k *framework) reconcileTerminalTask(driver bindings.SchedulerDriver, taskS
|
|||
// TODO(jdef) for case #2 don't delete the pod, just update it's status to Failed
|
||||
pod := &task.Pod
|
||||
log.Warningf("deleting rogue pod %v/%v for lost task %v", pod.Namespace, pod.Name, task.ID)
|
||||
if err := k.client.Pods(pod.Namespace).Delete(pod.Name, api.NewDeleteOptions(0)); err != nil && !errors.IsNotFound(err) {
|
||||
if err := k.client.Legacy().Pods(pod.Namespace).Delete(pod.Name, api.NewDeleteOptions(0)); err != nil && !errors.IsNotFound(err) {
|
||||
log.Errorf("failed to delete pod %v/%v for terminal task %v: %v", pod.Namespace, pod.Name, task.ID, err)
|
||||
}
|
||||
} else if taskStatus.GetReason() == mesos.TaskStatus_REASON_EXECUTOR_TERMINATED || taskStatus.GetReason() == mesos.TaskStatus_REASON_EXECUTOR_UNREGISTERED {
|
||||
|
@ -578,7 +578,7 @@ func (k *framework) reconcileNonTerminalTask(driver bindings.SchedulerDriver, ta
|
|||
// possible rogue pod exists at this point because we can't identify it; should kill the task
|
||||
log.Errorf("possible rogue pod; illegal api.PodStatusResult, unable to parse full pod name from: '%v' for task %v: %v",
|
||||
podStatus.Name, taskId, err)
|
||||
} else if pod, err := k.client.Pods(namespace).Get(name); err == nil {
|
||||
} else if pod, err := k.client.Legacy().Pods(namespace).Get(name); err == nil {
|
||||
if t, ok, err := podtask.RecoverFrom(*pod); ok {
|
||||
log.Infof("recovered task %v from metadata in pod %v/%v", taskId, namespace, name)
|
||||
_, err := k.sched.Tasks().Register(t)
|
||||
|
@ -593,7 +593,7 @@ func (k *framework) reconcileNonTerminalTask(driver bindings.SchedulerDriver, ta
|
|||
} else if err != nil {
|
||||
//should kill the pod and the task
|
||||
log.Errorf("killing pod, failed to recover task from pod %v/%v: %v", namespace, name, err)
|
||||
if err := k.client.Pods(namespace).Delete(name, nil); err != nil {
|
||||
if err := k.client.Legacy().Pods(namespace).Delete(name, nil); err != nil {
|
||||
log.Errorf("failed to delete pod %v/%v: %v", namespace, name, err)
|
||||
}
|
||||
} else {
|
||||
|
@ -683,7 +683,7 @@ func (k *framework) makeTaskRegistryReconciler() taskreconciler.Action {
|
|||
// tasks identified by annotations in the Kubernetes pod registry.
|
||||
func (k *framework) makePodRegistryReconciler() taskreconciler.Action {
|
||||
return taskreconciler.Action(func(drv bindings.SchedulerDriver, cancel <-chan struct{}) <-chan error {
|
||||
podList, err := k.client.Pods(api.NamespaceAll).List(api.ListOptions{})
|
||||
podList, err := k.client.Legacy().Pods(api.NamespaceAll).List(api.ListOptions{})
|
||||
if err != nil {
|
||||
return proc.ErrorChanf("failed to reconcile pod registry: %v", err)
|
||||
}
|
||||
|
@ -759,7 +759,7 @@ func (k *framework) explicitlyReconcileTasks(driver bindings.SchedulerDriver, ta
|
|||
}
|
||||
|
||||
func (ks *framework) recoverTasks() error {
|
||||
podList, err := ks.client.Pods(api.NamespaceAll).List(api.ListOptions{})
|
||||
podList, err := ks.client.Legacy().Pods(api.NamespaceAll).List(api.ListOptions{})
|
||||
if err != nil {
|
||||
log.V(1).Infof("failed to recover pod registry, madness may ensue: %v", err)
|
||||
return err
|
||||
|
@ -778,7 +778,7 @@ func (ks *framework) recoverTasks() error {
|
|||
}
|
||||
if t, ok, err := podtask.RecoverFrom(pod); err != nil {
|
||||
log.Errorf("failed to recover task from pod, will attempt to delete '%v/%v': %v", pod.Namespace, pod.Name, err)
|
||||
err := ks.client.Pods(pod.Namespace).Delete(pod.Name, nil)
|
||||
err := ks.client.Legacy().Pods(pod.Namespace).Delete(pod.Name, nil)
|
||||
//TODO(jdef) check for temporary or not-found errors
|
||||
if err != nil {
|
||||
log.Errorf("failed to delete pod '%v/%v': %v", pod.Namespace, pod.Name, err)
|
||||
|
|
|
@ -19,6 +19,8 @@ package podreconciler
|
|||
import (
|
||||
"time"
|
||||
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
|
||||
|
||||
log "github.com/golang/glog"
|
||||
"k8s.io/kubernetes/contrib/mesos/pkg/scheduler"
|
||||
"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/components/deleter"
|
||||
|
@ -27,7 +29,6 @@ import (
|
|||
"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/queuer"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
apierrors "k8s.io/kubernetes/pkg/api/errors"
|
||||
client "k8s.io/kubernetes/pkg/client/unversioned"
|
||||
)
|
||||
|
||||
// PodReconciler reconciles a pod with the apiserver
|
||||
|
@ -37,12 +38,12 @@ type PodReconciler interface {
|
|||
|
||||
type podReconciler struct {
|
||||
sched scheduler.Scheduler
|
||||
client *client.Client
|
||||
client *clientset.Clientset
|
||||
qr queuer.Queuer
|
||||
deleter deleter.Deleter
|
||||
}
|
||||
|
||||
func New(sched scheduler.Scheduler, client *client.Client, qr queuer.Queuer, deleter deleter.Deleter) PodReconciler {
|
||||
func New(sched scheduler.Scheduler, client *clientset.Clientset, qr queuer.Queuer, deleter deleter.Deleter) PodReconciler {
|
||||
return &podReconciler{
|
||||
sched: sched,
|
||||
client: client,
|
||||
|
@ -65,7 +66,7 @@ func New(sched scheduler.Scheduler, client *client.Client, qr queuer.Queuer, del
|
|||
func (s *podReconciler) Reconcile(t *podtask.T) {
|
||||
log.V(1).Infof("reconcile pod %v, assigned to slave %q", t.Pod.Name, t.Spec.AssignedSlave)
|
||||
ctx := api.WithNamespace(api.NewDefaultContext(), t.Pod.Namespace)
|
||||
pod, err := s.client.Pods(api.NamespaceValue(ctx)).Get(t.Pod.Name)
|
||||
pod, err := s.client.Legacy().Pods(api.NamespaceValue(ctx)).Get(t.Pod.Name)
|
||||
if err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
// attempt to delete
|
||||
|
|
|
@ -20,6 +20,8 @@ import (
|
|||
"net/http"
|
||||
"sync"
|
||||
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
|
||||
|
||||
mesos "github.com/mesos/mesos-go/mesosproto"
|
||||
"k8s.io/kubernetes/contrib/mesos/pkg/backoff"
|
||||
"k8s.io/kubernetes/contrib/mesos/pkg/offers"
|
||||
|
@ -41,7 +43,6 @@ import (
|
|||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/client/cache"
|
||||
"k8s.io/kubernetes/pkg/client/record"
|
||||
client "k8s.io/kubernetes/pkg/client/unversioned"
|
||||
)
|
||||
|
||||
// sched implements the Scheduler interface.
|
||||
|
@ -59,7 +60,7 @@ func New(
|
|||
c *config.Config,
|
||||
fw framework.Framework,
|
||||
ps podschedulers.PodScheduler,
|
||||
client *client.Client,
|
||||
client *clientset.Clientset,
|
||||
recorder record.EventRecorder,
|
||||
terminate <-chan struct{},
|
||||
mux *http.ServeMux,
|
||||
|
|
|
@ -48,6 +48,7 @@ import (
|
|||
"k8s.io/kubernetes/pkg/api/testapi"
|
||||
"k8s.io/kubernetes/pkg/api/unversioned"
|
||||
"k8s.io/kubernetes/pkg/client/cache"
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
|
||||
client "k8s.io/kubernetes/pkg/client/unversioned"
|
||||
"k8s.io/kubernetes/pkg/runtime"
|
||||
"k8s.io/kubernetes/pkg/util"
|
||||
|
@ -488,7 +489,7 @@ func newLifecycleTest(t *testing.T) lifecycleTest {
|
|||
ei.Data = []byte{0, 1, 2}
|
||||
|
||||
// create framework
|
||||
client := client.NewOrDie(&client.Config{
|
||||
client := clientset.NewForConfigOrDie(&client.Config{
|
||||
Host: apiServer.server.URL,
|
||||
ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()},
|
||||
})
|
||||
|
|
|
@ -59,7 +59,7 @@ func (m *SchedulerServer) newServiceWriter(stop <-chan struct{}) func() {
|
|||
// doesn't already exist.
|
||||
func (m *SchedulerServer) createSchedulerServiceIfNeeded(serviceName string, servicePort int) error {
|
||||
ctx := api.NewDefaultContext()
|
||||
if _, err := m.client.Services(api.NamespaceValue(ctx)).Get(serviceName); err == nil {
|
||||
if _, err := m.client.Legacy().Services(api.NamespaceValue(ctx)).Get(serviceName); err == nil {
|
||||
// The service already exists.
|
||||
return nil
|
||||
}
|
||||
|
@ -79,7 +79,7 @@ func (m *SchedulerServer) createSchedulerServiceIfNeeded(serviceName string, ser
|
|||
if m.serviceAddress != nil {
|
||||
svc.Spec.ClusterIP = m.serviceAddress.String()
|
||||
}
|
||||
_, err := m.client.Services(api.NamespaceValue(ctx)).Create(svc)
|
||||
_, err := m.client.Legacy().Services(api.NamespaceValue(ctx)).Create(svc)
|
||||
if err != nil && errors.IsAlreadyExists(err) {
|
||||
err = nil
|
||||
}
|
||||
|
|
|
@ -33,6 +33,8 @@ import (
|
|||
"sync"
|
||||
"time"
|
||||
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
|
||||
|
||||
etcd "github.com/coreos/etcd/client"
|
||||
"github.com/gogo/protobuf/proto"
|
||||
log "github.com/golang/glog"
|
||||
|
@ -172,7 +174,7 @@ type SchedulerServer struct {
|
|||
conntrackTCPTimeoutEstablished int
|
||||
|
||||
executable string // path to the binary running this service
|
||||
client *client.Client
|
||||
client *clientset.Clientset
|
||||
driver bindings.SchedulerDriver
|
||||
driverMutex sync.RWMutex
|
||||
mux *http.ServeMux
|
||||
|
@ -520,7 +522,7 @@ func (s *SchedulerServer) prepareStaticPods() (data []byte, staticPodCPUs, stati
|
|||
|
||||
// TODO(jdef): hacked from kubelet/server/server.go
|
||||
// TODO(k8s): replace this with clientcmd
|
||||
func (s *SchedulerServer) createAPIServerClient() (*client.Client, error) {
|
||||
func (s *SchedulerServer) createAPIServerClient() (*clientset.Clientset, error) {
|
||||
authInfo, err := clientauth.LoadFromFile(s.authPath)
|
||||
if err != nil {
|
||||
log.Warningf("Could not load kubernetes auth path: %v. Continuing with defaults.", err)
|
||||
|
@ -541,7 +543,7 @@ func (s *SchedulerServer) createAPIServerClient() (*client.Client, error) {
|
|||
log.Infof("Multiple api servers specified. Picking first one")
|
||||
}
|
||||
clientConfig.Host = s.apiServerList[0]
|
||||
c, err := client.New(&clientConfig)
|
||||
c, err := clientset.NewForConfig(&clientConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -721,7 +723,7 @@ func (s *SchedulerServer) bootstrap(hks hyperkube.Interface, sc *schedcfg.Config
|
|||
if err != nil {
|
||||
log.Fatalf("Cannot create client to watch nodes: %v", err)
|
||||
}
|
||||
nodeLW := cache.NewListWatchFromClient(nodesClient, "nodes", api.NamespaceAll, fields.Everything())
|
||||
nodeLW := cache.NewListWatchFromClient(nodesClient.LegacyClient, "nodes", api.NamespaceAll, fields.Everything())
|
||||
nodeStore, nodeCtl := controllerfw.NewInformer(nodeLW, &api.Node{}, s.nodeRelistPeriod, &controllerfw.ResourceEventHandlerFuncs{
|
||||
DeleteFunc: func(obj interface{}) {
|
||||
node := obj.(*api.Node)
|
||||
|
@ -795,7 +797,7 @@ func (s *SchedulerServer) bootstrap(hks hyperkube.Interface, sc *schedcfg.Config
|
|||
broadcaster.StartRecordingToSink(client.Events(""))
|
||||
|
||||
// create scheduler core with all components arranged around it
|
||||
lw := cache.NewListWatchFromClient(client, "pods", api.NamespaceAll, fields.Everything())
|
||||
lw := cache.NewListWatchFromClient(client.LegacyClient, "pods", api.NamespaceAll, fields.Everything())
|
||||
sched := components.New(
|
||||
sc,
|
||||
framework,
|
||||
|
|
|
@ -27,7 +27,7 @@ import (
|
|||
"k8s.io/kubernetes/pkg/api/endpoints"
|
||||
"k8s.io/kubernetes/pkg/api/errors"
|
||||
"k8s.io/kubernetes/pkg/client/cache"
|
||||
client "k8s.io/kubernetes/pkg/client/unversioned"
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
|
||||
kservice "k8s.io/kubernetes/pkg/controller/endpoint"
|
||||
"k8s.io/kubernetes/pkg/controller/framework"
|
||||
"k8s.io/kubernetes/pkg/labels"
|
||||
|
@ -51,7 +51,7 @@ type EndpointController interface {
|
|||
}
|
||||
|
||||
// NewEndpointController returns a new *EndpointController.
|
||||
func NewEndpointController(client *client.Client) *endpointController {
|
||||
func NewEndpointController(client *clientset.Clientset) *endpointController {
|
||||
e := &endpointController{
|
||||
client: client,
|
||||
queue: workqueue.New(),
|
||||
|
@ -59,10 +59,10 @@ func NewEndpointController(client *client.Client) *endpointController {
|
|||
e.serviceStore.Store, e.serviceController = framework.NewInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
|
||||
return e.client.Services(api.NamespaceAll).List(options)
|
||||
return e.client.Legacy().Services(api.NamespaceAll).List(options)
|
||||
},
|
||||
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
|
||||
return e.client.Services(api.NamespaceAll).Watch(options)
|
||||
return e.client.Legacy().Services(api.NamespaceAll).Watch(options)
|
||||
},
|
||||
},
|
||||
&api.Service{},
|
||||
|
@ -79,10 +79,10 @@ func NewEndpointController(client *client.Client) *endpointController {
|
|||
e.podStore.Store, e.podController = framework.NewInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
|
||||
return e.client.Pods(api.NamespaceAll).List(options)
|
||||
return e.client.Legacy().Pods(api.NamespaceAll).List(options)
|
||||
},
|
||||
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
|
||||
return e.client.Pods(api.NamespaceAll).Watch(options)
|
||||
return e.client.Legacy().Pods(api.NamespaceAll).Watch(options)
|
||||
},
|
||||
},
|
||||
&api.Pod{},
|
||||
|
@ -98,7 +98,7 @@ func NewEndpointController(client *client.Client) *endpointController {
|
|||
|
||||
// EndpointController manages selector-based service endpoints.
|
||||
type endpointController struct {
|
||||
client *client.Client
|
||||
client *clientset.Clientset
|
||||
|
||||
serviceStore cache.StoreToServiceLister
|
||||
podStore cache.StoreToPodLister
|
||||
|
@ -264,7 +264,7 @@ func (e *endpointController) syncService(key string) {
|
|||
// Don't retry, as the key isn't going to magically become understandable.
|
||||
return
|
||||
}
|
||||
err = e.client.Endpoints(namespace).Delete(name)
|
||||
err = e.client.Endpoints(namespace).Delete(name, nil)
|
||||
if err != nil && !errors.IsNotFound(err) {
|
||||
glog.Errorf("Error deleting endpoint %q: %v", key, err)
|
||||
e.queue.Add(key) // Retry
|
||||
|
|
|
@@ -16,16 +16,14 @@ limitations under the License.

 package admission

-import (
-client "k8s.io/kubernetes/pkg/client/unversioned"
-)
+import clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"

 // chainAdmissionHandler is an instance of admission.Interface that performs admission control using a chain of admission handlers
 type chainAdmissionHandler []Interface

 // NewFromPlugins returns an admission.Interface that will enforce admission control decisions of all
 // the given plugins.
-func NewFromPlugins(client client.Interface, pluginNames []string, configFilePath string) Interface {
+func NewFromPlugins(client clientset.Interface, pluginNames []string, configFilePath string) Interface {
 plugins := []Interface{}
 for _, pluginName := range pluginNames {
 plugin := InitPlugin(pluginName, client, configFilePath)

@@ -23,14 +23,14 @@ import (
 "sync"

 "github.com/golang/glog"
-client "k8s.io/kubernetes/pkg/client/unversioned"
+clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
 )

 // Factory is a function that returns an Interface for admission decisions.
 // The config parameter provides an io.Reader handler to the factory in
 // order to load specific configurations. If no configuration is provided
 // the parameter is nil.
-type Factory func(client client.Interface, config io.Reader) (Interface, error)
+type Factory func(client clientset.Interface, config io.Reader) (Interface, error)

 // All registered admission options.
 var (
@@ -67,7 +67,7 @@ func RegisterPlugin(name string, plugin Factory) {
 // known. The error is returned only when the named provider was known but failed
 // to initialize. The config parameter specifies the io.Reader handler of the
 // configuration file for the cloud provider, or nil for no configuration.
-func GetPlugin(name string, client client.Interface, config io.Reader) (Interface, error) {
+func GetPlugin(name string, client clientset.Interface, config io.Reader) (Interface, error) {
 pluginsMutex.Lock()
 defer pluginsMutex.Unlock()
 f, found := plugins[name]
@@ -78,7 +78,7 @@ func GetPlugin(name string, client client.Interface, config io.Reader) (Interfac
 }

 // InitPlugin creates an instance of the named interface.
-func InitPlugin(name string, client client.Interface, configFilePath string) Interface {
+func InitPlugin(name string, client clientset.Interface, configFilePath string) Interface {
 var (
 config *os.File
 err error

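For illustration only, an admission plugin factory written against the updated signature might look like the sketch below; the plugin package and names are hypothetical, and the plugin body is elided since only the clientset.Interface parameter changes here:

```go
package exampleplugin

import (
	"io"

	"k8s.io/kubernetes/pkg/admission"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
)

// NewFactory returns an admission.Factory matching the updated signature: the
// factory now receives the generated clientset.Interface instead of the
// unversioned client.Interface.
func NewFactory() admission.Factory {
	return func(client clientset.Interface, config io.Reader) (admission.Interface, error) {
		// A real plugin would construct and return its admission.Interface
		// here, reaching resources through typed group clients on `client`.
		return nil, nil
	}
}
```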
@@ -2152,6 +2152,8 @@ type SecretList struct {
 Items []Secret `json:"items"`
 }

+// +genclient=true
+
 // ConfigMap holds configuration data for components or applications to consume.
 type ConfigMap struct {
 unversioned.TypeMeta `json:",inline"`

@@ -17,6 +17,7 @@ limitations under the License.
 package release_1_1

 import (
+"github.com/golang/glog"
 extensions_unversioned "k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned"
 legacy_unversioned "k8s.io/kubernetes/pkg/client/typed/generated/legacy/unversioned"
 unversioned "k8s.io/kubernetes/pkg/client/unversioned"
@@ -57,18 +58,18 @@ func NewForConfig(c *unversioned.Config) (*Clientset, error) {
 var err error
 clientset.LegacyClient, err = legacy_unversioned.NewForConfig(c)
 if err != nil {
-return nil, err
+return &clientset, err
 }
 clientset.ExtensionsClient, err = extensions_unversioned.NewForConfig(c)
 if err != nil {
-return nil, err
+return &clientset, err
 }

 clientset.DiscoveryClient, err = unversioned.NewDiscoveryClientForConfig(c)
 if err != nil {
-return nil, err
+glog.Errorf("failed to create the DiscoveryClient: %v", err)
 }
-return &clientset, nil
+return &clientset, err
 }

 // NewForConfigOrDie creates a new Clientset for the given config and

@@ -18,6 +18,7 @@ package unversioned

 import "k8s.io/kubernetes/pkg/apis/extensions"

+// The DeploymentExpansion interface allows manually adding extra methods to the DeploymentInterface.
 type DeploymentExpansion interface {
 Rollback(*extensions.DeploymentRollback) error
 }

@@ -24,8 +24,6 @@ type IngressExpansion interface{}

 type JobExpansion interface{}

-type ScaleExpansion interface{}
-
 type ThirdPartyResourceExpansion interface{}

 type ReplicaSetExpansion interface{}

@@ -0,0 +1,65 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package unversioned

import (
"k8s.io/kubernetes/pkg/api/meta"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/extensions"
)

// The ScaleExpansion interface allows manually adding extra methods to the ScaleInterface.
type ScaleExpansion interface {
Get(kind string, name string) (*extensions.Scale, error)
Update(kind string, scale *extensions.Scale) (*extensions.Scale, error)
}

// Get takes the reference to scale subresource and returns the subresource or error, if one occurs.
func (c *scales) Get(kind string, name string) (result *extensions.Scale, err error) {
result = &extensions.Scale{}

// TODO this method needs to take a proper unambiguous kind
fullyQualifiedKind := unversioned.GroupVersionKind{Kind: kind}
resource, _ := meta.KindToResource(fullyQualifiedKind, false)

err = c.client.Get().
Namespace(c.ns).
Resource(resource.Resource).
Name(name).
SubResource("scale").
Do().
Into(result)
return
}

func (c *scales) Update(kind string, scale *extensions.Scale) (result *extensions.Scale, err error) {
result = &extensions.Scale{}

// TODO this method needs to take a proper unambiguous kind
fullyQualifiedKind := unversioned.GroupVersionKind{Kind: kind}
resource, _ := meta.KindToResource(fullyQualifiedKind, false)

err = c.client.Put().
Namespace(scale.Namespace).
Resource(resource.Resource).
Name(scale.Name).
SubResource("scale").
Body(scale).
Do().
Into(result)
return
}
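Note: for orientation, a hedged usage sketch of the scale expansion methods added above. It assumes the generated extensions client exposes a Scales(namespace) accessor and that extensions.Scale carries Spec.Replicas; the function name and the replica arithmetic are illustrative, and the kind string stays as ambiguous as the TODO above notes.

package main

import (
    extensions_unversioned "k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned"
)

// bumpScale reads the scale subresource for the named object and writes it back with one more replica.
func bumpScale(c extensions_unversioned.ExtensionsInterface, namespace, kind, name string) error {
    scale, err := c.Scales(namespace).Get(kind, name)
    if err != nil {
        return err
    }
    scale.Spec.Replicas = scale.Spec.Replicas + 1 // assumed field, unchanged by this PR
    _, err = c.Scales(namespace).Update(kind, scale)
    return err
}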
@@ -0,0 +1,135 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package unversioned

import (
api "k8s.io/kubernetes/pkg/api"
watch "k8s.io/kubernetes/pkg/watch"
)

// ConfigMapsGetter has a method to return a ConfigMapInterface.
// A group's client should implement this interface.
type ConfigMapsGetter interface {
ConfigMaps(namespace string) ConfigMapInterface
}

// ConfigMapInterface has methods to work with ConfigMap resources.
type ConfigMapInterface interface {
Create(*api.ConfigMap) (*api.ConfigMap, error)
Update(*api.ConfigMap) (*api.ConfigMap, error)
Delete(name string, options *api.DeleteOptions) error
DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error
Get(name string) (*api.ConfigMap, error)
List(opts api.ListOptions) (*api.ConfigMapList, error)
Watch(opts api.ListOptions) (watch.Interface, error)
ConfigMapExpansion
}

// configMaps implements ConfigMapInterface
type configMaps struct {
client *LegacyClient
ns string
}

// newConfigMaps returns a ConfigMaps
func newConfigMaps(c *LegacyClient, namespace string) *configMaps {
return &configMaps{
client: c,
ns: namespace,
}
}

// Create takes the representation of a configMap and creates it. Returns the server's representation of the configMap, and an error, if there is any.
func (c *configMaps) Create(configMap *api.ConfigMap) (result *api.ConfigMap, err error) {
result = &api.ConfigMap{}
err = c.client.Post().
Namespace(c.ns).
Resource("configmaps").
Body(configMap).
Do().
Into(result)
return
}

// Update takes the representation of a configMap and updates it. Returns the server's representation of the configMap, and an error, if there is any.
func (c *configMaps) Update(configMap *api.ConfigMap) (result *api.ConfigMap, err error) {
result = &api.ConfigMap{}
err = c.client.Put().
Namespace(c.ns).
Resource("configmaps").
Name(configMap.Name).
Body(configMap).
Do().
Into(result)
return
}

// Delete takes name of the configMap and deletes it. Returns an error if one occurs.
func (c *configMaps) Delete(name string, options *api.DeleteOptions) error {
return c.client.Delete().
Namespace(c.ns).
Resource("configmaps").
Name(name).
Body(options).
Do().
Error()
}

// DeleteCollection deletes a collection of objects.
func (c *configMaps) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error {
return c.client.Delete().
Namespace(c.ns).
Resource("configmaps").
VersionedParams(&listOptions, api.ParameterCodec).
Body(options).
Do().
Error()
}

// Get takes name of the configMap, and returns the corresponding configMap object, and an error if there is any.
func (c *configMaps) Get(name string) (result *api.ConfigMap, err error) {
result = &api.ConfigMap{}
err = c.client.Get().
Namespace(c.ns).
Resource("configmaps").
Name(name).
Do().
Into(result)
return
}

// List takes label and field selectors, and returns the list of ConfigMaps that match those selectors.
func (c *configMaps) List(opts api.ListOptions) (result *api.ConfigMapList, err error) {
result = &api.ConfigMapList{}
err = c.client.Get().
Namespace(c.ns).
Resource("configmaps").
VersionedParams(&opts, api.ParameterCodec).
Do().
Into(result)
return
}

// Watch returns a watch.Interface that watches the requested configMaps.
func (c *configMaps) Watch(opts api.ListOptions) (watch.Interface, error) {
return c.client.Get().
Prefix("watch").
Namespace(c.ns).
Resource("configmaps").
VersionedParams(&opts, api.ParameterCodec).
Watch()
}
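Note: a hedged sketch of the new ConfigMap client in use through the generated clientset. The namespace, object name, Data contents, and fallback-to-Get behaviour are illustrative; the accessors used (Legacy().ConfigMaps, Create, Get) are the ones introduced in this PR, and the ObjectMeta/Data fields are assumed from the ConfigMap type added above.

package main

import (
    api "k8s.io/kubernetes/pkg/api"
    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
)

// ensureConfigMap creates the ConfigMap and falls back to reading it if the create fails.
func ensureConfigMap(cs *clientset.Clientset, ns string) (*api.ConfigMap, error) {
    cm := &api.ConfigMap{
        ObjectMeta: api.ObjectMeta{Name: "example-settings"}, // illustrative name
        Data:       map[string]string{"key": "value"},
    }
    created, err := cs.Legacy().ConfigMaps(ns).Create(cm)
    if err == nil {
        return created, nil
    }
    // Error handling is simplified; a real caller would inspect the error before retrying.
    return cs.Legacy().ConfigMaps(ns).Get("example-settings")
}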
@@ -24,6 +24,7 @@ import (
"k8s.io/kubernetes/pkg/runtime"
)

// The EventExpansion interface allows manually adding extra methods to the EventInterface.
type EventExpansion interface {
// CreateWithEventNamespace is the same as a Create, except that it sends the request to the event.Namespace.
CreateWithEventNamespace(event *api.Event) (*api.Event, error)
@@ -0,0 +1,102 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package fake

import (
api "k8s.io/kubernetes/pkg/api"
core "k8s.io/kubernetes/pkg/client/testing/core"
labels "k8s.io/kubernetes/pkg/labels"
watch "k8s.io/kubernetes/pkg/watch"
)

// FakeConfigMaps implements ConfigMapInterface
type FakeConfigMaps struct {
Fake *FakeLegacy
ns string
}

func (c *FakeConfigMaps) Create(configMap *api.ConfigMap) (result *api.ConfigMap, err error) {
obj, err := c.Fake.
Invokes(core.NewCreateAction("configmaps", c.ns, configMap), &api.ConfigMap{})

if obj == nil {
return nil, err
}
return obj.(*api.ConfigMap), err
}

func (c *FakeConfigMaps) Update(configMap *api.ConfigMap) (result *api.ConfigMap, err error) {
obj, err := c.Fake.
Invokes(core.NewUpdateAction("configmaps", c.ns, configMap), &api.ConfigMap{})

if obj == nil {
return nil, err
}
return obj.(*api.ConfigMap), err
}

func (c *FakeConfigMaps) Delete(name string, options *api.DeleteOptions) error {
_, err := c.Fake.
Invokes(core.NewDeleteAction("configmaps", c.ns, name), &api.ConfigMap{})

return err
}

func (c *FakeConfigMaps) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error {
action := core.NewDeleteCollectionAction("events", c.ns, listOptions)

_, err := c.Fake.Invokes(action, &api.ConfigMapList{})
return err
}

func (c *FakeConfigMaps) Get(name string) (result *api.ConfigMap, err error) {
obj, err := c.Fake.
Invokes(core.NewGetAction("configmaps", c.ns, name), &api.ConfigMap{})

if obj == nil {
return nil, err
}
return obj.(*api.ConfigMap), err
}

func (c *FakeConfigMaps) List(opts api.ListOptions) (result *api.ConfigMapList, err error) {
obj, err := c.Fake.
Invokes(core.NewListAction("configmaps", c.ns, opts), &api.ConfigMapList{})

if obj == nil {
return nil, err
}

label := opts.LabelSelector
if label == nil {
label = labels.Everything()
}
list := &api.ConfigMapList{}
for _, item := range obj.(*api.ConfigMapList).Items {
if label.Matches(labels.Set(item.Labels)) {
list.Items = append(list.Items, item)
}
}
return list, err
}

// Watch returns a watch.Interface that watches the requested configMaps.
func (c *FakeConfigMaps) Watch(opts api.ListOptions) (watch.Interface, error) {
return c.Fake.
InvokesWatch(core.NewWatchAction("configmaps", c.ns, opts))

}
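Note: a hedged sketch of a unit test that exercises the fake above through fake.NewSimpleClientset (the constructor used elsewhere in this PR). The test name, namespace, and object are illustrative; only accessors visible in this diff are used.

package fake_test

import (
    "testing"

    api "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/client/testing/fake"
)

func TestFakeConfigMapCreateAndGet(t *testing.T) {
    cs := fake.NewSimpleClientset()
    in := &api.ConfigMap{ObjectMeta: api.ObjectMeta{Name: "cm", Namespace: "ns"}}
    if _, err := cs.Legacy().ConfigMaps("ns").Create(in); err != nil {
        t.Fatalf("unexpected error creating ConfigMap: %v", err)
    }
    out, err := cs.Legacy().ConfigMaps("ns").Get("cm")
    if err != nil || out.Name != "cm" {
        t.Fatalf("unexpected Get result: %v, %v", out, err)
    }
}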
@@ -29,6 +29,10 @@ func (c *FakeLegacy) ComponentStatuses() unversioned.ComponentStatusInterface {
return &FakeComponentStatuses{c}
}

func (c *FakeLegacy) ConfigMaps(namespace string) unversioned.ConfigMapInterface {
return &FakeConfigMaps{c, namespace}
}

func (c *FakeLegacy) Endpoints(namespace string) unversioned.EndpointsInterface {
return &FakeEndpoints{c, namespace}
}

@@ -37,3 +37,5 @@ type ResourceQuotaExpansion interface{}
type SecretExpansion interface{}

type ServiceAccountExpansion interface{}

type ConfigMapExpansion interface{}

@@ -24,6 +24,7 @@ import (

type LegacyInterface interface {
ComponentStatusesGetter
ConfigMapsGetter
EndpointsGetter
EventsGetter
LimitRangesGetter

@@ -49,6 +50,10 @@ func (c *LegacyClient) ComponentStatuses() ComponentStatusInterface {
return newComponentStatuses(c)
}

func (c *LegacyClient) ConfigMaps(namespace string) ConfigMapInterface {
return newConfigMaps(c, namespace)
}

func (c *LegacyClient) Endpoints(namespace string) EndpointsInterface {
return newEndpoints(c, namespace)
}
@@ -18,6 +18,7 @@ package unversioned

import "k8s.io/kubernetes/pkg/api"

// The NamespaceExpansion interface allows manually adding extra methods to the NamespaceInterface.
type NamespaceExpansion interface {
Finalize(item *api.Namespace) (*api.Namespace, error)
}

@@ -21,6 +21,7 @@ import (
unversioned "k8s.io/kubernetes/pkg/client/unversioned"
)

// The PodExpansion interface allows manually adding extra methods to the PodInterface.
type PodExpansion interface {
Bind(binding *api.Binding) error
GetLogs(name string, opts *api.PodLogOptions) *unversioned.Request

@@ -21,6 +21,7 @@ import (
"k8s.io/kubernetes/pkg/util/net"
)

// The ServiceExpansion interface allows manually adding extra methods to the ServiceInterface.
type ServiceExpansion interface {
ProxyGet(scheme, name, port, path string, params map[string]string) unversioned.ResponseWrapper
}
@@ -27,7 +27,7 @@ import (
"k8s.io/kubernetes/pkg/api/errors"
podutil "k8s.io/kubernetes/pkg/api/pod"
"k8s.io/kubernetes/pkg/client/cache"
client "k8s.io/kubernetes/pkg/client/unversioned"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/labels"

@@ -53,7 +53,7 @@ var (
)

// NewEndpointController returns a new *EndpointController.
func NewEndpointController(client *client.Client, resyncPeriod controller.ResyncPeriodFunc) *EndpointController {
func NewEndpointController(client *clientset.Clientset, resyncPeriod controller.ResyncPeriodFunc) *EndpointController {
e := &EndpointController{
client: client,
queue: workqueue.New(),

@@ -62,10 +62,10 @@ func NewEndpointController(client *client.Client, resyncPeriod controller.Resync
e.serviceStore.Store, e.serviceController = framework.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return e.client.Services(api.NamespaceAll).List(options)
return e.client.Legacy().Services(api.NamespaceAll).List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
return e.client.Services(api.NamespaceAll).Watch(options)
return e.client.Legacy().Services(api.NamespaceAll).Watch(options)
},
},
&api.Service{},

@@ -83,10 +83,10 @@ func NewEndpointController(client *client.Client, resyncPeriod controller.Resync
e.podStore.Store, e.podController = framework.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return e.client.Pods(api.NamespaceAll).List(options)
return e.client.Legacy().Pods(api.NamespaceAll).List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
return e.client.Pods(api.NamespaceAll).Watch(options)
return e.client.Legacy().Pods(api.NamespaceAll).Watch(options)
},
},
&api.Pod{},

@@ -103,7 +103,7 @@ func NewEndpointController(client *client.Client, resyncPeriod controller.Resync

// EndpointController manages selector-based service endpoints.
type EndpointController struct {
client *client.Client
client *clientset.Clientset

serviceStore cache.StoreToServiceLister
podStore cache.StoreToPodLister

@@ -268,7 +268,7 @@ func (e *EndpointController) syncService(key string) {
// Don't retry, as the key isn't going to magically become understandable.
return
}
err = e.client.Endpoints(namespace).Delete(name)
err = e.client.Endpoints(namespace).Delete(name, nil)
if err != nil && !errors.IsNotFound(err) {
glog.Errorf("Error deleting endpoint %q: %v", key, err)
e.queue.Add(key) // Retry
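Note: a hedged sketch of wiring the endpoint controller against the generated clientset, mirroring the constructor change above. It assumes this helper lives in the same package as the controller; the worker count, stop channel, and function name are illustrative.

import (
    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
    client "k8s.io/kubernetes/pkg/client/unversioned"
    "k8s.io/kubernetes/pkg/controller"
    "k8s.io/kubernetes/pkg/util"
)

func startEndpointController(cfg *client.Config) error {
    // Build the generated clientset once and hand it to the controller.
    cs, err := clientset.NewForConfig(cfg)
    if err != nil {
        return err
    }
    e := NewEndpointController(cs, controller.NoResyncPeriodFunc)
    go e.Run(3, util.NeverStop) // 3 workers is an illustrative choice
    return nil
}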
@@ -28,6 +28,7 @@ import (
"k8s.io/kubernetes/pkg/api/unversioned"
_ "k8s.io/kubernetes/pkg/apimachinery/registered"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/runtime"

@@ -104,7 +105,7 @@ func TestSyncEndpointsItemsPreserveNoSelector(t *testing.T) {
}})
// TODO: Uncomment when fix #19254
// defer testServer.Close()
client := client.NewOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
client := clientset.NewForConfigOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
endpoints := NewEndpointController(client, controller.NoResyncPeriodFunc)
endpoints.serviceStore.Store.Add(&api.Service{
ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: ns},

@@ -137,7 +138,7 @@ func TestCheckLeftoverEndpoints(t *testing.T) {
}})
// TODO: Uncomment when fix #19254
// defer testServer.Close()
client := client.NewOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
client := clientset.NewForConfigOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
endpoints := NewEndpointController(client, controller.NoResyncPeriodFunc)
endpoints.checkLeftoverEndpoints()

@@ -166,7 +167,7 @@ func TestSyncEndpointsProtocolTCP(t *testing.T) {
}})
// TODO: Uncomment when fix #19254
// defer testServer.Close()
client := client.NewOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
client := clientset.NewForConfigOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
endpoints := NewEndpointController(client, controller.NoResyncPeriodFunc)
addPods(endpoints.podStore.Store, ns, 1, 1, 0)
endpoints.serviceStore.Store.Add(&api.Service{

@@ -208,7 +209,7 @@ func TestSyncEndpointsProtocolUDP(t *testing.T) {
}})
// TODO: Uncomment when fix #19254
// defer testServer.Close()
client := client.NewOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
client := clientset.NewForConfigOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
endpoints := NewEndpointController(client, controller.NoResyncPeriodFunc)
addPods(endpoints.podStore.Store, ns, 1, 1, 0)
endpoints.serviceStore.Store.Add(&api.Service{

@@ -247,7 +248,7 @@ func TestSyncEndpointsItemsEmptySelectorSelectsAll(t *testing.T) {
}})
// TODO: Uncomment when fix #19254
// defer testServer.Close()
client := client.NewOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
client := clientset.NewForConfigOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
endpoints := NewEndpointController(client, controller.NoResyncPeriodFunc)
addPods(endpoints.podStore.Store, ns, 1, 1, 0)
endpoints.serviceStore.Store.Add(&api.Service{

@@ -285,7 +286,7 @@ func TestSyncEndpointsItemsEmptySelectorSelectsAllNotReady(t *testing.T) {
}})
// TODO: Uncomment when fix #19254
// defer testServer.Close()
client := client.NewOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
client := clientset.NewForConfigOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
endpoints := NewEndpointController(client, controller.NoResyncPeriodFunc)
addPods(endpoints.podStore.Store, ns, 0, 1, 1)
endpoints.serviceStore.Store.Add(&api.Service{

@@ -323,7 +324,7 @@ func TestSyncEndpointsItemsEmptySelectorSelectsAllMixed(t *testing.T) {
}})
// TODO: Uncomment when fix #19254
// defer testServer.Close()
client := client.NewOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
client := clientset.NewForConfigOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
endpoints := NewEndpointController(client, controller.NoResyncPeriodFunc)
addPods(endpoints.podStore.Store, ns, 1, 1, 1)
endpoints.serviceStore.Store.Add(&api.Service{

@@ -365,7 +366,7 @@ func TestSyncEndpointsItemsPreexisting(t *testing.T) {
}})
// TODO: Uncomment when fix #19254
// defer testServer.Close()
client := client.NewOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
client := clientset.NewForConfigOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
endpoints := NewEndpointController(client, controller.NoResyncPeriodFunc)
addPods(endpoints.podStore.Store, ns, 1, 1, 0)
endpoints.serviceStore.Store.Add(&api.Service{

@@ -406,7 +407,7 @@ func TestSyncEndpointsItemsPreexistingIdentical(t *testing.T) {
}})
// TODO: Uncomment when fix #19254
// defer testServer.Close()
client := client.NewOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
client := clientset.NewForConfigOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
endpoints := NewEndpointController(client, controller.NoResyncPeriodFunc)
addPods(endpoints.podStore.Store, api.NamespaceDefault, 1, 1, 0)
endpoints.serviceStore.Store.Add(&api.Service{

@@ -426,7 +427,7 @@ func TestSyncEndpointsItems(t *testing.T) {
serverResponse{http.StatusOK, &api.Endpoints{}})
// TODO: Uncomment when fix #19254
// defer testServer.Close()
client := client.NewOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
client := clientset.NewForConfigOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
endpoints := NewEndpointController(client, controller.NoResyncPeriodFunc)
addPods(endpoints.podStore.Store, ns, 3, 2, 0)
addPods(endpoints.podStore.Store, "blah", 5, 2, 0) // make sure these aren't found!

@@ -469,7 +470,7 @@ func TestSyncEndpointsItemsWithLabels(t *testing.T) {
serverResponse{http.StatusOK, &api.Endpoints{}})
// TODO: Uncomment when fix #19254
// defer testServer.Close()
client := client.NewOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
client := clientset.NewForConfigOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
endpoints := NewEndpointController(client, controller.NoResyncPeriodFunc)
addPods(endpoints.podStore.Store, ns, 3, 2, 0)
serviceLabels := map[string]string{"foo": "bar"}

@@ -530,7 +531,7 @@ func TestSyncEndpointsItemsPreexistingLabelsChange(t *testing.T) {
}})
// TODO: Uncomment when fix #19254
// defer testServer.Close()
client := client.NewOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
client := clientset.NewForConfigOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
endpoints := NewEndpointController(client, controller.NoResyncPeriodFunc)
addPods(endpoints.podStore.Store, ns, 1, 1, 0)
serviceLabels := map[string]string{"baz": "blah"}
@@ -23,7 +23,7 @@ import (

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/cache"
client "k8s.io/kubernetes/pkg/client/unversioned"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/fields"

@@ -41,19 +41,19 @@ const (
)

type GCController struct {
kubeClient client.Interface
kubeClient clientset.Interface
podStore cache.StoreToPodLister
podStoreSyncer *framework.Controller
deletePod func(namespace, name string) error
threshold int
}

func New(kubeClient client.Interface, resyncPeriod controller.ResyncPeriodFunc, threshold int) *GCController {
func New(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, threshold int) *GCController {
gcc := &GCController{
kubeClient: kubeClient,
threshold: threshold,
deletePod: func(namespace, name string) error {
return kubeClient.Pods(namespace).Delete(name, api.NewDeleteOptions(0))
return kubeClient.Legacy().Pods(namespace).Delete(name, api.NewDeleteOptions(0))
},
}

@@ -63,11 +63,11 @@ func New(kubeClient client.Interface, resyncPeriod controller.ResyncPeriodFunc,
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
options.FieldSelector = terminatedSelector
return gcc.kubeClient.Pods(api.NamespaceAll).List(options)
return gcc.kubeClient.Legacy().Pods(api.NamespaceAll).List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
options.FieldSelector = terminatedSelector
return gcc.kubeClient.Pods(api.NamespaceAll).Watch(options)
return gcc.kubeClient.Legacy().Pods(api.NamespaceAll).Watch(options)
},
},
&api.Pod{},
@@ -23,7 +23,7 @@ import (

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/client/unversioned/testclient"
"k8s.io/kubernetes/pkg/client/testing/fake"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/util/sets"
)

@@ -66,7 +66,7 @@ func TestGC(t *testing.T) {
}

for i, test := range testCases {
client := testclient.NewSimpleFake()
client := fake.NewSimpleClientset()
gcc := New(client, controller.NoResyncPeriodFunc, test.threshold)
deletedPodNames := make([]string, 0)
var lock sync.Mutex
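Note: a hedged sketch of the testing pattern this swap enables. fake.NewSimpleClientset records every request the controller makes, so a test can assert on recorded actions; the test name is illustrative, it assumes the gc package above, and the Actions() helper is assumed from the fake's recorded-action API used later in this PR.

import (
    "testing"

    "k8s.io/kubernetes/pkg/client/testing/fake"
    "k8s.io/kubernetes/pkg/controller"
)

func TestGCMakesNoCallsBeforeRunning(t *testing.T) {
    client := fake.NewSimpleClientset()
    _ = New(client, controller.NoResyncPeriodFunc, 10) // threshold is illustrative
    // Constructing the controller should not hit the API server at all.
    if got := len(client.Actions()); got != 0 {
        t.Fatalf("expected no recorded API calls, got %d", got)
    }
}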
@@ -24,7 +24,8 @@ import (
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/client/cache"
client "k8s.io/kubernetes/pkg/client/unversioned"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
extensions_unversioned "k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/runtime"
utilruntime "k8s.io/kubernetes/pkg/util/runtime"

@@ -41,15 +42,15 @@ type NamespaceController struct {
}

// NewNamespaceController creates a new NamespaceController
func NewNamespaceController(kubeClient client.Interface, versions *unversioned.APIVersions, resyncPeriod time.Duration) *NamespaceController {
func NewNamespaceController(kubeClient clientset.Interface, versions *unversioned.APIVersions, resyncPeriod time.Duration) *NamespaceController {
var controller *framework.Controller
_, controller = framework.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return kubeClient.Namespaces().List(options)
return kubeClient.Legacy().Namespaces().List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
return kubeClient.Namespaces().Watch(options)
return kubeClient.Legacy().Namespaces().Watch(options)
},
},
&api.Namespace{},

@@ -125,7 +126,7 @@ func finalized(namespace *api.Namespace) bool {
}

// finalize will finalize the namespace for kubernetes
func finalizeNamespaceFunc(kubeClient client.Interface, namespace *api.Namespace) (*api.Namespace, error) {
func finalizeNamespaceFunc(kubeClient clientset.Interface, namespace *api.Namespace) (*api.Namespace, error) {
namespaceFinalize := api.Namespace{}
namespaceFinalize.ObjectMeta = namespace.ObjectMeta
namespaceFinalize.Spec = namespace.Spec

@@ -139,7 +140,7 @@ func finalizeNamespaceFunc(kubeClient client.Interface, namespace *api.Namespace
for _, value := range finalizerSet.List() {
namespaceFinalize.Spec.Finalizers = append(namespaceFinalize.Spec.Finalizers, api.FinalizerName(value))
}
namespace, err := kubeClient.Namespaces().Finalize(&namespaceFinalize)
namespace, err := kubeClient.Legacy().Namespaces().Finalize(&namespaceFinalize)
if err != nil {
// it was removed already, so life is good
if errors.IsNotFound(err) {

@@ -160,7 +161,7 @@ func (e *contentRemainingError) Error() string {
// deleteAllContent will delete all content known to the system in a namespace. It returns an estimate
// of the time remaining before the remaining resources are deleted. If estimate > 0 not all resources
// are guaranteed to be gone.
func deleteAllContent(kubeClient client.Interface, versions *unversioned.APIVersions, namespace string, before unversioned.Time) (estimate int64, err error) {
func deleteAllContent(kubeClient clientset.Interface, versions *unversioned.APIVersions, namespace string, before unversioned.Time) (estimate int64, err error) {
err = deleteServiceAccounts(kubeClient, namespace)
if err != nil {
return estimate, err

@@ -238,11 +239,11 @@ func deleteAllContent(kubeClient client.Interface, versions *unversioned.APIVers
}

// updateNamespaceFunc is a function that makes an update to a namespace
type updateNamespaceFunc func(kubeClient client.Interface, namespace *api.Namespace) (*api.Namespace, error)
type updateNamespaceFunc func(kubeClient clientset.Interface, namespace *api.Namespace) (*api.Namespace, error)

// retryOnConflictError retries the specified fn if there was a conflict error
// TODO RetryOnConflict should be a generic concept in client code
func retryOnConflictError(kubeClient client.Interface, namespace *api.Namespace, fn updateNamespaceFunc) (result *api.Namespace, err error) {
func retryOnConflictError(kubeClient clientset.Interface, namespace *api.Namespace, fn updateNamespaceFunc) (result *api.Namespace, err error) {
latestNamespace := namespace
for {
result, err = fn(kubeClient, latestNamespace)

@@ -252,7 +253,7 @@ func retryOnConflictError(kubeClient client.Interface, namespace *api.Namespace,
if !errors.IsConflict(err) {
return nil, err
}
latestNamespace, err = kubeClient.Namespaces().Get(latestNamespace.Name)
latestNamespace, err = kubeClient.Legacy().Namespaces().Get(latestNamespace.Name)
if err != nil {
return nil, err
}
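Note: to make the retry helper above concrete, a hedged sketch of an updateNamespaceFunc that adds a label and is retried on write conflicts. It assumes this sits inside the namespace controller package, that the generated Namespaces client keeps an Update method, and the label key is illustrative; retryOnConflictError re-fetches the namespace and calls the function again whenever the apiserver reports a conflict.

import (
    api "k8s.io/kubernetes/pkg/api"
    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
)

// addAuditLabel is an updateNamespaceFunc; call it as:
//   ns, err := retryOnConflictError(kubeClient, ns, addAuditLabel)
func addAuditLabel(kubeClient clientset.Interface, namespace *api.Namespace) (*api.Namespace, error) {
    if namespace.Labels == nil {
        namespace.Labels = map[string]string{}
    }
    namespace.Labels["example.io/audited"] = "true" // illustrative key
    return kubeClient.Legacy().Namespaces().Update(namespace) // Update assumed to exist on the generated client
}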
@@ -261,7 +262,7 @@ func retryOnConflictError(kubeClient client.Interface, namespace *api.Namespace,
}

// updateNamespaceStatusFunc will verify that the status of the namespace is correct
func updateNamespaceStatusFunc(kubeClient client.Interface, namespace *api.Namespace) (*api.Namespace, error) {
func updateNamespaceStatusFunc(kubeClient clientset.Interface, namespace *api.Namespace) (*api.Namespace, error) {
if namespace.DeletionTimestamp.IsZero() || namespace.Status.Phase == api.NamespaceTerminating {
return namespace, nil
}

@@ -269,11 +270,11 @@ func updateNamespaceStatusFunc(kubeClient client.Interface, namespace *api.Names
newNamespace.ObjectMeta = namespace.ObjectMeta
newNamespace.Status = namespace.Status
newNamespace.Status.Phase = api.NamespaceTerminating
return kubeClient.Namespaces().Status(&newNamespace)
return kubeClient.Legacy().Namespaces().UpdateStatus(&newNamespace)
}

// syncNamespace orchestrates deletion of a Namespace and its associated content.
func syncNamespace(kubeClient client.Interface, versions *unversioned.APIVersions, namespace *api.Namespace) error {
func syncNamespace(kubeClient clientset.Interface, versions *unversioned.APIVersions, namespace *api.Namespace) error {
if namespace.DeletionTimestamp == nil {
return nil
}

@@ -281,7 +282,7 @@ func syncNamespace(kubeClient client.Interface, versions *unversioned.APIVersion
// multiple controllers may edit a namespace during termination
// first get the latest state of the namespace before proceeding
// if the namespace was deleted already, don't do anything
namespace, err := kubeClient.Namespaces().Get(namespace.Name)
namespace, err := kubeClient.Legacy().Namespaces().Get(namespace.Name)
if err != nil {
if errors.IsNotFound(err) {
return nil

@@ -303,7 +304,7 @@ func syncNamespace(kubeClient client.Interface, versions *unversioned.APIVersion

// if the namespace is already finalized, delete it
if finalized(namespace) {
err = kubeClient.Namespaces().Delete(namespace.Name)
err = kubeClient.Legacy().Namespaces().Delete(namespace.Name, nil)
if err != nil && !errors.IsNotFound(err) {
return err
}

@@ -327,7 +328,7 @@ func syncNamespace(kubeClient client.Interface, versions *unversioned.APIVersion

// now check if all finalizers have reported that we delete now
if finalized(result) {
err = kubeClient.Namespaces().Delete(namespace.Name)
err = kubeClient.Legacy().Namespaces().Delete(namespace.Name, nil)
if err != nil && !errors.IsNotFound(err) {
return err
}

@@ -336,13 +337,13 @@ func syncNamespace(kubeClient client.Interface, versions *unversioned.APIVersion
return nil
}

func deleteLimitRanges(kubeClient client.Interface, ns string) error {
items, err := kubeClient.LimitRanges(ns).List(api.ListOptions{})
func deleteLimitRanges(kubeClient clientset.Interface, ns string) error {
items, err := kubeClient.Legacy().LimitRanges(ns).List(api.ListOptions{})
if err != nil {
return err
}
for i := range items.Items {
err := kubeClient.LimitRanges(ns).Delete(items.Items[i].Name)
err := kubeClient.Legacy().LimitRanges(ns).Delete(items.Items[i].Name, nil)
if err != nil && !errors.IsNotFound(err) {
return err
}

@@ -350,13 +351,13 @@ func deleteLimitRanges(kubeClient client.Interface, ns string) error {
return nil
}

func deleteResourceQuotas(kubeClient client.Interface, ns string) error {
resourceQuotas, err := kubeClient.ResourceQuotas(ns).List(api.ListOptions{})
func deleteResourceQuotas(kubeClient clientset.Interface, ns string) error {
resourceQuotas, err := kubeClient.Legacy().ResourceQuotas(ns).List(api.ListOptions{})
if err != nil {
return err
}
for i := range resourceQuotas.Items {
err := kubeClient.ResourceQuotas(ns).Delete(resourceQuotas.Items[i].Name)
err := kubeClient.Legacy().ResourceQuotas(ns).Delete(resourceQuotas.Items[i].Name, nil)
if err != nil && !errors.IsNotFound(err) {
return err
}

@@ -364,13 +365,13 @@ func deleteResourceQuotas(kubeClient client.Interface, ns string) error {
return nil
}

func deleteServiceAccounts(kubeClient client.Interface, ns string) error {
items, err := kubeClient.ServiceAccounts(ns).List(api.ListOptions{})
func deleteServiceAccounts(kubeClient clientset.Interface, ns string) error {
items, err := kubeClient.Legacy().ServiceAccounts(ns).List(api.ListOptions{})
if err != nil {
return err
}
for i := range items.Items {
err := kubeClient.ServiceAccounts(ns).Delete(items.Items[i].Name)
err := kubeClient.Legacy().ServiceAccounts(ns).Delete(items.Items[i].Name, nil)
if err != nil && !errors.IsNotFound(err) {
return err
}

@@ -378,13 +379,13 @@ func deleteServiceAccounts(kubeClient client.Interface, ns string) error {
return nil
}

func deleteServices(kubeClient client.Interface, ns string) error {
items, err := kubeClient.Services(ns).List(api.ListOptions{})
func deleteServices(kubeClient clientset.Interface, ns string) error {
items, err := kubeClient.Legacy().Services(ns).List(api.ListOptions{})
if err != nil {
return err
}
for i := range items.Items {
err := kubeClient.Services(ns).Delete(items.Items[i].Name)
err := kubeClient.Legacy().Services(ns).Delete(items.Items[i].Name, nil)
if err != nil && !errors.IsNotFound(err) {
return err
}

@@ -392,13 +393,13 @@ func deleteServices(kubeClient client.Interface, ns string) error {
return nil
}

func deleteReplicationControllers(kubeClient client.Interface, ns string) error {
items, err := kubeClient.ReplicationControllers(ns).List(api.ListOptions{})
func deleteReplicationControllers(kubeClient clientset.Interface, ns string) error {
items, err := kubeClient.Legacy().ReplicationControllers(ns).List(api.ListOptions{})
if err != nil {
return err
}
for i := range items.Items {
err := kubeClient.ReplicationControllers(ns).Delete(items.Items[i].Name)
err := kubeClient.Legacy().ReplicationControllers(ns).Delete(items.Items[i].Name, nil)
if err != nil && !errors.IsNotFound(err) {
return err
}

@@ -406,8 +407,8 @@ func deleteReplicationControllers(kubeClient client.Interface, ns string) error
return nil
}

func deletePods(kubeClient client.Interface, ns string, before unversioned.Time) (int64, error) {
items, err := kubeClient.Pods(ns).List(api.ListOptions{})
func deletePods(kubeClient clientset.Interface, ns string, before unversioned.Time) (int64, error) {
items, err := kubeClient.Legacy().Pods(ns).List(api.ListOptions{})
if err != nil {
return 0, err
}

@@ -424,7 +425,7 @@ func deletePods(kubeClient client.Interface, ns string, before unversioned.Time)
estimate = grace
}
}
err := kubeClient.Pods(ns).Delete(items.Items[i].Name, deleteOptions)
err := kubeClient.Legacy().Pods(ns).Delete(items.Items[i].Name, deleteOptions)
if err != nil && !errors.IsNotFound(err) {
return 0, err
}

@@ -435,17 +436,17 @@ func deletePods(kubeClient client.Interface, ns string, before unversioned.Time)
return estimate, nil
}

func deleteEvents(kubeClient client.Interface, ns string) error {
return kubeClient.Events(ns).DeleteCollection(nil, api.ListOptions{})
func deleteEvents(kubeClient clientset.Interface, ns string) error {
return kubeClient.Legacy().Events(ns).DeleteCollection(nil, api.ListOptions{})
}

func deleteSecrets(kubeClient client.Interface, ns string) error {
items, err := kubeClient.Secrets(ns).List(api.ListOptions{})
func deleteSecrets(kubeClient clientset.Interface, ns string) error {
items, err := kubeClient.Legacy().Secrets(ns).List(api.ListOptions{})
if err != nil {
return err
}
for i := range items.Items {
err := kubeClient.Secrets(ns).Delete(items.Items[i].Name)
err := kubeClient.Legacy().Secrets(ns).Delete(items.Items[i].Name, nil)
if err != nil && !errors.IsNotFound(err) {
return err
}

@@ -453,13 +454,13 @@ func deleteSecrets(kubeClient client.Interface, ns string) error {
return nil
}

func deletePersistentVolumeClaims(kubeClient client.Interface, ns string) error {
items, err := kubeClient.PersistentVolumeClaims(ns).List(api.ListOptions{})
func deletePersistentVolumeClaims(kubeClient clientset.Interface, ns string) error {
items, err := kubeClient.Legacy().PersistentVolumeClaims(ns).List(api.ListOptions{})
if err != nil {
return err
}
for i := range items.Items {
err := kubeClient.PersistentVolumeClaims(ns).Delete(items.Items[i].Name)
err := kubeClient.Legacy().PersistentVolumeClaims(ns).Delete(items.Items[i].Name, nil)
if err != nil && !errors.IsNotFound(err) {
return err
}

@@ -467,7 +468,7 @@ func deletePersistentVolumeClaims(kubeClient client.Interface, ns string) error
return nil
}

func deleteHorizontalPodAutoscalers(expClient client.ExtensionsInterface, ns string) error {
func deleteHorizontalPodAutoscalers(expClient extensions_unversioned.ExtensionsInterface, ns string) error {
items, err := expClient.HorizontalPodAutoscalers(ns).List(api.ListOptions{})
if err != nil {
return err

@@ -481,13 +482,13 @@ func deleteHorizontalPodAutoscalers(expClient client.ExtensionsInterface, ns str
return nil
}

func deleteDaemonSets(expClient client.ExtensionsInterface, ns string) error {
func deleteDaemonSets(expClient extensions_unversioned.ExtensionsInterface, ns string) error {
items, err := expClient.DaemonSets(ns).List(api.ListOptions{})
if err != nil {
return err
}
for i := range items.Items {
err := expClient.DaemonSets(ns).Delete(items.Items[i].Name)
err := expClient.DaemonSets(ns).Delete(items.Items[i].Name, nil)
if err != nil && !errors.IsNotFound(err) {
return err
}

@@ -495,7 +496,7 @@ func deleteDaemonSets(expClient client.ExtensionsInterface, ns string) error {
return nil
}

func deleteJobs(expClient client.ExtensionsInterface, ns string) error {
func deleteJobs(expClient extensions_unversioned.ExtensionsInterface, ns string) error {
items, err := expClient.Jobs(ns).List(api.ListOptions{})
if err != nil {
return err

@@ -509,7 +510,7 @@ func deleteJobs(expClient client.ExtensionsInterface, ns string) error {
return nil
}

func deleteDeployments(expClient client.ExtensionsInterface, ns string) error {
func deleteDeployments(expClient extensions_unversioned.ExtensionsInterface, ns string) error {
items, err := expClient.Deployments(ns).List(api.ListOptions{})
if err != nil {
return err

@@ -523,13 +524,13 @@ func deleteDeployments(expClient client.ExtensionsInterface, ns string) error {
return nil
}

func deleteIngress(expClient client.ExtensionsInterface, ns string) error {
items, err := expClient.Ingress(ns).List(api.ListOptions{})
func deleteIngress(expClient extensions_unversioned.ExtensionsInterface, ns string) error {
items, err := expClient.Ingresses(ns).List(api.ListOptions{})
if err != nil {
return err
}
for i := range items.Items {
err := expClient.Ingress(ns).Delete(items.Items[i].Name, nil)
err := expClient.Ingresses(ns).Delete(items.Items[i].Name, nil)
if err != nil && !errors.IsNotFound(err) {
return err
}
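Note: the delete helpers above all follow the same list-then-delete shape against a single API group. As an illustration (not part of this diff), the same pattern applied to the ConfigMaps client introduced earlier in this PR would look like the sketch below; it assumes the namespace controller package and the same api, errors, and clientset imports as the file above.

package namespace

import (
    api "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/api/errors"
    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
)

func deleteConfigMaps(kubeClient clientset.Interface, ns string) error {
    items, err := kubeClient.Legacy().ConfigMaps(ns).List(api.ListOptions{})
    if err != nil {
        return err
    }
    for i := range items.Items {
        err := kubeClient.Legacy().ConfigMaps(ns).Delete(items.Items[i].Name, nil)
        if err != nil && !errors.IsNotFound(err) {
            return err
        }
    }
    return nil
}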
@@ -25,8 +25,9 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/unversioned"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/client/unversioned/testclient"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
"k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/client/testing/fake"
"k8s.io/kubernetes/pkg/util/sets"
)

@@ -46,7 +47,7 @@ func TestFinalized(t *testing.T) {
}

func TestFinalizeNamespaceFunc(t *testing.T) {
mockClient := &testclient.Fake{}
mockClient := &fake.Clientset{}
testNamespace := &api.Namespace{
ObjectMeta: api.ObjectMeta{
Name: "test",

@@ -64,7 +65,7 @@ func TestFinalizeNamespaceFunc(t *testing.T) {
if !actions[0].Matches("create", "namespaces") || actions[0].GetSubresource() != "finalize" {
t.Errorf("Expected finalize-namespace action %v", actions[0])
}
finalizers := actions[0].(testclient.CreateAction).GetObject().(*api.Namespace).Spec.Finalizers
finalizers := actions[0].(core.CreateAction).GetObject().(*api.Namespace).Spec.Finalizers
if len(finalizers) != 1 {
t.Errorf("There should be a single finalizer remaining")
}

@@ -144,7 +145,7 @@ func testSyncNamespaceThatIsTerminating(t *testing.T, versions *unversioned.APIV
}

for scenario, testInput := range scenarios {
mockClient := testclient.NewSimpleFake(testInput.testNamespace)
mockClient := fake.NewSimpleClientset(testInput.testNamespace)
if containsVersion(versions, "extensions/v1beta1") {
resources := []unversioned.APIResource{}
for _, resource := range []string{"daemonsets", "deployments", "jobs", "horizontalpodautoscalers", "ingresses"} {

@@ -175,9 +176,9 @@ func testSyncNamespaceThatIsTerminating(t *testing.T, versions *unversioned.APIV
}

func TestRetryOnConflictError(t *testing.T) {
mockClient := &testclient.Fake{}
mockClient := &fake.Clientset{}
numTries := 0
retryOnce := func(kubeClient client.Interface, namespace *api.Namespace) (*api.Namespace, error) {
retryOnce := func(kubeClient clientset.Interface, namespace *api.Namespace) (*api.Namespace, error) {
numTries++
if numTries <= 1 {
return namespace, errors.NewConflict(api.Resource("namespaces"), namespace.Name, fmt.Errorf("ERROR!"))

@@ -203,7 +204,7 @@ func TestSyncNamespaceThatIsTerminatingV1Beta1(t *testing.T) {
}

func TestSyncNamespaceThatIsActive(t *testing.T) {
mockClient := &testclient.Fake{}
mockClient := &fake.Clientset{}
testNamespace := &api.Namespace{
ObjectMeta: api.ObjectMeta{
Name: "test",

@@ -226,7 +227,7 @@ func TestSyncNamespaceThatIsActive(t *testing.T) {
}

func TestRunStop(t *testing.T) {
mockClient := &testclient.Fake{}
mockClient := &fake.Clientset{}

nsController := NewNamespaceController(mockClient, &unversioned.APIVersions{}, 1*time.Second)
@@ -28,7 +28,9 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
"k8s.io/kubernetes/pkg/client/record"
unversioned_legacy "k8s.io/kubernetes/pkg/client/typed/generated/legacy/unversioned"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/controller"

@@ -67,7 +69,7 @@ type NodeController struct {
clusterCIDR *net.IPNet
deletingPodsRateLimiter util.RateLimiter
knownNodeSet sets.String
kubeClient client.Interface
kubeClient clientset.Interface
// Method for easy mocking in unittest.
lookupIP func(host string) ([]net.IP, error)
// Value used if sync_nodes_status=False. NodeController will not proactively

@@ -122,7 +124,7 @@ type NodeController struct {
// NewNodeController returns a new node controller to sync instances from cloudprovider.
func NewNodeController(
cloud cloudprovider.Interface,
kubeClient client.Interface,
kubeClient clientset.Interface,
podEvictionTimeout time.Duration,
deletionEvictionLimiter util.RateLimiter,
terminationEvictionLimiter util.RateLimiter,

@@ -136,7 +138,7 @@ func NewNodeController(
eventBroadcaster.StartLogging(glog.Infof)
if kubeClient != nil {
glog.Infof("Sending events to api server.")
eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))
eventBroadcaster.StartRecordingToSink(&unversioned_legacy.EventSinkImpl{kubeClient.Legacy().Events("")})
} else {
glog.Infof("No api server defined - no events will be sent to API server.")
}
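Note: a hedged, minimal sketch of the event-sink wiring above in isolation, for a component that only needs to emit events. The generated Events client does not satisfy the record.EventSink interface directly, so it is wrapped in the EventSinkImpl adapter, as in the controller above; the component name is illustrative, and NewRecorder/EventSource usage is assumed from the contemporaneous record package.

package main

import (
    "github.com/golang/glog"

    api "k8s.io/kubernetes/pkg/api"
    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
    "k8s.io/kubernetes/pkg/client/record"
    unversioned_legacy "k8s.io/kubernetes/pkg/client/typed/generated/legacy/unversioned"
)

func newRecorder(cs clientset.Interface) record.EventRecorder {
    b := record.NewBroadcaster()
    b.StartLogging(glog.Infof)
    // Adapter around the generated Events client, mirroring the NodeController change above.
    b.StartRecordingToSink(&unversioned_legacy.EventSinkImpl{cs.Legacy().Events("")})
    return b.NewRecorder(api.EventSource{Component: "example-controller"}) // illustrative component name
}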
@ -169,10 +171,10 @@ func NewNodeController(
|
|||
nc.podStore.Store, nc.podController = framework.NewInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
|
||||
return nc.kubeClient.Pods(api.NamespaceAll).List(options)
|
||||
return nc.kubeClient.Legacy().Pods(api.NamespaceAll).List(options)
|
||||
},
|
||||
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
|
||||
return nc.kubeClient.Pods(api.NamespaceAll).Watch(options)
|
||||
return nc.kubeClient.Legacy().Pods(api.NamespaceAll).Watch(options)
|
||||
},
|
||||
},
|
||||
&api.Pod{},
|
||||
|
@ -185,10 +187,10 @@ func NewNodeController(
|
|||
nc.nodeStore.Store, nc.nodeController = framework.NewInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
|
||||
return nc.kubeClient.Nodes().List(options)
|
||||
return nc.kubeClient.Legacy().Nodes().List(options)
|
||||
},
|
||||
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
|
||||
return nc.kubeClient.Nodes().Watch(options)
|
||||
return nc.kubeClient.Legacy().Nodes().Watch(options)
|
||||
},
|
||||
},
|
||||
&api.Node{},
|
||||
|
@ -363,9 +365,9 @@ func (nc *NodeController) maybeDeleteTerminatingPod(obj interface{}) {
|
|||
}
|
||||
}
|
||||
|
||||
func forcefullyDeletePod(c client.Interface, pod *api.Pod) {
|
||||
func forcefullyDeletePod(c clientset.Interface, pod *api.Pod) {
|
||||
var zero int64
|
||||
err := c.Pods(pod.Namespace).Delete(pod.Name, &api.DeleteOptions{GracePeriodSeconds: &zero})
|
||||
err := c.Legacy().Pods(pod.Namespace).Delete(pod.Name, &api.DeleteOptions{GracePeriodSeconds: &zero})
|
||||
if err != nil {
|
||||
utilruntime.HandleError(err)
|
||||
}
|
||||
|
@ -375,7 +377,7 @@ func forcefullyDeletePod(c client.Interface, pod *api.Pod) {
|
|||
// post "NodeReady==ConditionUnknown". It also evicts all pods if node is not ready or
|
||||
// not reachable for a long period of time.
|
||||
func (nc *NodeController) monitorNodeStatus() error {
|
||||
nodes, err := nc.kubeClient.Nodes().List(api.ListOptions{})
|
||||
nodes, err := nc.kubeClient.Legacy().Nodes().List(api.ListOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -419,7 +421,7 @@ func (nc *NodeController) monitorNodeStatus() error {
|
|||
break
|
||||
}
|
||||
name := node.Name
|
||||
node, err = nc.kubeClient.Nodes().Get(name)
|
||||
node, err = nc.kubeClient.Legacy().Nodes().Get(name)
|
||||
if err != nil {
|
||||
glog.Errorf("Failed while getting a Node to retry updating NodeStatus. Probably Node %s was deleted.", name)
|
||||
break
|
||||
|
@ -485,7 +487,7 @@ func (nc *NodeController) monitorNodeStatus() error {
|
|||
continue
|
||||
}
|
||||
|
||||
if err := nc.kubeClient.Nodes().Delete(node.Name); err != nil {
|
||||
if err := nc.kubeClient.Legacy().Nodes().Delete(node.Name, nil); err != nil {
|
||||
glog.Errorf("Unable to delete node %s: %v", node.Name, err)
|
||||
continue
|
||||
}
|
||||
|
@ -518,7 +520,7 @@ func (nc *NodeController) reconcileNodeCIDRs(nodes *api.NodeList) {
|
|||
}
|
||||
glog.V(4).Infof("Assigning node %s CIDR %s", node.Name, podCIDR)
|
||||
node.Spec.PodCIDR = podCIDR
|
||||
if _, err := nc.kubeClient.Nodes().Update(&node); err != nil {
|
||||
if _, err := nc.kubeClient.Legacy().Nodes().Update(&node); err != nil {
|
||||
nc.recordNodeStatusChange(&node, "CIDRAssignmentFailed")
|
||||
}
|
||||
}
|
||||
|
@ -696,7 +698,7 @@ func (nc *NodeController) tryUpdateNodeStatus(node *api.Node) (time.Duration, ap
|
|||
}
|
||||
|
||||
if !api.Semantic.DeepEqual(nc.getCondition(&node.Status, api.NodeReady), &lastReadyCondition) {
|
||||
if _, err = nc.kubeClient.Nodes().UpdateStatus(node); err != nil {
|
||||
if _, err = nc.kubeClient.Legacy().Nodes().UpdateStatus(node); err != nil {
|
||||
glog.Errorf("Error updating node %s: %v", node.Name, err)
|
||||
return gracePeriod, lastReadyCondition, readyCondition, err
|
||||
} else {
|
||||
|
@ -718,7 +720,7 @@ func (nc *NodeController) tryUpdateNodeStatus(node *api.Node) (time.Duration, ap
|
|||
func (nc *NodeController) hasPods(nodeName string) (bool, error) {
|
||||
selector := fields.OneTermEqualSelector(client.PodHost, nodeName)
|
||||
options := api.ListOptions{FieldSelector: selector}
|
||||
pods, err := nc.kubeClient.Pods(api.NamespaceAll).List(options)
|
||||
pods, err := nc.kubeClient.Legacy().Pods(api.NamespaceAll).List(options)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
@ -753,7 +755,7 @@ func (nc *NodeController) deletePods(nodeName string) (bool, error) {
|
|||
remaining := false
|
||||
selector := fields.OneTermEqualSelector(client.PodHost, nodeName)
|
||||
options := api.ListOptions{FieldSelector: selector}
|
||||
pods, err := nc.kubeClient.Pods(api.NamespaceAll).List(options)
|
||||
pods, err := nc.kubeClient.Legacy().Pods(api.NamespaceAll).List(options)
|
||||
if err != nil {
|
||||
return remaining, err
|
||||
}
|
||||
|
@ -779,7 +781,7 @@ func (nc *NodeController) deletePods(nodeName string) (bool, error) {
|
|||
|
||||
glog.V(2).Infof("Starting deletion of pod %v", pod.Name)
|
||||
nc.recorder.Eventf(&pod, api.EventTypeNormal, "NodeControllerEviction", "Marking for deletion Pod %s from Node %s", pod.Name, nodeName)
|
||||
if err := nc.kubeClient.Pods(pod.Namespace).Delete(pod.Name, nil); err != nil {
|
||||
if err := nc.kubeClient.Legacy().Pods(pod.Namespace).Delete(pod.Name, nil); err != nil {
|
||||
return false, err
|
||||
}
|
||||
remaining = true
|
||||
|
@ -792,7 +794,7 @@ func (nc *NodeController) deletePods(nodeName string) (bool, error) {
|
|||
func (nc *NodeController) markAllPodsNotReady(nodeName string) error {
|
||||
glog.V(2).Infof("Update ready status of pods on node [%v]", nodeName)
|
||||
opts := api.ListOptions{FieldSelector: fields.OneTermEqualSelector(client.PodHost, nodeName)}
|
||||
pods, err := nc.kubeClient.Pods(api.NamespaceAll).List(opts)
|
||||
pods, err := nc.kubeClient.Legacy().Pods(api.NamespaceAll).List(opts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -808,7 +810,7 @@ func (nc *NodeController) markAllPodsNotReady(nodeName string) error {
|
|||
if cond.Type == api.PodReady {
|
||||
pod.Status.Conditions[i].Status = api.ConditionFalse
|
||||
glog.V(2).Infof("Updating ready status of pod %v to false", pod.Name)
|
||||
pod, err := nc.kubeClient.Pods(pod.Namespace).UpdateStatus(&pod)
|
||||
pod, err := nc.kubeClient.Legacy().Pods(pod.Namespace).UpdateStatus(&pod)
|
||||
if err != nil {
|
||||
glog.Warningf("Failed to updated status for pod %q: %v", format.Pod(pod), err)
|
||||
errMsg = append(errMsg, fmt.Sprintf("%v", err))
|
||||
|
@ -834,7 +836,7 @@ func (nc *NodeController) terminatePods(nodeName string, since time.Time) (bool,
|
|||
|
||||
selector := fields.OneTermEqualSelector(client.PodHost, nodeName)
|
||||
options := api.ListOptions{FieldSelector: selector}
|
||||
pods, err := nc.kubeClient.Pods(api.NamespaceAll).List(options)
|
||||
pods, err := nc.kubeClient.Legacy().Pods(api.NamespaceAll).List(options)
|
||||
if err != nil {
|
||||
return false, nextAttempt, err
|
||||
}
|
||||
|
@ -863,7 +865,7 @@ func (nc *NodeController) terminatePods(nodeName string, since time.Time) (bool,
|
|||
remaining = 0
|
||||
glog.V(2).Infof("Removing pod %v after %s grace period", pod.Name, grace)
|
||||
nc.recordNodeEvent(nodeName, api.EventTypeNormal, "TerminatingEvictedPod", fmt.Sprintf("Pod %s has exceeded the grace period for deletion after being evicted from Node %q and is being force killed", pod.Name, nodeName))
|
||||
if err := nc.kubeClient.Pods(pod.Namespace).Delete(pod.Name, api.NewDeleteOptions(0)); err != nil {
|
||||
if err := nc.kubeClient.Legacy().Pods(pod.Namespace).Delete(pod.Name, api.NewDeleteOptions(0)); err != nil {
|
||||
glog.Errorf("Error completing deletion of pod %s: %v", pod.Name, err)
|
||||
complete = false
|
||||
}
|
||||
|
|
|
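The hunks above apply one mechanical substitution: the node controller stops calling resource methods on the flat unversioned client and goes through the generated clientset's Legacy() group instead. A minimal sketch of the new call shape, assuming the release_1_1 import path and the Legacy()/Pods() accessors exactly as they appear in this diff (package and function names here are illustrative):

```go
// Sketch only: the import path and accessors are assumptions taken from this
// diff (the release_1_1 generated clientset exposing a Legacy() API group).
package example

import (
	"k8s.io/kubernetes/pkg/api"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
)

// listAllPods shows the new call shape: kubeClient.Legacy().Pods(...).List(...)
// replaces the old kubeClient.Pods(...).List(...) on the unversioned client.
func listAllPods(kubeClient clientset.Interface, options api.ListOptions) (*api.PodList, error) {
	return kubeClient.Legacy().Pods(api.NamespaceAll).List(options)
}
```
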
@ -28,8 +28,8 @@ import (
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/client/cache"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/client/unversioned/testclient"
"k8s.io/kubernetes/pkg/client/testing/fake"
unversioned_legacy "k8s.io/kubernetes/pkg/client/typed/generated/legacy/unversioned"
"k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/pkg/watch"
)

@ -45,7 +45,7 @@ const (
// PodsInterface and PodInterface to test list & delet pods, which is implemented in
// the embedded client.Fake field.
type FakeNodeHandler struct {
*testclient.Fake
*fake.Clientset

// Input: Hooks determine if request is valid or not
CreateHook func(*FakeNodeHandler, *api.Node) bool

@ -62,8 +62,17 @@ type FakeNodeHandler struct {
createLock sync.Mutex
}

func (c *FakeNodeHandler) Nodes() client.NodeInterface {
return c
type FakeLegacyHandler struct {
unversioned_legacy.LegacyInterface
n *FakeNodeHandler
}

func (c *FakeNodeHandler) Legacy() unversioned_legacy.LegacyInterface {
return &FakeLegacyHandler{c.Clientset.Legacy(), c}
}

func (m *FakeLegacyHandler) Nodes() unversioned_legacy.NodeInterface {
return m.n
}

func (m *FakeNodeHandler) Create(node *api.Node) (*api.Node, error) {

@ -115,12 +124,16 @@ func (m *FakeNodeHandler) List(opts api.ListOptions) (*api.NodeList, error) {
return nodeList, nil
}

func (m *FakeNodeHandler) Delete(id string) error {
func (m *FakeNodeHandler) Delete(id string, opt *api.DeleteOptions) error {
m.DeletedNodes = append(m.DeletedNodes, newNode(id))
m.RequestCount++
return nil
}

func (m *FakeNodeHandler) DeleteCollection(opt *api.DeleteOptions, listOpts api.ListOptions) error {
return nil
}

func (m *FakeNodeHandler) Update(node *api.Node) (*api.Node, error) {
nodeCopy := *node
m.UpdatedNodes = append(m.UpdatedNodes, &nodeCopy)

@ -162,7 +175,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
},
},
},
Fake: testclient.NewSimpleFake(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}),
Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}),
},
daemonSets: nil,
timeToPass: 0,

@ -191,7 +204,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
},
},
},
Fake: testclient.NewSimpleFake(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}),
Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}),
},
daemonSets: nil,
timeToPass: evictionTimeout,

@ -230,7 +243,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
},
},
},
Fake: testclient.NewSimpleFake(
Clientset: fake.NewSimpleClientset(
&api.PodList{
Items: []api.Pod{
{

@ -296,7 +309,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
},
},
},
Fake: testclient.NewSimpleFake(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}),
Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}),
},
daemonSets: nil,
timeToPass: time.Hour,

@ -335,7 +348,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
},
},
},
Fake: testclient.NewSimpleFake(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}),
Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}),
},
daemonSets: nil,
timeToPass: evictionTimeout - testNodeMonitorGracePeriod,

@ -374,7 +387,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
},
},
},
Fake: testclient.NewSimpleFake(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}),
Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}),
},
daemonSets: nil,
timeToPass: 60 * time.Minute,

@ -460,7 +473,7 @@ func TestMonitorNodeStatusUpdateStatus(t *testing.T) {
},
},
},
Fake: testclient.NewSimpleFake(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}),
Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}),
},
expectedRequestCount: 2, // List+Update
expectedNodes: []*api.Node{

@ -504,7 +517,7 @@ func TestMonitorNodeStatusUpdateStatus(t *testing.T) {
},
},
},
Fake: testclient.NewSimpleFake(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}),
Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}),
},
expectedRequestCount: 1, // List
expectedNodes: nil,

@ -546,7 +559,7 @@ func TestMonitorNodeStatusUpdateStatus(t *testing.T) {
},
},
},
Fake: testclient.NewSimpleFake(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}),
Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}),
},
expectedRequestCount: 3, // (List+)List+Update
timeToPass: time.Hour,

@ -638,7 +651,7 @@ func TestMonitorNodeStatusUpdateStatus(t *testing.T) {
},
},
},
Fake: testclient.NewSimpleFake(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}),
Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}),
},
expectedRequestCount: 1, // List
expectedNodes: nil,

@ -691,7 +704,7 @@ func TestMonitorNodeStatusMarkPodsNotReady(t *testing.T) {
},
},
},
Fake: testclient.NewSimpleFake(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}),
Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}),
},
expectedPodStatusUpdate: false,
},

@ -725,7 +738,7 @@ func TestMonitorNodeStatusMarkPodsNotReady(t *testing.T) {
},
},
},
Fake: testclient.NewSimpleFake(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}),
Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}),
},
expectedPodStatusUpdate: false,
},

@ -766,7 +779,7 @@ func TestMonitorNodeStatusMarkPodsNotReady(t *testing.T) {
},
},
},
Fake: testclient.NewSimpleFake(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}),
Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}),
},
timeToPass: 1 * time.Minute,
newNodeStatus: api.NodeStatus{

@ -875,7 +888,7 @@ func TestNodeDeletion(t *testing.T) {
},
},
},
Fake: testclient.NewSimpleFake(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0"), *newPod("pod1", "node1")}}),
Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0"), *newPod("pod1", "node1")}}),
}

nodeController := NewNodeController(nil, fakeNodeHandler, 5*time.Minute, util.NewFakeRateLimiter(), util.NewFakeRateLimiter(),

@ -884,7 +897,7 @@ func TestNodeDeletion(t *testing.T) {
if err := nodeController.monitorNodeStatus(); err != nil {
t.Errorf("unexpected error: %v", err)
}
fakeNodeHandler.Delete("node1")
fakeNodeHandler.Delete("node1", nil)
if err := nodeController.monitorNodeStatus(); err != nil {
t.Errorf("unexpected error: %v", err)
}
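For tests, the same migration swaps testclient.Fake for the generated fake.Clientset and switches reactors to core.Action. A reduced sketch of that pattern, using only the constructors and signatures visible in the hunks above (the helper name is illustrative):

```go
// Sketch only: fake.NewSimpleClientset, AddReactor, and core.Action are
// assumed to behave exactly as they do in the test hunks above.
package example

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/testing/core"
	"k8s.io/kubernetes/pkg/client/testing/fake"
	"k8s.io/kubernetes/pkg/runtime"
)

// newSeededFake builds the generated fake clientset pre-loaded with objects
// and overrides the "list pods" verb the same way the tests above do.
func newSeededFake(objs ...runtime.Object) *fake.Clientset {
	fakeClient := fake.NewSimpleClientset(objs...)
	fakeClient.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
		// Return a canned, empty pod list for every list request.
		return true, &api.PodList{}, nil
	})
	return fakeClient
}
```
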
@ -28,7 +28,8 @@ import (
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/client/record"
client "k8s.io/kubernetes/pkg/client/unversioned"
unversioned_extensions "k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned"
unversioned_legacy "k8s.io/kubernetes/pkg/client/typed/generated/legacy/unversioned"
"k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
"k8s.io/kubernetes/pkg/util"
)

@ -43,8 +44,8 @@ const (
)

type HorizontalController struct {
scaleNamespacer client.ScaleNamespacer
hpaNamespacer client.HorizontalPodAutoscalersNamespacer
scaleNamespacer unversioned_extensions.ScalesGetter
hpaNamespacer unversioned_extensions.HorizontalPodAutoscalersGetter

metricsClient metrics.MetricsClient
eventRecorder record.EventRecorder

@ -53,7 +54,7 @@ type HorizontalController struct {
var downscaleForbiddenWindow = 5 * time.Minute
var upscaleForbiddenWindow = 3 * time.Minute

func NewHorizontalController(evtNamespacer client.EventNamespacer, scaleNamespacer client.ScaleNamespacer, hpaNamespacer client.HorizontalPodAutoscalersNamespacer, metricsClient metrics.MetricsClient) *HorizontalController {
func NewHorizontalController(evtNamespacer unversioned_legacy.EventsGetter, scaleNamespacer unversioned_extensions.ScalesGetter, hpaNamespacer unversioned_extensions.HorizontalPodAutoscalersGetter, metricsClient metrics.MetricsClient) *HorizontalController {
broadcaster := record.NewBroadcaster()
broadcaster.StartRecordingToSink(evtNamespacer.Events(""))
recorder := broadcaster.NewRecorder(api.EventSource{Component: "horizontal-pod-autoscaler"})

@ -27,6 +27,8 @@ import (
"k8s.io/kubernetes/pkg/api/resource"
_ "k8s.io/kubernetes/pkg/apimachinery/registered"
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/client/testing/fake"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/client/unversioned/testclient"
"k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"

@ -68,7 +70,7 @@ type testCase struct {
verifyEvents bool
}

func (tc *testCase) prepareTestClient(t *testing.T) *testclient.Fake {
func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
namespace := "test-namespace"
hpaName := "test-hpa"
rcName := "test-rc"

@ -77,8 +79,8 @@ func (tc *testCase) prepareTestClient(t *testing.T) *testclient.Fake {
tc.scaleUpdated = false
tc.eventCreated = false

fakeClient := &testclient.Fake{}
fakeClient.AddReactor("list", "horizontalpodautoscalers", func(action testclient.Action) (handled bool, ret runtime.Object, err error) {
fakeClient := &fake.Clientset{}
fakeClient.AddReactor("list", "horizontalpodautoscalers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
obj := &extensions.HorizontalPodAutoscalerList{
Items: []extensions.HorizontalPodAutoscaler{
{

@ -113,7 +115,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) *testclient.Fake {
return true, obj, nil
})

fakeClient.AddReactor("get", "replicationController", func(action testclient.Action) (handled bool, ret runtime.Object, err error) {
fakeClient.AddReactor("get", "replicationController", func(action core.Action) (handled bool, ret runtime.Object, err error) {
obj := &extensions.Scale{
ObjectMeta: api.ObjectMeta{
Name: rcName,

@ -130,7 +132,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) *testclient.Fake {
return true, obj, nil
})

fakeClient.AddReactor("list", "pods", func(action testclient.Action) (handled bool, ret runtime.Object, err error) {
fakeClient.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
obj := &api.PodList{}
for i := 0; i < len(tc.reportedCPURequests); i++ {
podName := fmt.Sprintf("%s-%d", podNamePrefix, i)

@ -162,7 +164,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) *testclient.Fake {
return true, obj, nil
})

fakeClient.AddProxyReactor("services", func(action testclient.Action) (handled bool, ret client.ResponseWrapper, err error) {
fakeClient.AddProxyReactor("services", func(action core.Action) (handled bool, ret client.ResponseWrapper, err error) {
timestamp := time.Now()
metrics := heapster.MetricResultList{}
for _, level := range tc.reportedLevels {

@ -176,7 +178,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) *testclient.Fake {
return true, newFakeResponseWrapper(heapsterRawMemResponse), nil
})

fakeClient.AddReactor("update", "replicationController", func(action testclient.Action) (handled bool, ret runtime.Object, err error) {
fakeClient.AddReactor("update", "replicationController", func(action core.Action) (handled bool, ret runtime.Object, err error) {
obj := action.(testclient.UpdateAction).GetObject().(*extensions.Scale)
replicas := action.(testclient.UpdateAction).GetObject().(*extensions.Scale).Spec.Replicas
assert.Equal(t, tc.desiredReplicas, replicas)

@ -184,7 +186,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) *testclient.Fake {
return true, obj, nil
})

fakeClient.AddReactor("update", "horizontalpodautoscalers", func(action testclient.Action) (handled bool, ret runtime.Object, err error) {
fakeClient.AddReactor("update", "horizontalpodautoscalers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
obj := action.(testclient.UpdateAction).GetObject().(*extensions.HorizontalPodAutoscaler)
assert.Equal(t, namespace, obj.Namespace)
assert.Equal(t, hpaName, obj.Name)

@ -192,7 +194,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) *testclient.Fake {
return true, obj, nil
})

fakeClient.AddReactor("*", "events", func(action testclient.Action) (handled bool, ret runtime.Object, err error) {
fakeClient.AddReactor("*", "events", func(action core.Action) (handled bool, ret runtime.Object, err error) {
obj := action.(testclient.CreateAction).GetObject().(*api.Event)
if tc.verifyEvents {
assert.Equal(t, "SuccessfulRescale", obj.Reason)

@ -215,7 +217,7 @@ func (tc *testCase) verifyResults(t *testing.T) {
func (tc *testCase) runTest(t *testing.T) {
testClient := tc.prepareTestClient(t)
metricsClient := metrics.NewHeapsterMetricsClient(testClient, metrics.DefaultHeapsterNamespace, metrics.DefaultHeapsterScheme, metrics.DefaultHeapsterService, metrics.DefaultHeapsterPort)
hpaController := NewHorizontalController(testClient, testClient.Extensions(), testClient.Extensions(), metricsClient)
hpaController := NewHorizontalController(testClient.Legacy(), testClient.Extensions(), testClient.Extensions(), metricsClient)
err := hpaController.reconcileAutoscalers()
assert.Equal(t, nil, err)
if tc.verifyEvents {

@ -24,7 +24,7 @@ import (

"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
client "k8s.io/kubernetes/pkg/client/unversioned"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
"k8s.io/kubernetes/pkg/labels"

heapster "k8s.io/heapster/api/v1/types"

@ -66,7 +66,7 @@ type metricDefinition struct {

// HeapsterMetricsClient is Heapster-based implementation of MetricsClient
type HeapsterMetricsClient struct {
client client.Interface
client clientset.Interface
heapsterNamespace string
heapsterScheme string
heapsterService string

@ -90,7 +90,7 @@ func getHeapsterCustomMetricDefinition(metricName string) metricDefinition {
}

// NewHeapsterMetricsClient returns a new instance of Heapster-based implementation of MetricsClient interface.
func NewHeapsterMetricsClient(client client.Interface, namespace, scheme, service, port string) *HeapsterMetricsClient {
func NewHeapsterMetricsClient(client clientset.Interface, namespace, scheme, service, port string) *HeapsterMetricsClient {
return &HeapsterMetricsClient{
client: client,
heapsterNamespace: namespace,

@ -113,7 +113,7 @@ func (h *HeapsterMetricsClient) GetCpuConsumptionAndRequestInMillis(namespace st
avgRequest int64, timestamp time.Time, err error) {

labelSelector := labels.SelectorFromSet(labels.Set(selector))
podList, err := h.client.Pods(namespace).
podList, err := h.client.Legacy().Pods(namespace).
List(api.ListOptions{LabelSelector: labelSelector})

if err != nil {

@ -152,7 +152,7 @@ func (h *HeapsterMetricsClient) GetCustomMetric(customMetricName string, namespa
metricSpec := getHeapsterCustomMetricDefinition(customMetricName)

labelSelector := labels.SelectorFromSet(labels.Set(selector))
podList, err := h.client.Pods(namespace).List(api.ListOptions{LabelSelector: labelSelector})
podList, err := h.client.Legacy().Pods(namespace).List(api.ListOptions{LabelSelector: labelSelector})

if err != nil {
return nil, time.Time{}, fmt.Errorf("failed to get pod list: %v", err)

@ -179,7 +179,7 @@ func (h *HeapsterMetricsClient) getForPods(metricSpec metricDefinition, namespac
strings.Join(podNames, ","),
metricSpec.name)

resultRaw, err := h.client.Services(h.heapsterNamespace).
resultRaw, err := h.client.Legacy().Services(h.heapsterNamespace).
ProxyGet(h.heapsterScheme, h.heapsterService, h.heapsterPort, metricPath, map[string]string{"start": startTime.Format(time.RFC3339)}).
DoRaw()

@ -26,8 +26,9 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
_ "k8s.io/kubernetes/pkg/apimachinery/registered"
"k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/client/testing/fake"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/client/unversioned/testclient"
"k8s.io/kubernetes/pkg/runtime"

heapster "k8s.io/heapster/api/v1/types"

@ -70,16 +71,16 @@ type testCase struct {
selector map[string]string
}

func (tc *testCase) prepareTestClient(t *testing.T) *testclient.Fake {
func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
namespace := "test-namespace"
tc.namespace = namespace
podNamePrefix := "test-pod"
selector := map[string]string{"name": podNamePrefix}
tc.selector = selector

fakeClient := &testclient.Fake{}
fakeClient := &fake.Clientset{}

fakeClient.AddReactor("list", "pods", func(action testclient.Action) (handled bool, ret runtime.Object, err error) {
fakeClient.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
obj := &api.PodList{}
for i := 0; i < tc.replicas; i++ {
podName := fmt.Sprintf("%s-%d", podNamePrefix, i)

@ -109,7 +110,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) *testclient.Fake {
return true, obj, nil
})

fakeClient.AddProxyReactor("services", func(action testclient.Action) (handled bool, ret client.ResponseWrapper, err error) {
fakeClient.AddProxyReactor("services", func(action core.Action) (handled bool, ret client.ResponseWrapper, err error) {
metrics := heapster.MetricResultList{}
var latestTimestamp time.Time
for _, reportedMetricPoints := range tc.reportedMetricsPoints {

@ -24,7 +24,7 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/client/cache"
client "k8s.io/kubernetes/pkg/client/unversioned"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/runtime"

@ -37,7 +37,7 @@ import (
// ResourceQuotaController is responsible for tracking quota usage status in the system
type ResourceQuotaController struct {
// Must have authority to list all resources in the system, and update quota status
kubeClient client.Interface
kubeClient clientset.Interface
// An index of resource quota objects by namespace
rqIndexer cache.Indexer
// Watches changes to all resource quota

@ -55,7 +55,7 @@ type ResourceQuotaController struct {
}

// NewResourceQuotaController creates a new ResourceQuotaController
func NewResourceQuotaController(kubeClient client.Interface, resyncPeriod controller.ResyncPeriodFunc) *ResourceQuotaController {
func NewResourceQuotaController(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc) *ResourceQuotaController {

rq := &ResourceQuotaController{
kubeClient: kubeClient,

@ -66,10 +66,10 @@ func NewResourceQuotaController(kubeClient client.Interface, resyncPeriod contro
rq.rqIndexer, rq.rqController = framework.NewIndexerInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return rq.kubeClient.ResourceQuotas(api.NamespaceAll).List(options)
return rq.kubeClient.Legacy().ResourceQuotas(api.NamespaceAll).List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
return rq.kubeClient.ResourceQuotas(api.NamespaceAll).Watch(options)
return rq.kubeClient.Legacy().ResourceQuotas(api.NamespaceAll).Watch(options)
},
},
&api.ResourceQuota{},

@ -106,10 +106,10 @@ func NewResourceQuotaController(kubeClient client.Interface, resyncPeriod contro
rq.podStore.Store, rq.podController = framework.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return rq.kubeClient.Pods(api.NamespaceAll).List(options)
return rq.kubeClient.Legacy().Pods(api.NamespaceAll).List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
return rq.kubeClient.Pods(api.NamespaceAll).Watch(options)
return rq.kubeClient.Legacy().Pods(api.NamespaceAll).Watch(options)
},
},
&api.Pod{},

@ -265,7 +265,7 @@ func (rq *ResourceQuotaController) syncResourceQuota(quota api.ResourceQuota) (e

pods := &api.PodList{}
if set[api.ResourcePods] || set[api.ResourceMemory] || set[api.ResourceCPU] {
pods, err = rq.kubeClient.Pods(usage.Namespace).List(api.ListOptions{})
pods, err = rq.kubeClient.Legacy().Pods(usage.Namespace).List(api.ListOptions{})
if err != nil {
return err
}

@ -288,31 +288,31 @@ func (rq *ResourceQuotaController) syncResourceQuota(quota api.ResourceQuota) (e
case api.ResourcePods:
value = resource.NewQuantity(int64(len(filteredPods)), resource.DecimalSI)
case api.ResourceServices:
items, err := rq.kubeClient.Services(usage.Namespace).List(api.ListOptions{})
items, err := rq.kubeClient.Legacy().Services(usage.Namespace).List(api.ListOptions{})
if err != nil {
return err
}
value = resource.NewQuantity(int64(len(items.Items)), resource.DecimalSI)
case api.ResourceReplicationControllers:
items, err := rq.kubeClient.ReplicationControllers(usage.Namespace).List(api.ListOptions{})
items, err := rq.kubeClient.Legacy().ReplicationControllers(usage.Namespace).List(api.ListOptions{})
if err != nil {
return err
}
value = resource.NewQuantity(int64(len(items.Items)), resource.DecimalSI)
case api.ResourceQuotas:
items, err := rq.kubeClient.ResourceQuotas(usage.Namespace).List(api.ListOptions{})
items, err := rq.kubeClient.Legacy().ResourceQuotas(usage.Namespace).List(api.ListOptions{})
if err != nil {
return err
}
value = resource.NewQuantity(int64(len(items.Items)), resource.DecimalSI)
case api.ResourceSecrets:
items, err := rq.kubeClient.Secrets(usage.Namespace).List(api.ListOptions{})
items, err := rq.kubeClient.Legacy().Secrets(usage.Namespace).List(api.ListOptions{})
if err != nil {
return err
}
value = resource.NewQuantity(int64(len(items.Items)), resource.DecimalSI)
case api.ResourcePersistentVolumeClaims:
items, err := rq.kubeClient.PersistentVolumeClaims(usage.Namespace).List(api.ListOptions{})
items, err := rq.kubeClient.Legacy().PersistentVolumeClaims(usage.Namespace).List(api.ListOptions{})
if err != nil {
return err
}

@ -334,7 +334,7 @@ func (rq *ResourceQuotaController) syncResourceQuota(quota api.ResourceQuota) (e

// update the usage only if it changed
if dirty {
_, err = rq.kubeClient.ResourceQuotas(usage.Namespace).UpdateStatus(&usage)
_, err = rq.kubeClient.Legacy().ResourceQuotas(usage.Namespace).UpdateStatus(&usage)
return err
}
return nil
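The informer wiring changes in the same way: both ListWatch closures route through Legacy(). A trimmed sketch of that shape, with the ListFunc/WatchFunc layout taken from the hunks above and everything else treated as an assumption:

```go
// Sketch only: cache.ListWatch field names and the Legacy() accessors are
// assumed to match the resource quota controller hunks above.
package example

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/cache"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
	"k8s.io/kubernetes/pkg/runtime"
	"k8s.io/kubernetes/pkg/watch"
)

// podListWatch builds the ListWatch a controller hands to its informer,
// routing both closures through the generated clientset's Legacy() group.
func podListWatch(kubeClient clientset.Interface) *cache.ListWatch {
	return &cache.ListWatch{
		ListFunc: func(options api.ListOptions) (runtime.Object, error) {
			return kubeClient.Legacy().Pods(api.NamespaceAll).List(options)
		},
		WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
			return kubeClient.Legacy().Pods(api.NamespaceAll).Watch(options)
		},
	}
}
```
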
@ -23,6 +23,7 @@ import (

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/client/testing/fake"
"k8s.io/kubernetes/pkg/client/unversioned/testclient"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/util/sets"

@ -173,7 +174,7 @@ func TestSyncResourceQuota(t *testing.T) {
},
}

kubeClient := testclient.NewSimpleFake(&podList, &quota)
kubeClient := fake.NewSimpleClientset(&podList, &quota)

ResourceQuotaController := NewResourceQuotaController(kubeClient, controller.StaticResyncPeriodFunc(time.Second))
err := ResourceQuotaController.syncResourceQuota(quota)

@ -230,7 +231,7 @@ func TestSyncResourceQuotaSpecChange(t *testing.T) {
},
}

kubeClient := testclient.NewSimpleFake(&quota)
kubeClient := fake.NewSimpleClientset(&quota)

ResourceQuotaController := NewResourceQuotaController(kubeClient, controller.StaticResyncPeriodFunc(time.Second))
err := ResourceQuotaController.syncResourceQuota(quota)

@ -277,7 +278,7 @@ func TestSyncResourceQuotaNoChange(t *testing.T) {
},
}

kubeClient := testclient.NewSimpleFake(&api.PodList{}, &quota)
kubeClient := fake.NewSimpleClientset(&api.PodList{}, &quota)

ResourceQuotaController := NewResourceQuotaController(kubeClient, controller.StaticResyncPeriodFunc(time.Second))
err := ResourceQuotaController.syncResourceQuota(quota)

@ -23,19 +23,19 @@ import (

"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
client "k8s.io/kubernetes/pkg/client/unversioned"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/util"
)

type RouteController struct {
routes cloudprovider.Routes
kubeClient client.Interface
kubeClient clientset.Interface
clusterName string
clusterCIDR *net.IPNet
}

func New(routes cloudprovider.Routes, kubeClient client.Interface, clusterName string, clusterCIDR *net.IPNet) *RouteController {
func New(routes cloudprovider.Routes, kubeClient clientset.Interface, clusterName string, clusterCIDR *net.IPNet) *RouteController {
return &RouteController{
routes: routes,
kubeClient: kubeClient,

@ -59,7 +59,7 @@ func (rc *RouteController) reconcileNodeRoutes() error {
}
// TODO (cjcullen): use pkg/controller/framework.NewInformer to watch this
// and reduce the number of lists needed.
nodeList, err := rc.kubeClient.Nodes().List(api.ListOptions{})
nodeList, err := rc.kubeClient.Legacy().Nodes().List(api.ListOptions{})
if err != nil {
return fmt.Errorf("error listing nodes: %v", err)
}

@ -27,8 +27,9 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
"k8s.io/kubernetes/pkg/client/record"
client "k8s.io/kubernetes/pkg/client/unversioned"
unversioned_legacy "k8s.io/kubernetes/pkg/client/typed/generated/legacy/unversioned"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/types"

@ -67,7 +68,7 @@ type serviceCache struct {

type ServiceController struct {
cloud cloudprovider.Interface
kubeClient client.Interface
kubeClient clientset.Interface
clusterName string
balancer cloudprovider.LoadBalancer
zone cloudprovider.Zone

@ -79,9 +80,9 @@ type ServiceController struct {

// New returns a new service controller to keep cloud provider service resources
// (like load balancers) in sync with the registry.
func New(cloud cloudprovider.Interface, kubeClient client.Interface, clusterName string) *ServiceController {
func New(cloud cloudprovider.Interface, kubeClient clientset.Interface, clusterName string) *ServiceController {
broadcaster := record.NewBroadcaster()
broadcaster.StartRecordingToSink(kubeClient.Events(""))
broadcaster.StartRecordingToSink(&unversioned_legacy.EventSinkImpl{kubeClient.Legacy().Events("")})
recorder := broadcaster.NewRecorder(api.EventSource{Component: "service-controller"})

return &ServiceController{

@ -115,8 +116,8 @@ func (s *ServiceController) Run(serviceSyncPeriod, nodeSyncPeriod time.Duration)
// We have to make this check beecause the ListWatch that we use in
// WatchServices requires Client functions that aren't in the interface
// for some reason.
if _, ok := s.kubeClient.(*client.Client); !ok {
return fmt.Errorf("ServiceController only works with real Client objects, but was passed something else satisfying the client Interface.")
if _, ok := s.kubeClient.(*clientset.Clientset); !ok {
return fmt.Errorf("ServiceController only works with real Client objects, but was passed something else satisfying the clientset.Interface.")
}

// Get the currently existing set of services and then all future creates

@ -133,13 +134,13 @@ func (s *ServiceController) Run(serviceSyncPeriod, nodeSyncPeriod time.Duration)
}),
s.cache,
)
lw := cache.NewListWatchFromClient(s.kubeClient.(*client.Client), "services", api.NamespaceAll, fields.Everything())
lw := cache.NewListWatchFromClient(s.kubeClient.(*clientset.Clientset).LegacyClient, "services", api.NamespaceAll, fields.Everything())
cache.NewReflector(lw, &api.Service{}, serviceQueue, serviceSyncPeriod).Run()
for i := 0; i < workerGoroutines; i++ {
go s.watchServices(serviceQueue)
}

nodeLW := cache.NewListWatchFromClient(s.kubeClient.(*client.Client), "nodes", api.NamespaceAll, fields.Everything())
nodeLW := cache.NewListWatchFromClient(s.kubeClient.(*clientset.Clientset).LegacyClient, "nodes", api.NamespaceAll, fields.Everything())
cache.NewReflector(nodeLW, &api.Node{}, s.nodeLister.Store, 0).Run()
go s.nodeSyncLoop(nodeSyncPeriod)
return nil

@ -343,7 +344,7 @@ func (s *ServiceController) createLoadBalancerIfNeeded(namespacedName types.Name
func (s *ServiceController) persistUpdate(service *api.Service) error {
var err error
for i := 0; i < clientRetryCount; i++ {
_, err = s.kubeClient.Services(service.Namespace).UpdateStatus(service)
_, err = s.kubeClient.Legacy().Services(service.Namespace).UpdateStatus(service)
if err == nil {
return nil
}
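Event recording needs one extra adaptation: the broadcaster can no longer take kubeClient.Events("") directly, so the typed events client is wrapped in an EventSinkImpl. A sketch of that wiring, assuming the types behave exactly as in the service controller hunk above (the function name is illustrative):

```go
// Sketch only: EventSinkImpl and the Legacy().Events("") accessor are
// assumptions taken from the hunk above.
package example

import (
	"k8s.io/kubernetes/pkg/api"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
	"k8s.io/kubernetes/pkg/client/record"
	unversioned_legacy "k8s.io/kubernetes/pkg/client/typed/generated/legacy/unversioned"
)

// newRecorder wires a record.EventRecorder to the generated clientset by
// wrapping the typed events client in an EventSinkImpl, as done above.
func newRecorder(kubeClient clientset.Interface, component string) record.EventRecorder {
	broadcaster := record.NewBroadcaster()
	broadcaster.StartRecordingToSink(&unversioned_legacy.EventSinkImpl{kubeClient.Legacy().Events("")})
	return broadcaster.NewRecorder(api.EventSource{Component: component})
}
```
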
@ -21,7 +21,7 @@ import (
"testing"

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/unversioned/testclient"
"k8s.io/kubernetes/pkg/client/testing/fake"
fakecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake"
"k8s.io/kubernetes/pkg/types"
)

@ -90,7 +90,7 @@ func TestCreateExternalLoadBalancer(t *testing.T) {
for _, item := range table {
cloud := &fakecloud.FakeCloud{}
cloud.Region = region
client := &testclient.Fake{}
client := &fake.Clientset{}
controller := New(cloud, client, "test-cluster")
controller.init()
cloud.Calls = nil // ignore any cloud calls made in init()

@ -210,7 +210,7 @@ func TestUpdateNodesInExternalLoadBalancer(t *testing.T) {
cloud := &fakecloud.FakeCloud{}

cloud.Region = region
client := &testclient.Fake{}
client := &fake.Clientset{}
controller := New(cloud, client, "test-cluster2")
controller.init()
cloud.Calls = nil // ignore any cloud calls made in init()

@ -25,6 +25,7 @@ import (
apierrs "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/meta"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/fields"

@ -66,7 +67,7 @@ func DefaultServiceAccountsControllerOptions() ServiceAccountsControllerOptions
}

// NewServiceAccountsController returns a new *ServiceAccountsController.
func NewServiceAccountsController(cl client.Interface, options ServiceAccountsControllerOptions) *ServiceAccountsController {
func NewServiceAccountsController(cl clientset.Interface, options ServiceAccountsControllerOptions) *ServiceAccountsController {
e := &ServiceAccountsController{
client: cl,
serviceAccountsToEnsure: options.ServiceAccounts,

@ -81,11 +82,11 @@ func NewServiceAccountsController(cl client.Interface, options ServiceAccountsCo
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
options.FieldSelector = accountSelector
return e.client.ServiceAccounts(api.NamespaceAll).List(options)
return e.client.Legacy().ServiceAccounts(api.NamespaceAll).List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
options.FieldSelector = accountSelector
return e.client.ServiceAccounts(api.NamespaceAll).Watch(options)
return e.client.Legacy().ServiceAccounts(api.NamespaceAll).Watch(options)
},
},
&api.ServiceAccount{},

@ -99,10 +100,10 @@ func NewServiceAccountsController(cl client.Interface, options ServiceAccountsCo
e.namespaces, e.namespaceController = framework.NewIndexerInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return e.client.Namespaces().List(options)
return e.client.Legacy().Namespaces().List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
return e.client.Namespaces().Watch(options)
return e.client.Legacy().Namespaces().Watch(options)
},
},
&api.Namespace{},

@ -121,7 +122,7 @@ func NewServiceAccountsController(cl client.Interface, options ServiceAccountsCo
type ServiceAccountsController struct {
stopChan chan struct{}

client client.Interface
client clientset.Interface
serviceAccountsToEnsure []api.ServiceAccount

serviceAccounts cache.Indexer

@ -216,7 +217,7 @@ func (e *ServiceAccountsController) createServiceAccountIfNeeded(sa api.ServiceA
// createDefaultServiceAccount creates a default ServiceAccount in the specified namespace
func (e *ServiceAccountsController) createServiceAccount(sa api.ServiceAccount, namespace string) {
sa.Namespace = namespace
if _, err := e.client.ServiceAccounts(namespace).Create(&sa); err != nil && !apierrs.IsAlreadyExists(err) {
if _, err := e.client.Legacy().ServiceAccounts(namespace).Create(&sa); err != nil && !apierrs.IsAlreadyExists(err) {
glog.Error(err)
}
}

@ -20,6 +20,7 @@ import (
"testing"

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/testing/fake"
"k8s.io/kubernetes/pkg/client/unversioned/testclient"
"k8s.io/kubernetes/pkg/util/sets"
)

@ -149,7 +150,7 @@ func TestServiceAccountCreation(t *testing.T) {
}

for k, tc := range testcases {
client := testclient.NewSimpleFake(defaultServiceAccount, managedServiceAccount)
client := fake.NewSimpleClientset(defaultServiceAccount, managedServiceAccount)
options := DefaultServiceAccountsControllerOptions()
options.ServiceAccounts = []api.ServiceAccount{
{ObjectMeta: api.ObjectMeta{Name: defaultName}},

@ -18,7 +18,7 @@ package serviceaccount

import (
"k8s.io/kubernetes/pkg/api"
client "k8s.io/kubernetes/pkg/client/unversioned"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
"k8s.io/kubernetes/pkg/registry/generic"
"k8s.io/kubernetes/pkg/registry/secret"
secretetcd "k8s.io/kubernetes/pkg/registry/secret/etcd"

@ -28,23 +28,23 @@ import (
"k8s.io/kubernetes/pkg/storage"
)

// clientGetter implements ServiceAccountTokenGetter using a client.Interface
// clientGetter implements ServiceAccountTokenGetter using a clientset.Interface
type clientGetter struct {
client client.Interface
client clientset.Interface
}

// NewGetterFromClient returns a ServiceAccountTokenGetter that
// uses the specified client to retrieve service accounts and secrets.
// The client should NOT authenticate using a service account token
// the returned getter will be used to retrieve, or recursion will result.
func NewGetterFromClient(c client.Interface) serviceaccount.ServiceAccountTokenGetter {
func NewGetterFromClient(c clientset.Interface) serviceaccount.ServiceAccountTokenGetter {
return clientGetter{c}
}
func (c clientGetter) GetServiceAccount(namespace, name string) (*api.ServiceAccount, error) {
return c.client.ServiceAccounts(namespace).Get(name)
return c.client.Legacy().ServiceAccounts(namespace).Get(name)
}
func (c clientGetter) GetSecret(namespace, name string) (*api.Secret, error) {
return c.client.Secrets(namespace).Get(name)
return c.client.Legacy().Secrets(namespace).Get(name)
}

// registryGetter implements ServiceAccountTokenGetter using a service account and secret registry

@ -25,6 +25,7 @@ import (
"k8s.io/kubernetes/pkg/api"
apierrors "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/fields"

@ -61,7 +62,7 @@ type TokensControllerOptions struct {
}

// NewTokensController returns a new *TokensController.
func NewTokensController(cl client.Interface, options TokensControllerOptions) *TokensController {
func NewTokensController(cl clientset.Interface, options TokensControllerOptions) *TokensController {
e := &TokensController{
client: cl,
token: options.TokenGenerator,

@ -71,10 +72,10 @@ func NewTokensController(cl client.Interface, options TokensControllerOptions) *
e.serviceAccounts, e.serviceAccountController = framework.NewIndexerInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return e.client.ServiceAccounts(api.NamespaceAll).List(options)
return e.client.Legacy().ServiceAccounts(api.NamespaceAll).List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
return e.client.ServiceAccounts(api.NamespaceAll).Watch(options)
return e.client.Legacy().ServiceAccounts(api.NamespaceAll).Watch(options)
},
},
&api.ServiceAccount{},

@ -92,11 +93,11 @@ func NewTokensController(cl client.Interface, options TokensControllerOptions) *
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
options.FieldSelector = tokenSelector
return e.client.Secrets(api.NamespaceAll).List(options)
return e.client.Legacy().Secrets(api.NamespaceAll).List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
options.FieldSelector = tokenSelector
return e.client.Secrets(api.NamespaceAll).Watch(options)
return e.client.Legacy().Secrets(api.NamespaceAll).Watch(options)
},
},
&api.Secret{},

@ -119,7 +120,7 @@ func NewTokensController(cl client.Interface, options TokensControllerOptions) *
type TokensController struct {
stopChan chan struct{}

client client.Interface
client clientset.Interface
token serviceaccount.TokenGenerator

rootCA []byte

@ -291,7 +292,7 @@ func (e *TokensController) createSecretIfNeeded(serviceAccount *api.ServiceAccou
func (e *TokensController) createSecret(serviceAccount *api.ServiceAccount) error {
// We don't want to update the cache's copy of the service account
// so add the secret to a freshly retrieved copy of the service account
serviceAccounts := e.client.ServiceAccounts(serviceAccount.Namespace)
serviceAccounts := e.client.Legacy().ServiceAccounts(serviceAccount.Namespace)
liveServiceAccount, err := serviceAccounts.Get(serviceAccount.Name)
if err != nil {
return err

@ -329,7 +330,7 @@ func (e *TokensController) createSecret(serviceAccount *api.ServiceAccount) erro
}

// Save the secret
if _, err := e.client.Secrets(serviceAccount.Namespace).Create(secret); err != nil {
if _, err := e.client.Legacy().Secrets(serviceAccount.Namespace).Create(secret); err != nil {
return err
}

@ -339,7 +340,7 @@ func (e *TokensController) createSecret(serviceAccount *api.ServiceAccount) erro
if err != nil {
// we weren't able to use the token, try to clean it up.
glog.V(2).Infof("Deleting secret %s/%s because reference couldn't be added (%v)", secret.Namespace, secret.Name, err)
if err := e.client.Secrets(secret.Namespace).Delete(secret.Name); err != nil {
if err := e.client.Legacy().Secrets(secret.Namespace).Delete(secret.Name, nil); err != nil {
glog.Error(err) // if we fail, just log it
}
}

@ -389,7 +390,7 @@ func (e *TokensController) generateTokenIfNeeded(serviceAccount *api.ServiceAcco
secret.Annotations[api.ServiceAccountUIDKey] = string(serviceAccount.UID)

// Save the secret
if _, err := e.client.Secrets(secret.Namespace).Update(secret); err != nil {
if _, err := e.client.Legacy().Secrets(secret.Namespace).Update(secret); err != nil {
return err
}
return nil

@ -397,7 +398,7 @@ func (e *TokensController) generateTokenIfNeeded(serviceAccount *api.ServiceAcco

// deleteSecret deletes the given secret
func (e *TokensController) deleteSecret(secret *api.Secret) error {
return e.client.Secrets(secret.Namespace).Delete(secret.Name)
return e.client.Legacy().Secrets(secret.Namespace).Delete(secret.Name, nil)
}

// removeSecretReferenceIfNeeded updates the given ServiceAccount to remove a reference to the given secretName if needed.

@ -410,7 +411,7 @@ func (e *TokensController) removeSecretReferenceIfNeeded(serviceAccount *api.Ser

// We don't want to update the cache's copy of the service account
// so remove the secret from a freshly retrieved copy of the service account
serviceAccounts := e.client.ServiceAccounts(serviceAccount.Namespace)
serviceAccounts := e.client.Legacy().ServiceAccounts(serviceAccount.Namespace)
serviceAccount, err := serviceAccounts.Get(serviceAccount.Name)
if err != nil {
return err

@ -460,7 +461,7 @@ func (e *TokensController) getServiceAccount(secret *api.Secret, fetchOnCacheMis
}

if fetchOnCacheMiss {
serviceAccount, err := e.client.ServiceAccounts(secret.Namespace).Get(name)
serviceAccount, err := e.client.Legacy().ServiceAccounts(secret.Namespace).Get(name)
if apierrors.IsNotFound(err) {
return nil, nil
}
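One signature change worth noting in the hunks above: the generated typed clients take an explicit *api.DeleteOptions, so single-argument Delete(name) calls gain a nil second argument. A minimal sketch of the new call shape, assuming the two-argument form shown above (the function name is illustrative):

```go
// Sketch only: the two-argument Delete is an assumption taken from the
// tokens controller hunks above.
package example

import (
	"k8s.io/kubernetes/pkg/api"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
)

// deleteSecret shows Delete(name, nil): a nil *api.DeleteOptions replaces
// the old single-argument Delete(name) on the unversioned client.
func deleteSecret(c clientset.Interface, secret *api.Secret) error {
	return c.Legacy().Secrets(secret.Namespace).Delete(secret.Name, nil)
}
```
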
@ -21,7 +21,8 @@ import (
"testing"

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/unversioned/testclient"
"k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/client/testing/fake"
"k8s.io/kubernetes/pkg/runtime"
utilrand "k8s.io/kubernetes/pkg/util/rand"
)

@ -179,16 +180,16 @@ func TestTokenCreation(t *testing.T) {
UpdatedSecret *api.Secret
DeletedSecret *api.Secret

ExpectedActions []testclient.Action
ExpectedActions []core.Action
}{
"new serviceaccount with no secrets": {
ClientObjects: []runtime.Object{serviceAccount(emptySecretReferences()), createdTokenSecret()},

AddedServiceAccount: serviceAccount(emptySecretReferences()),
ExpectedActions: []testclient.Action{
testclient.NewGetAction("serviceaccounts", api.NamespaceDefault, "default"),
testclient.NewCreateAction("secrets", api.NamespaceDefault, createdTokenSecret()),
testclient.NewUpdateAction("serviceaccounts", api.NamespaceDefault, serviceAccount(addTokenSecretReference(emptySecretReferences()))),
ExpectedActions: []core.Action{
core.NewGetAction("serviceaccounts", api.NamespaceDefault, "default"),
core.NewCreateAction("secrets", api.NamespaceDefault, createdTokenSecret()),
core.NewUpdateAction("serviceaccounts", api.NamespaceDefault, serviceAccount(addTokenSecretReference(emptySecretReferences()))),
},
},
"new serviceaccount with no secrets with unsynced secret store": {

@ -197,20 +198,20 @@ func TestTokenCreation(t *testing.T) {
SecretsSyncPending: true,

AddedServiceAccount: serviceAccount(emptySecretReferences()),
ExpectedActions: []testclient.Action{
testclient.NewGetAction("serviceaccounts", api.NamespaceDefault, "default"),
testclient.NewCreateAction("secrets", api.NamespaceDefault, createdTokenSecret()),
testclient.NewUpdateAction("serviceaccounts", api.NamespaceDefault, serviceAccount(addTokenSecretReference(emptySecretReferences()))),
ExpectedActions: []core.Action{
core.NewGetAction("serviceaccounts", api.NamespaceDefault, "default"),
core.NewCreateAction("secrets", api.NamespaceDefault, createdTokenSecret()),
core.NewUpdateAction("serviceaccounts", api.NamespaceDefault, serviceAccount(addTokenSecretReference(emptySecretReferences()))),
},
},
"new serviceaccount with missing secrets": {
ClientObjects: []runtime.Object{serviceAccount(missingSecretReferences()), createdTokenSecret()},

AddedServiceAccount: serviceAccount(missingSecretReferences()),
ExpectedActions: []testclient.Action{
testclient.NewGetAction("serviceaccounts", api.NamespaceDefault, "default"),
testclient.NewCreateAction("secrets", api.NamespaceDefault, createdTokenSecret()),
testclient.NewUpdateAction("serviceaccounts", api.NamespaceDefault, serviceAccount(addTokenSecretReference(missingSecretReferences()))),
ExpectedActions: []core.Action{
core.NewGetAction("serviceaccounts", api.NamespaceDefault, "default"),
core.NewCreateAction("secrets", api.NamespaceDefault, createdTokenSecret()),
core.NewUpdateAction("serviceaccounts", api.NamespaceDefault, serviceAccount(addTokenSecretReference(missingSecretReferences()))),
},
},
"new serviceaccount with missing secrets with unsynced secret store": {

@ -219,16 +220,16 @@ func TestTokenCreation(t *testing.T) {
SecretsSyncPending: true,

AddedServiceAccount: serviceAccount(missingSecretReferences()),
ExpectedActions: []testclient.Action{},
ExpectedActions: []core.Action{},
},
"new serviceaccount with non-token secrets": {
ClientObjects: []runtime.Object{serviceAccount(regularSecretReferences()), createdTokenSecret(), opaqueSecret()},

AddedServiceAccount: serviceAccount(regularSecretReferences()),
ExpectedActions: []testclient.Action{
testclient.NewGetAction("serviceaccounts", api.NamespaceDefault, "default"),
testclient.NewCreateAction("secrets", api.NamespaceDefault, createdTokenSecret()),
testclient.NewUpdateAction("serviceaccounts", api.NamespaceDefault, serviceAccount(addTokenSecretReference(regularSecretReferences()))),
ExpectedActions: []core.Action{
core.NewGetAction("serviceaccounts", api.NamespaceDefault, "default"),
core.NewCreateAction("secrets", api.NamespaceDefault, createdTokenSecret()),
core.NewUpdateAction("serviceaccounts", api.NamespaceDefault, serviceAccount(addTokenSecretReference(regularSecretReferences()))),
},
},
"new serviceaccount with token secrets": {

@ -236,14 +237,14 @@ func TestTokenCreation(t *testing.T) {
ExistingSecrets: []*api.Secret{serviceAccountTokenSecret()},

AddedServiceAccount: serviceAccount(tokenSecretReferences()),
ExpectedActions: []testclient.Action{},
ExpectedActions: []core.Action{},
},
"new serviceaccount with no secrets with resource conflict": {
ClientObjects: []runtime.Object{updatedServiceAccount(emptySecretReferences()), createdTokenSecret()},

AddedServiceAccount: serviceAccount(emptySecretReferences()),
ExpectedActions: []testclient.Action{
testclient.NewGetAction("serviceaccounts", api.NamespaceDefault, "default"),
ExpectedActions: []core.Action{
core.NewGetAction("serviceaccounts", api.NamespaceDefault, "default"),
},
},

@ -251,10 +252,10 @@ func TestTokenCreation(t *testing.T) {
ClientObjects: []runtime.Object{serviceAccount(emptySecretReferences()), createdTokenSecret()},

UpdatedServiceAccount: serviceAccount(emptySecretReferences()),
ExpectedActions: []testclient.Action{
testclient.NewGetAction("serviceaccounts", api.NamespaceDefault, "default"),
testclient.NewCreateAction("secrets", api.NamespaceDefault, createdTokenSecret()),
testclient.NewUpdateAction("serviceaccounts", api.NamespaceDefault, serviceAccount(addTokenSecretReference(emptySecretReferences()))),
ExpectedActions: []core.Action{
core.NewGetAction("serviceaccounts", api.NamespaceDefault, "default"),
core.NewCreateAction("secrets", api.NamespaceDefault, createdTokenSecret()),
core.NewUpdateAction("serviceaccounts", api.NamespaceDefault, serviceAccount(addTokenSecretReference(emptySecretReferences()))),
},
},
"updated serviceaccount with no secrets with unsynced secret store": {

@ -263,20 +264,20 @@ func TestTokenCreation(t *testing.T) {
SecretsSyncPending: true,

UpdatedServiceAccount: serviceAccount(emptySecretReferences()),
ExpectedActions: []testclient.Action{
testclient.NewGetAction("serviceaccounts", api.NamespaceDefault, "default"),
testclient.NewCreateAction("secrets", api.NamespaceDefault, createdTokenSecret()),
testclient.NewUpdateAction("serviceaccounts", api.NamespaceDefault, serviceAccount(addTokenSecretReference(emptySecretReferences()))),
ExpectedActions: []core.Action{
core.NewGetAction("serviceaccounts", api.NamespaceDefault, "default"),
core.NewCreateAction("secrets", api.NamespaceDefault, createdTokenSecret()),
core.NewUpdateAction("serviceaccounts", api.NamespaceDefault, serviceAccount(addTokenSecretReference(emptySecretReferences()))),
},
},
"updated serviceaccount with missing secrets": {
ClientObjects: []runtime.Object{serviceAccount(missingSecretReferences()), createdTokenSecret()},

UpdatedServiceAccount: serviceAccount(missingSecretReferences()),
ExpectedActions: []testclient.Action{
testclient.NewGetAction("serviceaccounts", api.NamespaceDefault, "default"),
testclient.NewCreateAction("secrets", api.NamespaceDefault, createdTokenSecret()),
testclient.NewUpdateAction("serviceaccounts", api.NamespaceDefault, serviceAccount(addTokenSecretReference(missingSecretReferences()))),
ExpectedActions: []core.Action{
core.NewGetAction("serviceaccounts", api.NamespaceDefault, "default"),
core.NewCreateAction("secrets", api.NamespaceDefault, createdTokenSecret()),
core.NewUpdateAction("serviceaccounts", api.NamespaceDefault, serviceAccount(addTokenSecretReference(missingSecretReferences()))),
},
},
"updated serviceaccount with missing secrets with unsynced secret store": {

@ -285,54 +286,54 @@ func TestTokenCreation(t *testing.T) {
SecretsSyncPending: true,

UpdatedServiceAccount: serviceAccount(missingSecretReferences()),
ExpectedActions: []testclient.Action{},
ExpectedActions: []core.Action{},
},
"updated serviceaccount with non-token secrets": {
ClientObjects: []runtime.Object{serviceAccount(regularSecretReferences()), createdTokenSecret(), opaqueSecret()},

UpdatedServiceAccount: serviceAccount(regularSecretReferences()),
ExpectedActions: []testclient.Action{
testclient.NewGetAction("serviceaccounts", api.NamespaceDefault, "default"),
testclient.NewCreateAction("secrets", api.NamespaceDefault, createdTokenSecret()),
testclient.NewUpdateAction("serviceaccounts", api.NamespaceDefault, serviceAccount(addTokenSecretReference(regularSecretReferences()))),
ExpectedActions: []core.Action{
core.NewGetAction("serviceaccounts", api.NamespaceDefault, "default"),
core.NewCreateAction("secrets", api.NamespaceDefault, createdTokenSecret()),
core.NewUpdateAction("serviceaccounts", api.NamespaceDefault, serviceAccount(addTokenSecretReference(regularSecretReferences()))),
},
},
"updated serviceaccount with token secrets": {
ExistingSecrets: []*api.Secret{serviceAccountTokenSecret()},

UpdatedServiceAccount: serviceAccount(tokenSecretReferences()),
ExpectedActions: []testclient.Action{},
ExpectedActions: []core.Action{},
},
"updated serviceaccount with no secrets with resource conflict": {
ClientObjects: []runtime.Object{updatedServiceAccount(emptySecretReferences()), createdTokenSecret()},

UpdatedServiceAccount: serviceAccount(emptySecretReferences()),
ExpectedActions: []testclient.Action{
testclient.NewGetAction("serviceaccounts", api.NamespaceDefault, "default"),
ExpectedActions: []core.Action{
core.NewGetAction("serviceaccounts", api.NamespaceDefault, "default"),
},
},

"deleted serviceaccount with no secrets": {
DeletedServiceAccount: serviceAccount(emptySecretReferences()),
ExpectedActions: []testclient.Action{},
ExpectedActions: []core.Action{},
},
"deleted serviceaccount with missing secrets": {
DeletedServiceAccount: serviceAccount(missingSecretReferences()),
ExpectedActions: []testclient.Action{},
ExpectedActions: []core.Action{},
},
"deleted serviceaccount with non-token secrets": {
ClientObjects: []runtime.Object{opaqueSecret()},

DeletedServiceAccount: serviceAccount(regularSecretReferences()),
ExpectedActions: []testclient.Action{},
ExpectedActions: []core.Action{},
},
"deleted serviceaccount with token secrets": {
ClientObjects: []runtime.Object{serviceAccountTokenSecret()},
ExistingSecrets: []*api.Secret{serviceAccountTokenSecret()},

DeletedServiceAccount: serviceAccount(tokenSecretReferences()),
ExpectedActions: []testclient.Action{
testclient.NewDeleteAction("secrets", api.NamespaceDefault, "token-secret-1"),
ExpectedActions: []core.Action{
core.NewDeleteAction("secrets", api.NamespaceDefault, "token-secret-1"),
},
},

@ -340,24 +341,24 @@ func TestTokenCreation(t *testing.T) {
ClientObjects: []runtime.Object{serviceAccountTokenSecret()},

AddedSecret: serviceAccountTokenSecret(),
ExpectedActions: []testclient.Action{
testclient.NewGetAction("serviceaccounts", api.NamespaceDefault, "default"),
testclient.NewDeleteAction("secrets", api.NamespaceDefault, "token-secret-1"),
ExpectedActions: []core.Action{
core.NewGetAction("serviceaccounts", api.NamespaceDefault, "default"),
core.NewDeleteAction("secrets", api.NamespaceDefault, "token-secret-1"),
},
},
"added secret with serviceaccount": {
ExistingServiceAccount: serviceAccount(tokenSecretReferences()),

AddedSecret: serviceAccountTokenSecret(),
ExpectedActions: []testclient.Action{},
ExpectedActions: []core.Action{},
},
"added token secret without token data": {
ClientObjects: []runtime.Object{serviceAccountTokenSecretWithoutTokenData()},
ExistingServiceAccount: serviceAccount(tokenSecretReferences()),

AddedSecret: serviceAccountTokenSecretWithoutTokenData(),
ExpectedActions: []testclient.Action{
|
||||
testclient.NewUpdateAction("secrets", api.NamespaceDefault, serviceAccountTokenSecret()),
|
||||
ExpectedActions: []core.Action{
|
||||
core.NewUpdateAction("secrets", api.NamespaceDefault, serviceAccountTokenSecret()),
|
||||
},
|
||||
},
|
||||
"added token secret without ca data": {
|
||||
|
@ -365,8 +366,8 @@ func TestTokenCreation(t *testing.T) {
|
|||
ExistingServiceAccount: serviceAccount(tokenSecretReferences()),
|
||||
|
||||
AddedSecret: serviceAccountTokenSecretWithoutCAData(),
|
||||
ExpectedActions: []testclient.Action{
|
||||
testclient.NewUpdateAction("secrets", api.NamespaceDefault, serviceAccountTokenSecret()),
|
||||
ExpectedActions: []core.Action{
|
||||
core.NewUpdateAction("secrets", api.NamespaceDefault, serviceAccountTokenSecret()),
|
||||
},
|
||||
},
|
||||
"added token secret with mismatched ca data": {
|
||||
|
@ -374,8 +375,8 @@ func TestTokenCreation(t *testing.T) {
|
|||
ExistingServiceAccount: serviceAccount(tokenSecretReferences()),
|
||||
|
||||
AddedSecret: serviceAccountTokenSecretWithCAData([]byte("mismatched")),
|
||||
ExpectedActions: []testclient.Action{
|
||||
testclient.NewUpdateAction("secrets", api.NamespaceDefault, serviceAccountTokenSecret()),
|
||||
ExpectedActions: []core.Action{
|
||||
core.NewUpdateAction("secrets", api.NamespaceDefault, serviceAccountTokenSecret()),
|
||||
},
|
||||
},
|
||||
|
||||
|
@ -383,24 +384,24 @@ func TestTokenCreation(t *testing.T) {
|
|||
ClientObjects: []runtime.Object{serviceAccountTokenSecret()},
|
||||
|
||||
UpdatedSecret: serviceAccountTokenSecret(),
|
||||
ExpectedActions: []testclient.Action{
|
||||
testclient.NewGetAction("serviceaccounts", api.NamespaceDefault, "default"),
|
||||
testclient.NewDeleteAction("secrets", api.NamespaceDefault, "token-secret-1"),
|
||||
ExpectedActions: []core.Action{
|
||||
core.NewGetAction("serviceaccounts", api.NamespaceDefault, "default"),
|
||||
core.NewDeleteAction("secrets", api.NamespaceDefault, "token-secret-1"),
|
||||
},
|
||||
},
|
||||
"updated secret with serviceaccount": {
|
||||
ExistingServiceAccount: serviceAccount(tokenSecretReferences()),
|
||||
|
||||
UpdatedSecret: serviceAccountTokenSecret(),
|
||||
ExpectedActions: []testclient.Action{},
|
||||
ExpectedActions: []core.Action{},
|
||||
},
|
||||
"updated token secret without token data": {
|
||||
ClientObjects: []runtime.Object{serviceAccountTokenSecretWithoutTokenData()},
|
||||
ExistingServiceAccount: serviceAccount(tokenSecretReferences()),
|
||||
|
||||
UpdatedSecret: serviceAccountTokenSecretWithoutTokenData(),
|
||||
ExpectedActions: []testclient.Action{
|
||||
testclient.NewUpdateAction("secrets", api.NamespaceDefault, serviceAccountTokenSecret()),
|
||||
ExpectedActions: []core.Action{
|
||||
core.NewUpdateAction("secrets", api.NamespaceDefault, serviceAccountTokenSecret()),
|
||||
},
|
||||
},
|
||||
"updated token secret without ca data": {
|
||||
|
@ -408,8 +409,8 @@ func TestTokenCreation(t *testing.T) {
|
|||
ExistingServiceAccount: serviceAccount(tokenSecretReferences()),
|
||||
|
||||
UpdatedSecret: serviceAccountTokenSecretWithoutCAData(),
|
||||
ExpectedActions: []testclient.Action{
|
||||
testclient.NewUpdateAction("secrets", api.NamespaceDefault, serviceAccountTokenSecret()),
|
||||
ExpectedActions: []core.Action{
|
||||
core.NewUpdateAction("secrets", api.NamespaceDefault, serviceAccountTokenSecret()),
|
||||
},
|
||||
},
|
||||
"updated token secret with mismatched ca data": {
|
||||
|
@ -417,30 +418,30 @@ func TestTokenCreation(t *testing.T) {
|
|||
ExistingServiceAccount: serviceAccount(tokenSecretReferences()),
|
||||
|
||||
UpdatedSecret: serviceAccountTokenSecretWithCAData([]byte("mismatched")),
|
||||
ExpectedActions: []testclient.Action{
|
||||
testclient.NewUpdateAction("secrets", api.NamespaceDefault, serviceAccountTokenSecret()),
|
||||
ExpectedActions: []core.Action{
|
||||
core.NewUpdateAction("secrets", api.NamespaceDefault, serviceAccountTokenSecret()),
|
||||
},
|
||||
},
|
||||
|
||||
"deleted secret without serviceaccount": {
|
||||
DeletedSecret: serviceAccountTokenSecret(),
|
||||
ExpectedActions: []testclient.Action{},
|
||||
ExpectedActions: []core.Action{},
|
||||
},
|
||||
"deleted secret with serviceaccount with reference": {
|
||||
ClientObjects: []runtime.Object{serviceAccount(tokenSecretReferences())},
|
||||
ExistingServiceAccount: serviceAccount(tokenSecretReferences()),
|
||||
|
||||
DeletedSecret: serviceAccountTokenSecret(),
|
||||
ExpectedActions: []testclient.Action{
|
||||
testclient.NewGetAction("serviceaccounts", api.NamespaceDefault, "default"),
|
||||
testclient.NewUpdateAction("serviceaccounts", api.NamespaceDefault, serviceAccount(emptySecretReferences())),
|
||||
ExpectedActions: []core.Action{
|
||||
core.NewGetAction("serviceaccounts", api.NamespaceDefault, "default"),
|
||||
core.NewUpdateAction("serviceaccounts", api.NamespaceDefault, serviceAccount(emptySecretReferences())),
|
||||
},
|
||||
},
|
||||
"deleted secret with serviceaccount without reference": {
|
||||
ExistingServiceAccount: serviceAccount(emptySecretReferences()),
|
||||
|
||||
DeletedSecret: serviceAccountTokenSecret(),
|
||||
ExpectedActions: []testclient.Action{},
|
||||
ExpectedActions: []core.Action{},
|
||||
},
|
||||
}
|
||||
|
||||
|
@@ -451,7 +452,7 @@ func TestTokenCreation(t *testing.T) {

generator := &testGenerator{Token: "ABC"}

client := testclient.NewSimpleFake(tc.ClientObjects...)
client := fake.NewSimpleClientset(tc.ClientObjects...)

controller := NewTokensController(client, TokensControllerOptions{TokenGenerator: generator, RootCA: []byte("CA Data")})
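For reference, the test pattern this migration lands on can be sketched in isolation roughly as below: seed the generated fake clientset with objects, exercise it through the clientset surface, then assert on the actions it records (the real tests build their expectations with core.NewGetAction and friends). This is an illustrative sketch only, not code from the commit; the pod object and names are invented, and it assumes the release_1_1 fake clientset and the core testing Action's Matches helper at this revision.

package example

import (
	"testing"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/testing/fake"
)

// Illustrative only: seed the fake clientset, exercise it, and check the
// recorded actions -- the pattern the migrated controller tests rely on.
func TestFakeClientsetRecordsActions(t *testing.T) {
	pod := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "test-pod", Namespace: api.NamespaceDefault}}
	client := fake.NewSimpleClientset(pod)

	// Each call through the fake is recorded as a core.Action.
	if _, err := client.Legacy().Pods(api.NamespaceDefault).Get("test-pod"); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	actions := client.Actions()
	if len(actions) != 1 || !actions[0].Matches("get", "pods") {
		t.Fatalf("unexpected actions: %+v", actions)
	}
}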
@@ -20,14 +20,15 @@ package config
import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/fields"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
)

// NewSourceApiserver creates a config source that watches and pulls from the apiserver.
func NewSourceApiserver(c *client.Client, nodeName string, updates chan<- interface{}) {
lw := cache.NewListWatchFromClient(c, "pods", api.NamespaceAll, fields.OneTermEqualSelector(client.PodHost, nodeName))
func NewSourceApiserver(c *clientset.Clientset, nodeName string, updates chan<- interface{}) {
lw := cache.NewListWatchFromClient(c.LegacyClient, "pods", api.NamespaceAll, fields.OneTermEqualSelector(client.PodHost, nodeName))
newSourceApiserverFromLW(lw, updates)
}
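For context on the signature change above: callers now hand NewSourceApiserver a generated *clientset.Clientset rather than a *client.Client, and the pods list/watch runs against its LegacyClient. A hypothetical wiring sketch follows; clientset.NewForConfig is assumed to be the generated constructor and to accept the same unversioned client.Config used elsewhere at this revision, and the function name is invented.

package example

import (
	"github.com/golang/glog"

	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
	client "k8s.io/kubernetes/pkg/client/unversioned"
	kubeletconfig "k8s.io/kubernetes/pkg/kubelet/config"
)

// Hypothetical wiring: build the generated clientset from a client config and
// hand it to the apiserver pod source.
func startApiserverSource(cfg *client.Config, nodeName string, updates chan<- interface{}) {
	cs, err := clientset.NewForConfig(cfg)
	if err != nil {
		glog.Fatalf("failed to create clientset: %v", err)
	}
	kubeletconfig.NewSourceApiserver(cs, nodeName, updates)
}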
|
|
|
@ -148,8 +148,7 @@ func NewMainKubelet(
|
|||
hostname string,
|
||||
nodeName string,
|
||||
dockerClient dockertools.DockerInterface,
|
||||
kubeClient client.Interface,
|
||||
clientset clientset.Interface,
|
||||
kubeClient clientset.Interface,
|
||||
rootDirectory string,
|
||||
podInfraContainerImage string,
|
||||
resyncInterval time.Duration,
|
||||
|
@ -220,10 +219,10 @@ func NewMainKubelet(
|
|||
// than an interface. There is no way to construct a list+watcher using resource name.
|
||||
listWatch := &cache.ListWatch{
|
||||
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
|
||||
return kubeClient.Services(api.NamespaceAll).List(options)
|
||||
return kubeClient.Legacy().Services(api.NamespaceAll).List(options)
|
||||
},
|
||||
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
|
||||
return kubeClient.Services(api.NamespaceAll).Watch(options)
|
||||
return kubeClient.Legacy().Services(api.NamespaceAll).Watch(options)
|
||||
},
|
||||
}
|
||||
cache.NewReflector(listWatch, &api.Service{}, serviceStore, 0).Run()
|
||||
|
@ -238,11 +237,11 @@ func NewMainKubelet(
|
|||
listWatch := &cache.ListWatch{
|
||||
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
|
||||
options.FieldSelector = fieldSelector
|
||||
return kubeClient.Nodes().List(options)
|
||||
return kubeClient.Legacy().Nodes().List(options)
|
||||
},
|
||||
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
|
||||
options.FieldSelector = fieldSelector
|
||||
return kubeClient.Nodes().Watch(options)
|
||||
return kubeClient.Legacy().Nodes().Watch(options)
|
||||
},
|
||||
}
|
||||
cache.NewReflector(listWatch, &api.Node{}, nodeStore, 0).Run()
|
||||
|
@ -275,7 +274,6 @@ func NewMainKubelet(
|
|||
nodeName: nodeName,
|
||||
dockerClient: dockerClient,
|
||||
kubeClient: kubeClient,
|
||||
clientset: clientset,
|
||||
rootDirectory: rootDirectory,
|
||||
resyncInterval: resyncInterval,
|
||||
containerRefManager: containerRefManager,
|
||||
|
@ -469,8 +467,7 @@ type Kubelet struct {
|
|||
nodeName string
|
||||
dockerClient dockertools.DockerInterface
|
||||
runtimeCache kubecontainer.RuntimeCache
|
||||
kubeClient client.Interface
|
||||
clientset clientset.Interface
|
||||
kubeClient clientset.Interface
|
||||
rootDirectory string
|
||||
podWorkers PodWorkers
|
||||
|
||||
|
@ -1055,12 +1052,12 @@ func (kl *Kubelet) registerWithApiserver() {
|
|||
continue
|
||||
}
|
||||
glog.V(2).Infof("Attempting to register node %s", node.Name)
|
||||
if _, err := kl.kubeClient.Nodes().Create(node); err != nil {
|
||||
if _, err := kl.kubeClient.Legacy().Nodes().Create(node); err != nil {
|
||||
if !apierrors.IsAlreadyExists(err) {
|
||||
glog.V(2).Infof("Unable to register %s with the apiserver: %v", node.Name, err)
|
||||
continue
|
||||
}
|
||||
currentNode, err := kl.kubeClient.Nodes().Get(kl.nodeName)
|
||||
currentNode, err := kl.kubeClient.Legacy().Nodes().Get(kl.nodeName)
|
||||
if err != nil {
|
||||
glog.Errorf("error getting node %q: %v", kl.nodeName, err)
|
||||
continue
|
||||
|
@ -1078,7 +1075,7 @@ func (kl *Kubelet) registerWithApiserver() {
|
|||
"Previously %q had externalID %q; now it is %q; will delete and recreate.",
|
||||
kl.nodeName, node.Spec.ExternalID, currentNode.Spec.ExternalID,
|
||||
)
|
||||
if err := kl.kubeClient.Nodes().Delete(node.Name); err != nil {
|
||||
if err := kl.kubeClient.Legacy().Nodes().Delete(node.Name, nil); err != nil {
|
||||
glog.Errorf("Unable to delete old node: %v", err)
|
||||
} else {
|
||||
glog.Errorf("Deleted old node object %q", kl.nodeName)
|
||||
|
@ -1410,7 +1407,7 @@ func (kl *Kubelet) makeEnvironmentVariables(pod *api.Pod, container *api.Contain
|
|||
key := envVar.ValueFrom.ConfigMapKeyRef.Key
|
||||
configMap, ok := configMaps[name]
|
||||
if !ok {
|
||||
configMap, err = kl.kubeClient.ConfigMaps(pod.Namespace).Get(name)
|
||||
configMap, err = kl.kubeClient.Legacy().ConfigMaps(pod.Namespace).Get(name)
|
||||
if err != nil {
|
||||
return result, err
|
||||
}
|
||||
|
@ -1424,7 +1421,7 @@ func (kl *Kubelet) makeEnvironmentVariables(pod *api.Pod, container *api.Contain
|
|||
key := envVar.ValueFrom.SecretKeyRef.Key
|
||||
secret, ok := secrets[name]
|
||||
if !ok {
|
||||
secret, err = kl.kubeClient.Secrets(pod.Namespace).Get(name)
|
||||
secret, err = kl.kubeClient.Legacy().Secrets(pod.Namespace).Get(name)
|
||||
if err != nil {
|
||||
return result, err
|
||||
}
|
||||
|
@ -1716,7 +1713,7 @@ func (kl *Kubelet) getPullSecretsForPod(pod *api.Pod) ([]api.Secret, error) {
|
|||
pullSecrets := []api.Secret{}
|
||||
|
||||
for _, secretRef := range pod.Spec.ImagePullSecrets {
|
||||
secret, err := kl.kubeClient.Secrets(pod.Namespace).Get(secretRef.Name)
|
||||
secret, err := kl.kubeClient.Legacy().Secrets(pod.Namespace).Get(secretRef.Name)
|
||||
if err != nil {
|
||||
glog.Warningf("Unable to retrieve pull secret %s/%s for %s/%s due to %v. The image pull may not succeed.", pod.Namespace, secretRef.Name, pod.Namespace, pod.Name, err)
|
||||
continue
|
||||
|
@ -2975,7 +2972,7 @@ func (kl *Kubelet) isContainerRuntimeVersionCompatible() error {
|
|||
// tryUpdateNodeStatus tries to update node status to master. If ReconcileCBR0
|
||||
// is set, this function will also confirm that cbr0 is configured correctly.
|
||||
func (kl *Kubelet) tryUpdateNodeStatus() error {
|
||||
node, err := kl.kubeClient.Nodes().Get(kl.nodeName)
|
||||
node, err := kl.kubeClient.Legacy().Nodes().Get(kl.nodeName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error getting node %q: %v", kl.nodeName, err)
|
||||
}
|
||||
|
@ -2990,7 +2987,7 @@ func (kl *Kubelet) tryUpdateNodeStatus() error {
|
|||
if node.Spec.PodCIDR != flannelPodCIDR {
|
||||
node.Spec.PodCIDR = flannelPodCIDR
|
||||
glog.Infof("Updating podcidr to %v", node.Spec.PodCIDR)
|
||||
if updatedNode, err := kl.kubeClient.Nodes().Update(node); err != nil {
|
||||
if updatedNode, err := kl.kubeClient.Legacy().Nodes().Update(node); err != nil {
|
||||
glog.Warningf("Failed to update podCIDR: %v", err)
|
||||
} else {
|
||||
// Update the node resourceVersion so the status update doesn't fail.
|
||||
|
@ -3005,7 +3002,7 @@ func (kl *Kubelet) tryUpdateNodeStatus() error {
|
|||
return err
|
||||
}
|
||||
// Update the current status on the API server
|
||||
_, err = kl.kubeClient.Nodes().UpdateStatus(node)
|
||||
_, err = kl.kubeClient.Legacy().Nodes().UpdateStatus(node)
|
||||
return err
|
||||
}
|
||||
|
||||
|
|
|
@ -40,6 +40,8 @@ import (
|
|||
"k8s.io/kubernetes/pkg/api/unversioned"
|
||||
"k8s.io/kubernetes/pkg/capabilities"
|
||||
"k8s.io/kubernetes/pkg/client/record"
|
||||
"k8s.io/kubernetes/pkg/client/testing/core"
|
||||
"k8s.io/kubernetes/pkg/client/testing/fake"
|
||||
"k8s.io/kubernetes/pkg/client/unversioned/testclient"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cadvisor"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm"
|
||||
|
@ -87,7 +89,7 @@ type TestKubelet struct {
|
|||
kubelet *Kubelet
|
||||
fakeRuntime *kubecontainer.FakeRuntime
|
||||
fakeCadvisor *cadvisor.Mock
|
||||
fakeKubeClient *testclient.Fake
|
||||
fakeKubeClient *fake.Clientset
|
||||
fakeMirrorClient *kubepod.FakeMirrorClient
|
||||
fakeClock *util.FakeClock
|
||||
}
|
||||
|
@ -108,7 +110,7 @@ func newTestKubelet(t *testing.T) *TestKubelet {
|
|||
},
|
||||
}
|
||||
fakeRecorder := &record.FakeRecorder{}
|
||||
fakeKubeClient := &testclient.Fake{}
|
||||
fakeKubeClient := &fake.Clientset{}
|
||||
kubelet := &Kubelet{}
|
||||
kubelet.kubeClient = fakeKubeClient
|
||||
kubelet.os = kubecontainer.FakeOS{}
|
||||
|
@ -2541,7 +2543,7 @@ func TestUpdateNewNodeStatus(t *testing.T) {
|
|||
fakeRuntime.RuntimeType = "docker"
|
||||
fakeRuntime.VersionInfo = "1.5.0"
|
||||
kubeClient := testKubelet.fakeKubeClient
|
||||
kubeClient.ReactionChain = testclient.NewSimpleFake(&api.NodeList{Items: []api.Node{
|
||||
kubeClient.ReactionChain = fake.NewSimpleClientset(&api.NodeList{Items: []api.Node{
|
||||
{ObjectMeta: api.ObjectMeta{Name: testKubeletHostname}},
|
||||
}}).ReactionChain
|
||||
machineInfo := &cadvisorapi.MachineInfo{
|
||||
|
@ -2664,7 +2666,7 @@ func TestUpdateNewNodeOutOfDiskStatusWithTransitionFrequency(t *testing.T) {
|
|||
testKubelet := newTestKubelet(t)
|
||||
kubelet := testKubelet.kubelet
|
||||
kubeClient := testKubelet.fakeKubeClient
|
||||
kubeClient.ReactionChain = testclient.NewSimpleFake(&api.NodeList{Items: []api.Node{
|
||||
kubeClient.ReactionChain = fake.NewSimpleClientset(&api.NodeList{Items: []api.Node{
|
||||
{ObjectMeta: api.ObjectMeta{Name: testKubeletHostname}},
|
||||
}}).ReactionChain
|
||||
machineInfo := &cadvisorapi.MachineInfo{
|
||||
|
@ -2743,7 +2745,7 @@ func TestDockerRuntimeVersion(t *testing.T) {
|
|||
fakeRuntime.VersionInfo = "1.10.0-rc1-fc24"
|
||||
fakeRuntime.APIVersionInfo = "1.22"
|
||||
kubeClient := testKubelet.fakeKubeClient
|
||||
kubeClient.ReactionChain = testclient.NewSimpleFake(&api.NodeList{Items: []api.Node{
|
||||
kubeClient.ReactionChain = fake.NewSimpleClientset(&api.NodeList{Items: []api.Node{
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Name: testKubeletHostname},
|
||||
Spec: api.NodeSpec{},
|
||||
|
@ -2927,7 +2929,7 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
|
|||
fakeRuntime.RuntimeType = "docker"
|
||||
fakeRuntime.VersionInfo = "1.5.0"
|
||||
kubeClient := testKubelet.fakeKubeClient
|
||||
kubeClient.ReactionChain = testclient.NewSimpleFake(&api.NodeList{Items: []api.Node{
|
||||
kubeClient.ReactionChain = fake.NewSimpleClientset(&api.NodeList{Items: []api.Node{
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Name: testKubeletHostname},
|
||||
Spec: api.NodeSpec{},
|
||||
|
@ -3086,7 +3088,7 @@ func TestUpdateExistingNodeOutOfDiskStatusWithTransitionFrequency(t *testing.T)
|
|||
kubelet := testKubelet.kubelet
|
||||
clock := testKubelet.fakeClock
|
||||
kubeClient := testKubelet.fakeKubeClient
|
||||
kubeClient.ReactionChain = testclient.NewSimpleFake(&api.NodeList{Items: []api.Node{
|
||||
kubeClient.ReactionChain = fake.NewSimpleClientset(&api.NodeList{Items: []api.Node{
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Name: testKubeletHostname},
|
||||
Spec: api.NodeSpec{},
|
||||
|
@ -3240,7 +3242,7 @@ func TestUpdateNodeStatusWithoutContainerRuntime(t *testing.T) {
|
|||
fakeRuntime := testKubelet.fakeRuntime
|
||||
fakeRuntime.RuntimeType = "docker"
|
||||
fakeRuntime.VersionInfo = "1.5.0"
|
||||
kubeClient.ReactionChain = testclient.NewSimpleFake(&api.NodeList{Items: []api.Node{
|
||||
kubeClient.ReactionChain = fake.NewSimpleClientset(&api.NodeList{Items: []api.Node{
|
||||
{ObjectMeta: api.ObjectMeta{Name: testKubeletHostname}},
|
||||
}}).ReactionChain
|
||||
mockCadvisor := testKubelet.fakeCadvisor
|
||||
|
@ -3364,7 +3366,7 @@ func TestUpdateNodeStatusError(t *testing.T) {
|
|||
testKubelet := newTestKubelet(t)
|
||||
kubelet := testKubelet.kubelet
|
||||
// No matching node for the kubelet
|
||||
testKubelet.fakeKubeClient.ReactionChain = testclient.NewSimpleFake(&api.NodeList{Items: []api.Node{}}).ReactionChain
|
||||
testKubelet.fakeKubeClient.ReactionChain = fake.NewSimpleClientset(&api.NodeList{Items: []api.Node{}}).ReactionChain
|
||||
|
||||
if err := kubelet.updateNodeStatus(); err == nil {
|
||||
t.Errorf("unexpected non error: %v", err)
|
||||
|
@ -3728,20 +3730,20 @@ func TestRegisterExistingNodeWithApiserver(t *testing.T) {
|
|||
testKubelet := newTestKubelet(t)
|
||||
kubelet := testKubelet.kubelet
|
||||
kubeClient := testKubelet.fakeKubeClient
|
||||
kubeClient.AddReactor("create", "nodes", func(action testclient.Action) (bool, runtime.Object, error) {
|
||||
kubeClient.AddReactor("create", "nodes", func(action core.Action) (bool, runtime.Object, error) {
|
||||
// Return an error on create.
|
||||
return true, &api.Node{}, &apierrors.StatusError{
|
||||
ErrStatus: unversioned.Status{Reason: unversioned.StatusReasonAlreadyExists},
|
||||
}
|
||||
})
|
||||
kubeClient.AddReactor("get", "nodes", func(action testclient.Action) (bool, runtime.Object, error) {
|
||||
kubeClient.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
|
||||
// Return an existing (matching) node on get.
|
||||
return true, &api.Node{
|
||||
ObjectMeta: api.ObjectMeta{Name: testKubeletHostname},
|
||||
Spec: api.NodeSpec{ExternalID: testKubeletHostname},
|
||||
}, nil
|
||||
})
|
||||
kubeClient.AddReactor("*", "*", func(action testclient.Action) (bool, runtime.Object, error) {
|
||||
kubeClient.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) {
|
||||
return true, nil, fmt.Errorf("no reaction implemented for %s", action)
|
||||
})
|
||||
machineInfo := &cadvisorapi.MachineInfo{
|
||||
|
|
|
@@ -28,12 +28,13 @@ import (
"testing"
"text/template"

clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"

docker "github.com/fsouza/go-dockerclient"
cadvisorapi "github.com/google/cadvisor/info/v1"

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/record"
client "k8s.io/kubernetes/pkg/client/unversioned"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/dockertools"
"k8s.io/kubernetes/pkg/kubelet/network"

@@ -111,10 +112,10 @@ func tearDownPlugin(tmpDir string) {
}

type fakeNetworkHost struct {
kubeClient client.Interface
kubeClient clientset.Interface
}

func NewFakeHost(kubeClient client.Interface) *fakeNetworkHost {
func NewFakeHost(kubeClient clientset.Interface) *fakeNetworkHost {
host := &fakeNetworkHost{kubeClient: kubeClient}
return host
}

@@ -123,7 +124,7 @@ func (fnh *fakeNetworkHost) GetPodByName(name, namespace string) (*api.Pod, bool
return nil, false
}

func (fnh *fakeNetworkHost) GetKubeClient() client.Interface {
func (fnh *fakeNetworkHost) GetKubeClient() clientset.Interface {
return nil
}
|
|
|
@@ -21,10 +21,11 @@ import (
"net"
"strings"

clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"

"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
client "k8s.io/kubernetes/pkg/client/unversioned"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
utilerrors "k8s.io/kubernetes/pkg/util/errors"
"k8s.io/kubernetes/pkg/util/validation"

@@ -72,7 +73,7 @@ type Host interface {
GetPodByName(namespace, name string) (*api.Pod, bool)

// GetKubeClient returns a client interface
GetKubeClient() client.Interface
GetKubeClient() clientset.Interface

// GetContainerRuntime returns the container runtime that implements the containers (e.g. docker/rkt)
GetRuntime() kubecontainer.Runtime
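Because Host.GetKubeClient now returns clientset.Interface, a network plugin that needs API access goes through the Legacy() group client. A hedged illustration (the helper name and nil-tolerant behavior are invented; only the interface calls come from the change itself):

package example

import (
	"k8s.io/kubernetes/pkg/kubelet/network"
)

// Hypothetical helper: read a pod's annotations through the host's clientset.
// Test hosts in this commit return nil from GetKubeClient, so tolerate that.
func podAnnotations(host network.Host, namespace, name string) (map[string]string, error) {
	kc := host.GetKubeClient()
	if kc == nil {
		return nil, nil
	}
	pod, err := kc.Legacy().Pods(namespace).Get(name)
	if err != nil {
		return nil, err
	}
	return pod.Annotations, nil
}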
|
|
|
@@ -21,15 +21,15 @@ package network

import (
"k8s.io/kubernetes/pkg/api"
client "k8s.io/kubernetes/pkg/client/unversioned"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)

type fakeNetworkHost struct {
kubeClient client.Interface
kubeClient clientset.Interface
}

func NewFakeHost(kubeClient client.Interface) *fakeNetworkHost {
func NewFakeHost(kubeClient clientset.Interface) *fakeNetworkHost {
host := &fakeNetworkHost{kubeClient: kubeClient}
return host
}

@@ -38,7 +38,7 @@ func (fnh *fakeNetworkHost) GetPodByName(name, namespace string) (*api.Pod, bool
return nil, false
}

func (fnh *fakeNetworkHost) GetKubeClient() client.Interface {
func (fnh *fakeNetworkHost) GetKubeClient() clientset.Interface {
return nil
}
|
|
|
@@ -18,7 +18,7 @@ package kubelet

import (
"k8s.io/kubernetes/pkg/api"
client "k8s.io/kubernetes/pkg/client/unversioned"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)

@@ -32,7 +32,7 @@ func (nh *networkHost) GetPodByName(name, namespace string) (*api.Pod, bool) {
return nh.kubelet.GetPodByName(name, namespace)
}

func (nh *networkHost) GetKubeClient() client.Interface {
func (nh *networkHost) GetKubeClient() clientset.Interface {
return nh.kubelet.kubeClient
}
|
|
|
@@ -20,7 +20,7 @@ import (
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
client "k8s.io/kubernetes/pkg/client/unversioned"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
)

@@ -34,10 +34,10 @@ type MirrorClient interface {
type basicMirrorClient struct {
// mirror pods are stored in the kubelet directly because they need to be
// in sync with the internal pods.
apiserverClient client.Interface
apiserverClient clientset.Interface
}

func NewBasicMirrorClient(apiserverClient client.Interface) MirrorClient {
func NewBasicMirrorClient(apiserverClient clientset.Interface) MirrorClient {
return &basicMirrorClient{apiserverClient: apiserverClient}
}

@@ -55,7 +55,7 @@ func (mc *basicMirrorClient) CreateMirrorPod(pod *api.Pod) error {
}
hash := getPodHash(pod)
copyPod.Annotations[kubetypes.ConfigMirrorAnnotationKey] = hash
apiPod, err := mc.apiserverClient.Pods(copyPod.Namespace).Create(&copyPod)
apiPod, err := mc.apiserverClient.Legacy().Pods(copyPod.Namespace).Create(&copyPod)
if err != nil && errors.IsAlreadyExists(err) {
// Check if the existing pod is the same as the pod we want to create.
if h, ok := apiPod.Annotations[kubetypes.ConfigMirrorAnnotationKey]; ok && h == hash {

@@ -76,7 +76,7 @@ func (mc *basicMirrorClient) DeleteMirrorPod(podFullName string) error {
return err
}
glog.V(4).Infof("Deleting a mirror pod %q", podFullName)
if err := mc.apiserverClient.Pods(namespace).Delete(name, api.NewDeleteOptions(0)); err != nil && !errors.IsNotFound(err) {
if err := mc.apiserverClient.Legacy().Pods(namespace).Delete(name, api.NewDeleteOptions(0)); err != nil && !errors.IsNotFound(err) {
glog.Errorf("Failed deleting a mirror pod %q: %v", podFullName, err)
}
return nil
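With the mirror client built on clientset.Interface, anything satisfying that interface, including the testing fake, can back it. A hypothetical caller sketch; NewBasicMirrorClient and CreateMirrorPod are taken from the hunks above, while the function itself and the pkg/kubelet/pod import path are assumptions inferred from the test imports elsewhere in this commit.

package example

import (
	"k8s.io/kubernetes/pkg/api"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
	kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
)

// Hypothetical caller: the mirror client is constructed from the generated
// clientset interface and used to publish a static pod to the apiserver.
func makeMirrorPod(c clientset.Interface, staticPod *api.Pod) error {
	mc := kubepod.NewBasicMirrorClient(c)
	return mc.CreateMirrorPod(staticPod)
}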
|
|
|
@@ -23,7 +23,7 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/client/unversioned/testclient"
"k8s.io/kubernetes/pkg/client/testing/fake"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
"k8s.io/kubernetes/pkg/kubelet/prober/results"

@@ -97,7 +97,7 @@ func newTestManager() *manager {
refManager := kubecontainer.NewRefManager()
refManager.SetRef(testContainerID, &api.ObjectReference{}) // Suppress prober warnings.
m := NewManager(
status.NewManager(&testclient.Fake{}, kubepod.NewBasicPodManager(nil)),
status.NewManager(&fake.Clientset{}, kubepod.NewBasicPodManager(nil)),
results.NewManager(),
nil, // runner
refManager,
|
|
|
@@ -24,7 +24,7 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/client/unversioned/testclient"
"k8s.io/kubernetes/pkg/client/testing/fake"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
"k8s.io/kubernetes/pkg/kubelet/prober/results"

@@ -118,7 +118,7 @@ func TestDoProbe(t *testing.T) {
}

// Clean up.
m.statusManager = status.NewManager(&testclient.Fake{}, kubepod.NewBasicPodManager(nil))
m.statusManager = status.NewManager(&fake.Clientset{}, kubepod.NewBasicPodManager(nil))
resultsManager(m, probeType).Remove(testContainerID)
}
}
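The substitution in these prober tests is the same everywhere: a zero-value fake.Clientset satisfies clientset.Interface, so it can stand in wherever a testclient.Fake used to. A minimal sketch, assuming the status manager lives at pkg/kubelet/status as the references above suggest:

package example

import (
	"k8s.io/kubernetes/pkg/client/testing/fake"
	kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
	"k8s.io/kubernetes/pkg/kubelet/status"
)

// newTestStatusManager builds a status manager backed by an empty fake
// clientset, the way the prober tests above do.
func newTestStatusManager() status.Manager {
	return status.NewManager(&fake.Clientset{}, kubepod.NewBasicPodManager(nil))
}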
|
|
|
@@ -21,11 +21,12 @@ import (
"sync"
"time"

clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"

"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/unversioned"
client "k8s.io/kubernetes/pkg/client/unversioned"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"

@@ -53,7 +54,7 @@ type podStatusSyncRequest struct {
// Updates pod statuses in apiserver. Writes only when new status has changed.
// All methods are thread-safe.
type manager struct {
kubeClient client.Interface
kubeClient clientset.Interface
podManager kubepod.Manager
// Map from pod UID to sync status of the corresponding pod.
podStatuses map[types.UID]versionedPodStatus

@@ -93,7 +94,7 @@ type Manager interface {

const syncPeriod = 10 * time.Second

func NewManager(kubeClient client.Interface, podManager kubepod.Manager) Manager {
func NewManager(kubeClient clientset.Interface, podManager kubepod.Manager) Manager {
return &manager{
kubeClient: kubeClient,
podManager: podManager,

@@ -346,7 +347,7 @@ func (m *manager) syncBatch() {
// syncPod syncs the given status with the API server. The caller must not hold the lock.
func (m *manager) syncPod(uid types.UID, status versionedPodStatus) {
// TODO: make me easier to express from client code
pod, err := m.kubeClient.Pods(status.podNamespace).Get(status.podName)
pod, err := m.kubeClient.Legacy().Pods(status.podNamespace).Get(status.podName)
if errors.IsNotFound(err) {
glog.V(3).Infof("Pod %q (%s) does not exist on the server", status.podName, uid)
// If the Pod is deleted the status will be cleared in

@@ -366,7 +367,7 @@ func (m *manager) syncPod(uid types.UID, status versionedPodStatus) {
}
pod.Status = status.status
// TODO: handle conflict as a retry, make that easier too.
pod, err = m.kubeClient.Pods(pod.Namespace).UpdateStatus(pod)
pod, err = m.kubeClient.Legacy().Pods(pod.Namespace).UpdateStatus(pod)
if err == nil {
glog.V(3).Infof("Status for pod %q updated successfully: %+v", format.Pod(pod), status)
m.apiStatusVersions[pod.UID] = status.version

@@ -381,7 +382,7 @@ func (m *manager) syncPod(uid types.UID, status versionedPodStatus) {
glog.V(3).Infof("Pod %q is terminated, but some containers are still running", format.Pod(pod))
return
}
if err := m.kubeClient.Pods(pod.Namespace).Delete(pod.Name, api.NewDeleteOptions(0)); err == nil {
if err := m.kubeClient.Legacy().Pods(pod.Namespace).Delete(pod.Name, api.NewDeleteOptions(0)); err == nil {
glog.V(3).Infof("Pod %q fully terminated and removed from etcd", format.Pod(pod))
m.deletePodStatus(uid)
return
|
@ -23,13 +23,15 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
|
||||
"k8s.io/kubernetes/pkg/client/testing/core"
|
||||
"k8s.io/kubernetes/pkg/client/testing/fake"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/errors"
|
||||
"k8s.io/kubernetes/pkg/api/unversioned"
|
||||
client "k8s.io/kubernetes/pkg/client/unversioned"
|
||||
"k8s.io/kubernetes/pkg/client/unversioned/testclient"
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
|
||||
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
|
||||
|
@ -65,7 +67,7 @@ func (m *manager) testSyncBatch() {
|
|||
m.syncBatch()
|
||||
}
|
||||
|
||||
func newTestManager(kubeClient client.Interface) *manager {
|
||||
func newTestManager(kubeClient clientset.Interface) *manager {
|
||||
podManager := kubepod.NewBasicPodManager(kubepod.NewFakeMirrorClient())
|
||||
podManager.AddPod(getTestPod())
|
||||
return NewManager(kubeClient, podManager).(*manager)
|
||||
|
@ -81,8 +83,8 @@ func getRandomPodStatus() api.PodStatus {
|
|||
}
|
||||
}
|
||||
|
||||
func verifyActions(t *testing.T, kubeClient client.Interface, expectedActions []testclient.Action) {
|
||||
actions := kubeClient.(*testclient.Fake).Actions()
|
||||
func verifyActions(t *testing.T, kubeClient clientset.Interface, expectedActions []core.Action) {
|
||||
actions := kubeClient.(*fake.Clientset).Actions()
|
||||
if len(actions) != len(expectedActions) {
|
||||
t.Fatalf("unexpected actions, got: %+v expected: %+v", actions, expectedActions)
|
||||
return
|
||||
|
@ -119,7 +121,7 @@ func verifyUpdates(t *testing.T, manager *manager, expectedUpdates int) {
|
|||
}
|
||||
|
||||
func TestNewStatus(t *testing.T) {
|
||||
syncer := newTestManager(&testclient.Fake{})
|
||||
syncer := newTestManager(&fake.Clientset{})
|
||||
testPod := getTestPod()
|
||||
syncer.SetPodStatus(testPod, getRandomPodStatus())
|
||||
verifyUpdates(t, syncer, 1)
|
||||
|
@ -131,7 +133,7 @@ func TestNewStatus(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestNewStatusPreservesPodStartTime(t *testing.T) {
|
||||
syncer := newTestManager(&testclient.Fake{})
|
||||
syncer := newTestManager(&fake.Clientset{})
|
||||
pod := &api.Pod{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
UID: "12345678",
|
||||
|
@ -163,7 +165,7 @@ func getReadyPodStatus() api.PodStatus {
|
|||
}
|
||||
|
||||
func TestNewStatusSetsReadyTransitionTime(t *testing.T) {
|
||||
syncer := newTestManager(&testclient.Fake{})
|
||||
syncer := newTestManager(&fake.Clientset{})
|
||||
podStatus := getReadyPodStatus()
|
||||
pod := &api.Pod{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
|
@ -183,7 +185,7 @@ func TestNewStatusSetsReadyTransitionTime(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestChangedStatus(t *testing.T) {
|
||||
syncer := newTestManager(&testclient.Fake{})
|
||||
syncer := newTestManager(&fake.Clientset{})
|
||||
testPod := getTestPod()
|
||||
syncer.SetPodStatus(testPod, getRandomPodStatus())
|
||||
syncer.SetPodStatus(testPod, getRandomPodStatus())
|
||||
|
@ -191,7 +193,7 @@ func TestChangedStatus(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestChangedStatusKeepsStartTime(t *testing.T) {
|
||||
syncer := newTestManager(&testclient.Fake{})
|
||||
syncer := newTestManager(&fake.Clientset{})
|
||||
testPod := getTestPod()
|
||||
now := unversioned.Now()
|
||||
firstStatus := getRandomPodStatus()
|
||||
|
@ -209,7 +211,7 @@ func TestChangedStatusKeepsStartTime(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestChangedStatusUpdatesLastTransitionTime(t *testing.T) {
|
||||
syncer := newTestManager(&testclient.Fake{})
|
||||
syncer := newTestManager(&fake.Clientset{})
|
||||
podStatus := getReadyPodStatus()
|
||||
pod := &api.Pod{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
|
@ -239,7 +241,7 @@ func TestChangedStatusUpdatesLastTransitionTime(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestUnchangedStatus(t *testing.T) {
|
||||
syncer := newTestManager(&testclient.Fake{})
|
||||
syncer := newTestManager(&fake.Clientset{})
|
||||
testPod := getTestPod()
|
||||
podStatus := getRandomPodStatus()
|
||||
syncer.SetPodStatus(testPod, podStatus)
|
||||
|
@ -248,7 +250,7 @@ func TestUnchangedStatus(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestUnchangedStatusPreservesLastTransitionTime(t *testing.T) {
|
||||
syncer := newTestManager(&testclient.Fake{})
|
||||
syncer := newTestManager(&fake.Clientset{})
|
||||
podStatus := getReadyPodStatus()
|
||||
pod := &api.Pod{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
|
@ -278,61 +280,61 @@ func TestUnchangedStatusPreservesLastTransitionTime(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestSyncBatchIgnoresNotFound(t *testing.T) {
|
||||
client := testclient.Fake{}
|
||||
client := fake.Clientset{}
|
||||
syncer := newTestManager(&client)
|
||||
client.AddReactor("get", "pods", func(action testclient.Action) (bool, runtime.Object, error) {
|
||||
client.AddReactor("get", "pods", func(action core.Action) (bool, runtime.Object, error) {
|
||||
return true, nil, errors.NewNotFound(api.Resource("pods"), "test-pod")
|
||||
})
|
||||
syncer.SetPodStatus(getTestPod(), getRandomPodStatus())
|
||||
syncer.testSyncBatch()
|
||||
|
||||
verifyActions(t, syncer.kubeClient, []testclient.Action{
|
||||
testclient.GetActionImpl{ActionImpl: testclient.ActionImpl{Verb: "get", Resource: "pods"}},
|
||||
verifyActions(t, syncer.kubeClient, []core.Action{
|
||||
core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: "pods"}},
|
||||
})
|
||||
}
|
||||
|
||||
func TestSyncBatch(t *testing.T) {
|
||||
syncer := newTestManager(&testclient.Fake{})
|
||||
syncer := newTestManager(&fake.Clientset{})
|
||||
testPod := getTestPod()
|
||||
syncer.kubeClient = testclient.NewSimpleFake(testPod)
|
||||
syncer.kubeClient = fake.NewSimpleClientset(testPod)
|
||||
syncer.SetPodStatus(testPod, getRandomPodStatus())
|
||||
syncer.testSyncBatch()
|
||||
verifyActions(t, syncer.kubeClient, []testclient.Action{
|
||||
testclient.GetActionImpl{ActionImpl: testclient.ActionImpl{Verb: "get", Resource: "pods"}},
|
||||
testclient.UpdateActionImpl{ActionImpl: testclient.ActionImpl{Verb: "update", Resource: "pods", Subresource: "status"}},
|
||||
verifyActions(t, syncer.kubeClient, []core.Action{
|
||||
core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: "pods"}},
|
||||
core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: "pods", Subresource: "status"}},
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
func TestSyncBatchChecksMismatchedUID(t *testing.T) {
|
||||
syncer := newTestManager(&testclient.Fake{})
|
||||
syncer := newTestManager(&fake.Clientset{})
|
||||
pod := getTestPod()
|
||||
pod.UID = "first"
|
||||
syncer.podManager.AddPod(pod)
|
||||
differentPod := getTestPod()
|
||||
differentPod.UID = "second"
|
||||
syncer.podManager.AddPod(differentPod)
|
||||
syncer.kubeClient = testclient.NewSimpleFake(pod)
|
||||
syncer.kubeClient = fake.NewSimpleClientset(pod)
|
||||
syncer.SetPodStatus(differentPod, getRandomPodStatus())
|
||||
syncer.testSyncBatch()
|
||||
verifyActions(t, syncer.kubeClient, []testclient.Action{
|
||||
testclient.GetActionImpl{ActionImpl: testclient.ActionImpl{Verb: "get", Resource: "pods"}},
|
||||
verifyActions(t, syncer.kubeClient, []core.Action{
|
||||
core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: "pods"}},
|
||||
})
|
||||
}
|
||||
|
||||
func TestSyncBatchNoDeadlock(t *testing.T) {
|
||||
client := &testclient.Fake{}
|
||||
client := &fake.Clientset{}
|
||||
m := newTestManager(client)
|
||||
pod := getTestPod()
|
||||
|
||||
// Setup fake client.
|
||||
var ret api.Pod
|
||||
var err error
|
||||
client.AddReactor("*", "pods", func(action testclient.Action) (bool, runtime.Object, error) {
|
||||
client.AddReactor("*", "pods", func(action core.Action) (bool, runtime.Object, error) {
|
||||
switch action := action.(type) {
|
||||
case testclient.GetAction:
|
||||
case core.GetAction:
|
||||
assert.Equal(t, pod.Name, action.GetName(), "Unexpeted GetAction: %+v", action)
|
||||
case testclient.UpdateAction:
|
||||
case core.UpdateAction:
|
||||
assert.Equal(t, pod.Name, action.GetObject().(*api.Pod).Name, "Unexpeted UpdateAction: %+v", action)
|
||||
default:
|
||||
assert.Fail(t, "Unexpected Action: %+v", action)
|
||||
|
@ -342,15 +344,15 @@ func TestSyncBatchNoDeadlock(t *testing.T) {
|
|||
|
||||
pod.Status.ContainerStatuses = []api.ContainerStatus{{State: api.ContainerState{Running: &api.ContainerStateRunning{}}}}
|
||||
|
||||
getAction := testclient.GetActionImpl{ActionImpl: testclient.ActionImpl{Verb: "get", Resource: "pods"}}
|
||||
updateAction := testclient.UpdateActionImpl{ActionImpl: testclient.ActionImpl{Verb: "update", Resource: "pods", Subresource: "status"}}
|
||||
getAction := core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: "pods"}}
|
||||
updateAction := core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: "pods", Subresource: "status"}}
|
||||
|
||||
// Pod not found.
|
||||
ret = *pod
|
||||
err = errors.NewNotFound(api.Resource("pods"), pod.Name)
|
||||
m.SetPodStatus(pod, getRandomPodStatus())
|
||||
m.testSyncBatch()
|
||||
verifyActions(t, client, []testclient.Action{getAction})
|
||||
verifyActions(t, client, []core.Action{getAction})
|
||||
client.ClearActions()
|
||||
|
||||
// Pod was recreated.
|
||||
|
@ -358,21 +360,21 @@ func TestSyncBatchNoDeadlock(t *testing.T) {
|
|||
err = nil
|
||||
m.SetPodStatus(pod, getRandomPodStatus())
|
||||
m.testSyncBatch()
|
||||
verifyActions(t, client, []testclient.Action{getAction})
|
||||
verifyActions(t, client, []core.Action{getAction})
|
||||
client.ClearActions()
|
||||
|
||||
// Pod not deleted (success case).
|
||||
ret = *pod
|
||||
m.SetPodStatus(pod, getRandomPodStatus())
|
||||
m.testSyncBatch()
|
||||
verifyActions(t, client, []testclient.Action{getAction, updateAction})
|
||||
verifyActions(t, client, []core.Action{getAction, updateAction})
|
||||
client.ClearActions()
|
||||
|
||||
// Pod is terminated, but still running.
|
||||
pod.DeletionTimestamp = new(unversioned.Time)
|
||||
m.SetPodStatus(pod, getRandomPodStatus())
|
||||
m.testSyncBatch()
|
||||
verifyActions(t, client, []testclient.Action{getAction, updateAction})
|
||||
verifyActions(t, client, []core.Action{getAction, updateAction})
|
||||
client.ClearActions()
|
||||
|
||||
// Pod is terminated successfully.
|
||||
|
@ -380,20 +382,20 @@ func TestSyncBatchNoDeadlock(t *testing.T) {
|
|||
pod.Status.ContainerStatuses[0].State.Terminated = &api.ContainerStateTerminated{}
|
||||
m.SetPodStatus(pod, getRandomPodStatus())
|
||||
m.testSyncBatch()
|
||||
verifyActions(t, client, []testclient.Action{getAction, updateAction})
|
||||
verifyActions(t, client, []core.Action{getAction, updateAction})
|
||||
client.ClearActions()
|
||||
|
||||
// Error case.
|
||||
err = fmt.Errorf("intentional test error")
|
||||
m.SetPodStatus(pod, getRandomPodStatus())
|
||||
m.testSyncBatch()
|
||||
verifyActions(t, client, []testclient.Action{getAction})
|
||||
verifyActions(t, client, []core.Action{getAction})
|
||||
client.ClearActions()
|
||||
}
|
||||
|
||||
func TestStaleUpdates(t *testing.T) {
|
||||
pod := getTestPod()
|
||||
client := testclient.NewSimpleFake(pod)
|
||||
client := fake.NewSimpleClientset(pod)
|
||||
m := newTestManager(client)
|
||||
|
||||
status := api.PodStatus{Message: "initial status"}
|
||||
|
@ -406,16 +408,16 @@ func TestStaleUpdates(t *testing.T) {
|
|||
|
||||
t.Logf("First sync pushes latest status.")
|
||||
m.testSyncBatch()
|
||||
verifyActions(t, m.kubeClient, []testclient.Action{
|
||||
testclient.GetActionImpl{ActionImpl: testclient.ActionImpl{Verb: "get", Resource: "pods"}},
|
||||
testclient.UpdateActionImpl{ActionImpl: testclient.ActionImpl{Verb: "update", Resource: "pods", Subresource: "status"}},
|
||||
verifyActions(t, m.kubeClient, []core.Action{
|
||||
core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: "pods"}},
|
||||
core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: "pods", Subresource: "status"}},
|
||||
})
|
||||
client.ClearActions()
|
||||
|
||||
for i := 0; i < 2; i++ {
|
||||
t.Logf("Next 2 syncs should be ignored (%d).", i)
|
||||
m.testSyncBatch()
|
||||
verifyActions(t, m.kubeClient, []testclient.Action{})
|
||||
verifyActions(t, m.kubeClient, []core.Action{})
|
||||
}
|
||||
|
||||
t.Log("Unchanged status should not send an update.")
|
||||
|
@ -427,9 +429,9 @@ func TestStaleUpdates(t *testing.T) {
|
|||
|
||||
m.SetPodStatus(pod, status)
|
||||
m.testSyncBatch()
|
||||
verifyActions(t, m.kubeClient, []testclient.Action{
|
||||
testclient.GetActionImpl{ActionImpl: testclient.ActionImpl{Verb: "get", Resource: "pods"}},
|
||||
testclient.UpdateActionImpl{ActionImpl: testclient.ActionImpl{Verb: "update", Resource: "pods", Subresource: "status"}},
|
||||
verifyActions(t, m.kubeClient, []core.Action{
|
||||
core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: "pods"}},
|
||||
core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: "pods", Subresource: "status"}},
|
||||
})
|
||||
|
||||
// Nothing stuck in the pipe.
|
||||
|
@ -477,7 +479,7 @@ func TestStaticPodStatus(t *testing.T) {
|
|||
kubetypes.ConfigSourceAnnotationKey: "api",
|
||||
kubetypes.ConfigMirrorAnnotationKey: "mirror",
|
||||
}
|
||||
client := testclient.NewSimpleFake(mirrorPod)
|
||||
client := fake.NewSimpleClientset(mirrorPod)
|
||||
m := newTestManager(client)
|
||||
m.podManager.AddPod(staticPod)
|
||||
m.podManager.AddPod(mirrorPod)
|
||||
|
@ -497,11 +499,11 @@ func TestStaticPodStatus(t *testing.T) {
|
|||
assert.True(t, isStatusEqual(&status, &retrievedStatus), "Expected: %+v, Got: %+v", status, retrievedStatus)
|
||||
// Should translate mirrorPod / staticPod UID.
|
||||
m.testSyncBatch()
|
||||
verifyActions(t, m.kubeClient, []testclient.Action{
|
||||
testclient.GetActionImpl{ActionImpl: testclient.ActionImpl{Verb: "get", Resource: "pods"}},
|
||||
testclient.UpdateActionImpl{ActionImpl: testclient.ActionImpl{Verb: "update", Resource: "pods", Subresource: "status"}},
|
||||
verifyActions(t, m.kubeClient, []core.Action{
|
||||
core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: "pods"}},
|
||||
core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: "pods", Subresource: "status"}},
|
||||
})
|
||||
updateAction := client.Actions()[1].(testclient.UpdateActionImpl)
|
||||
updateAction := client.Actions()[1].(core.UpdateActionImpl)
|
||||
updatedPod := updateAction.Object.(*api.Pod)
|
||||
assert.Equal(t, mirrorPod.UID, updatedPod.UID, "Expected mirrorPod (%q), but got %q", mirrorPod.UID, updatedPod.UID)
|
||||
assert.True(t, isStatusEqual(&status, &updatedPod.Status), "Expected: %+v, Got: %+v", status, updatedPod.Status)
|
||||
|
@ -509,7 +511,7 @@ func TestStaticPodStatus(t *testing.T) {
|
|||
|
||||
// No changes.
|
||||
m.testSyncBatch()
|
||||
verifyActions(t, m.kubeClient, []testclient.Action{})
|
||||
verifyActions(t, m.kubeClient, []core.Action{})
|
||||
|
||||
// Mirror pod identity changes.
|
||||
m.podManager.DeletePod(mirrorPod)
|
||||
|
@ -518,11 +520,11 @@ func TestStaticPodStatus(t *testing.T) {
|
|||
m.podManager.AddPod(mirrorPod)
|
||||
// Expect update to new mirrorPod.
|
||||
m.testSyncBatch()
|
||||
verifyActions(t, m.kubeClient, []testclient.Action{
|
||||
testclient.GetActionImpl{ActionImpl: testclient.ActionImpl{Verb: "get", Resource: "pods"}},
|
||||
testclient.UpdateActionImpl{ActionImpl: testclient.ActionImpl{Verb: "update", Resource: "pods", Subresource: "status"}},
|
||||
verifyActions(t, m.kubeClient, []core.Action{
|
||||
core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: "pods"}},
|
||||
core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: "pods", Subresource: "status"}},
|
||||
})
|
||||
updateAction = client.Actions()[1].(testclient.UpdateActionImpl)
|
||||
updateAction = client.Actions()[1].(core.UpdateActionImpl)
|
||||
updatedPod = updateAction.Object.(*api.Pod)
|
||||
assert.Equal(t, mirrorPod.UID, updatedPod.UID, "Expected mirrorPod (%q), but got %q", mirrorPod.UID, updatedPod.UID)
|
||||
assert.True(t, isStatusEqual(&status, &updatedPod.Status), "Expected: %+v, Got: %+v", status, updatedPod.Status)
|
||||
|
@ -575,7 +577,7 @@ func TestSetContainerReadiness(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
m := newTestManager(&testclient.Fake{})
|
||||
m := newTestManager(&fake.Clientset{})
|
||||
|
||||
t.Log("Setting readiness before status should fail.")
|
||||
m.SetContainerReadiness(pod, cID1, true)
|
||||
|
@ -616,7 +618,7 @@ func TestSetContainerReadiness(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestSyncBatchCleanupVersions(t *testing.T) {
|
||||
m := newTestManager(&testclient.Fake{})
|
||||
m := newTestManager(&fake.Clientset{})
|
||||
testPod := getTestPod()
|
||||
mirrorPod := getTestPod()
|
||||
mirrorPod.UID = "mirror-uid"
|
||||
|
@ -657,7 +659,7 @@ func TestSyncBatchCleanupVersions(t *testing.T) {
|
|||
|
||||
func TestReconcilePodStatus(t *testing.T) {
|
||||
testPod := getTestPod()
|
||||
client := testclient.NewSimpleFake(testPod)
|
||||
client := fake.NewSimpleClientset(testPod)
|
||||
syncer := newTestManager(client)
|
||||
syncer.SetPodStatus(testPod, getRandomPodStatus())
|
||||
// Call syncBatch directly to test reconcile
|
||||
|
@ -677,7 +679,7 @@ func TestReconcilePodStatus(t *testing.T) {
|
|||
}
|
||||
client.ClearActions()
|
||||
syncer.syncBatch()
|
||||
verifyActions(t, client, []testclient.Action{})
|
||||
verifyActions(t, client, []core.Action{})
|
||||
|
||||
// If the pod status is the same, only the timestamp is in Rfc3339 format (lower precision without nanosecond),
|
||||
// a reconciliation is not needed, syncBatch should do nothing.
|
||||
|
@ -691,7 +693,7 @@ func TestReconcilePodStatus(t *testing.T) {
|
|||
}
|
||||
client.ClearActions()
|
||||
syncer.syncBatch()
|
||||
verifyActions(t, client, []testclient.Action{})
|
||||
verifyActions(t, client, []core.Action{})
|
||||
|
||||
// If the pod status is different, a reconciliation is needed, syncBatch should trigger an update
|
||||
testPod.Status = getRandomPodStatus()
|
||||
|
@ -701,9 +703,9 @@ func TestReconcilePodStatus(t *testing.T) {
|
|||
}
|
||||
client.ClearActions()
|
||||
syncer.syncBatch()
|
||||
verifyActions(t, client, []testclient.Action{
|
||||
testclient.GetActionImpl{ActionImpl: testclient.ActionImpl{Verb: "get", Resource: "pods"}},
|
||||
testclient.UpdateActionImpl{ActionImpl: testclient.ActionImpl{Verb: "update", Resource: "pods", Subresource: "status"}},
|
||||
verifyActions(t, client, []core.Action{
|
||||
core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: "pods"}},
|
||||
core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: "pods", Subresource: "status"}},
|
||||
})
|
||||
}
|
||||
|
||||
|
@ -719,7 +721,7 @@ func TestDeletePods(t *testing.T) {
|
|||
pod := getTestPod()
|
||||
// Set the deletion timestamp.
|
||||
pod.DeletionTimestamp = new(unversioned.Time)
|
||||
client := testclient.NewSimpleFake(pod)
|
||||
client := fake.NewSimpleClientset(pod)
|
||||
m := newTestManager(client)
|
||||
m.podManager.AddPod(pod)
|
||||
|
||||
|
@ -730,10 +732,10 @@ func TestDeletePods(t *testing.T) {
|
|||
|
||||
m.testSyncBatch()
|
||||
// Expect to see an delete action.
|
||||
verifyActions(t, m.kubeClient, []testclient.Action{
|
||||
testclient.GetActionImpl{ActionImpl: testclient.ActionImpl{Verb: "get", Resource: "pods"}},
|
||||
testclient.UpdateActionImpl{ActionImpl: testclient.ActionImpl{Verb: "update", Resource: "pods", Subresource: "status"}},
|
||||
testclient.DeleteActionImpl{ActionImpl: testclient.ActionImpl{Verb: "delete", Resource: "pods"}},
|
||||
verifyActions(t, m.kubeClient, []core.Action{
|
||||
core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: "pods"}},
|
||||
core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: "pods", Subresource: "status"}},
|
||||
core.DeleteActionImpl{ActionImpl: core.ActionImpl{Verb: "delete", Resource: "pods"}},
|
||||
})
|
||||
}
|
||||
|
||||
|
@ -748,7 +750,7 @@ func TestDoNotDeleteMirrorPods(t *testing.T) {
|
|||
}
|
||||
// Set the deletion timestamp.
|
||||
mirrorPod.DeletionTimestamp = new(unversioned.Time)
|
||||
client := testclient.NewSimpleFake(mirrorPod)
|
||||
client := fake.NewSimpleClientset(mirrorPod)
|
||||
m := newTestManager(client)
|
||||
m.podManager.AddPod(staticPod)
|
||||
m.podManager.AddPod(mirrorPod)
|
||||
|
@ -764,8 +766,8 @@ func TestDoNotDeleteMirrorPods(t *testing.T) {
|
|||
|
||||
m.testSyncBatch()
|
||||
// Expect not to see an delete action.
|
||||
verifyActions(t, m.kubeClient, []testclient.Action{
|
||||
testclient.GetActionImpl{ActionImpl: testclient.ActionImpl{Verb: "get", Resource: "pods"}},
|
||||
testclient.UpdateActionImpl{ActionImpl: testclient.ActionImpl{Verb: "update", Resource: "pods", Subresource: "status"}},
|
||||
verifyActions(t, m.kubeClient, []core.Action{
|
||||
core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: "pods"}},
|
||||
core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: "pods", Subresource: "status"}},
|
||||
})
|
||||
}
|
||||
|
|
|
@@ -55,7 +55,7 @@ func (vh *volumeHost) GetPodPluginDir(podUID types.UID, pluginName string) strin
}

func (vh *volumeHost) GetKubeClient() clientset.Interface {
return vh.kubelet.clientset
return vh.kubelet.kubeClient
}

func (vh *volumeHost) NewWrapperBuilder(volName string, spec volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Builder, error) {
|
||||
|
|
|
@ -21,7 +21,7 @@ import (
|
|||
|
||||
kubeletapp "k8s.io/kubernetes/cmd/kubelet/app"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
client "k8s.io/kubernetes/pkg/client/unversioned"
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cadvisor"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm"
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
|
@ -38,7 +38,7 @@ type HollowKubelet struct {
|
|||
|
||||
func NewHollowKubelet(
|
||||
nodeName string,
|
||||
client *client.Client,
|
||||
client *clientset.Clientset,
|
||||
cadvisorInterface cadvisor.Interface,
|
||||
dockerClient dockertools.DockerInterface,
|
||||
kubeletPort, kubeletReadOnlyPort int,
|
||||
|
|
|
@ -26,8 +26,8 @@ import (
|
|||
"github.com/dgrijalva/jwt-go"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
client "k8s.io/kubernetes/pkg/client/unversioned"
|
||||
"k8s.io/kubernetes/pkg/client/unversioned/testclient"
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
|
||||
"k8s.io/kubernetes/pkg/client/testing/fake"
|
||||
serviceaccountcontroller "k8s.io/kubernetes/pkg/controller/serviceaccount"
|
||||
"k8s.io/kubernetes/pkg/serviceaccount"
|
||||
)
|
||||
|
@ -159,7 +159,7 @@ func TestTokenGenerateAndValidate(t *testing.T) {
|
|||
}
|
||||
|
||||
testCases := map[string]struct {
|
||||
Client client.Interface
|
||||
Client clientset.Interface
|
||||
Keys []*rsa.PublicKey
|
||||
|
||||
ExpectedErr bool
|
||||
|
@ -199,7 +199,7 @@ func TestTokenGenerateAndValidate(t *testing.T) {
|
|||
ExpectedGroups: []string{"system:serviceaccounts", "system:serviceaccounts:test"},
|
||||
},
|
||||
"valid lookup": {
|
||||
Client: testclient.NewSimpleFake(serviceAccount, secret),
|
||||
Client: fake.NewSimpleClientset(serviceAccount, secret),
|
||||
Keys: []*rsa.PublicKey{getPublicKey(publicKey)},
|
||||
ExpectedErr: false,
|
||||
ExpectedOK: true,
|
||||
|
@ -208,13 +208,13 @@ func TestTokenGenerateAndValidate(t *testing.T) {
|
|||
ExpectedGroups: []string{"system:serviceaccounts", "system:serviceaccounts:test"},
|
||||
},
|
||||
"invalid secret lookup": {
|
||||
Client: testclient.NewSimpleFake(serviceAccount),
|
||||
Client: fake.NewSimpleClientset(serviceAccount),
|
||||
Keys: []*rsa.PublicKey{getPublicKey(publicKey)},
|
||||
ExpectedErr: true,
|
||||
ExpectedOK: false,
|
||||
},
|
||||
"invalid serviceaccount lookup": {
|
||||
Client: testclient.NewSimpleFake(secret),
|
||||
Client: fake.NewSimpleClientset(secret),
|
||||
Keys: []*rsa.PublicKey{getPublicKey(publicKey)},
|
||||
ExpectedErr: true,
|
||||
ExpectedOK: false,
|
||||
|
|
|
@ -19,12 +19,13 @@ package admit
|
|||
import (
|
||||
"io"
|
||||
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
|
||||
|
||||
"k8s.io/kubernetes/pkg/admission"
|
||||
client "k8s.io/kubernetes/pkg/client/unversioned"
|
||||
)
|
||||
|
||||
func init() {
|
||||
admission.RegisterPlugin("AlwaysAdmit", func(client client.Interface, config io.Reader) (admission.Interface, error) {
|
||||
admission.RegisterPlugin("AlwaysAdmit", func(client clientset.Interface, config io.Reader) (admission.Interface, error) {
|
||||
return NewAlwaysAdmit(), nil
|
||||
})
|
||||
}
|
||||
|
|
|
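
This AlwaysAdmit hunk is the first of several that change the admission.RegisterPlugin factory to accept the generated clientset.Interface instead of the unversioned client.Interface. A hedged sketch of what an out-of-tree registration could look like against the new signature; the plugin name "NoOpExample" and the noOp type are hypothetical:

package example

import (
    "io"

    "k8s.io/kubernetes/pkg/admission"
    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
)

// noOp is a hypothetical admission controller that admits everything.
type noOp struct {
    *admission.Handler
}

func (noOp) Admit(a admission.Attributes) error { return nil }

func init() {
    // The factory now receives a clientset.Interface; plugins that do not
    // need API access can simply ignore it, as AlwaysAdmit does above.
    admission.RegisterPlugin("NoOpExample", func(c clientset.Interface, config io.Reader) (admission.Interface, error) {
        return noOp{Handler: admission.NewHandler(admission.Create, admission.Update)}, nil
    })
}
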
@ -27,14 +27,15 @@ package alwayspullimages
|
|||
import (
|
||||
"io"
|
||||
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
|
||||
|
||||
"k8s.io/kubernetes/pkg/admission"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
apierrors "k8s.io/kubernetes/pkg/api/errors"
|
||||
client "k8s.io/kubernetes/pkg/client/unversioned"
|
||||
)
|
||||
|
||||
func init() {
|
||||
admission.RegisterPlugin("AlwaysPullImages", func(client client.Interface, config io.Reader) (admission.Interface, error) {
|
||||
admission.RegisterPlugin("AlwaysPullImages", func(client clientset.Interface, config io.Reader) (admission.Interface, error) {
|
||||
return NewAlwaysPullImages(), nil
|
||||
})
|
||||
}
|
||||
|
|
|
@ -20,12 +20,13 @@ import (
|
|||
"errors"
|
||||
"io"
|
||||
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
|
||||
|
||||
"k8s.io/kubernetes/pkg/admission"
|
||||
client "k8s.io/kubernetes/pkg/client/unversioned"
|
||||
)
|
||||
|
||||
func init() {
|
||||
admission.RegisterPlugin("AlwaysDeny", func(client client.Interface, config io.Reader) (admission.Interface, error) {
|
||||
admission.RegisterPlugin("AlwaysDeny", func(client clientset.Interface, config io.Reader) (admission.Interface, error) {
|
||||
return NewAlwaysDeny(), nil
|
||||
})
|
||||
}
|
||||
|
|
|
@ -20,21 +20,22 @@ import (
|
|||
"fmt"
|
||||
"io"
|
||||
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
|
||||
|
||||
"k8s.io/kubernetes/pkg/admission"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/errors"
|
||||
"k8s.io/kubernetes/pkg/api/rest"
|
||||
client "k8s.io/kubernetes/pkg/client/unversioned"
|
||||
)
|
||||
|
||||
func init() {
|
||||
admission.RegisterPlugin("DenyEscalatingExec", func(client client.Interface, config io.Reader) (admission.Interface, error) {
|
||||
admission.RegisterPlugin("DenyEscalatingExec", func(client clientset.Interface, config io.Reader) (admission.Interface, error) {
|
||||
return NewDenyEscalatingExec(client), nil
|
||||
})
|
||||
|
||||
// This is for legacy support of the DenyExecOnPrivileged admission controller. Most
|
||||
// of the time DenyEscalatingExec should be preferred.
|
||||
admission.RegisterPlugin("DenyExecOnPrivileged", func(client client.Interface, config io.Reader) (admission.Interface, error) {
|
||||
admission.RegisterPlugin("DenyExecOnPrivileged", func(client clientset.Interface, config io.Reader) (admission.Interface, error) {
|
||||
return NewDenyExecOnPrivileged(client), nil
|
||||
})
|
||||
}
|
||||
|
@ -43,7 +44,7 @@ func init() {
|
|||
// a pod using host based configurations.
|
||||
type denyExec struct {
|
||||
*admission.Handler
|
||||
client client.Interface
|
||||
client clientset.Interface
|
||||
|
||||
// these flags control which items will be checked to deny exec/attach
|
||||
hostIPC bool
|
||||
|
@ -53,7 +54,7 @@ type denyExec struct {
|
|||
|
||||
// NewDenyEscalatingExec creates a new admission controller that denies an exec operation on a pod
|
||||
// using host based configurations.
|
||||
func NewDenyEscalatingExec(client client.Interface) admission.Interface {
|
||||
func NewDenyEscalatingExec(client clientset.Interface) admission.Interface {
|
||||
return &denyExec{
|
||||
Handler: admission.NewHandler(admission.Connect),
|
||||
client: client,
|
||||
|
@ -66,7 +67,7 @@ func NewDenyEscalatingExec(client client.Interface) admission.Interface {
|
|||
// NewDenyExecOnPrivileged creates a new admission controller that is only checking the privileged
|
||||
// option. This is for legacy support of the DenyExecOnPrivileged admission controller. Most
|
||||
// of the time NewDenyEscalatingExec should be preferred.
|
||||
func NewDenyExecOnPrivileged(client client.Interface) admission.Interface {
|
||||
func NewDenyExecOnPrivileged(client clientset.Interface) admission.Interface {
|
||||
return &denyExec{
|
||||
Handler: admission.NewHandler(admission.Connect),
|
||||
client: client,
|
||||
|
@ -85,7 +86,7 @@ func (d *denyExec) Admit(a admission.Attributes) (err error) {
|
|||
if connectRequest.ResourcePath != "pods/exec" && connectRequest.ResourcePath != "pods/attach" {
|
||||
return nil
|
||||
}
|
||||
pod, err := d.client.Pods(a.GetNamespace()).Get(connectRequest.Name)
|
||||
pod, err := d.client.Legacy().Pods(a.GetNamespace()).Get(connectRequest.Name)
|
||||
if err != nil {
|
||||
return admission.NewForbidden(a, err)
|
||||
}
|
||||
|
|
|
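
A recurring change in this PR is that direct resource calls such as client.Pods(ns).Get(name) become client.Legacy().Pods(ns).Get(name), because the generated clientset reaches the v1 API through a group accessor. A small sketch of the call-site difference, with illustrative namespace and pod names:

package example

import (
    "k8s.io/kubernetes/pkg/api"
    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
)

// getPod shows the new call path: the v1 API is reached through the
// clientset's Legacy() group accessor instead of directly on the client.
func getPod(c clientset.Interface, namespace, name string) (*api.Pod, error) {
    // old unversioned client: c.Pods(namespace).Get(name)
    return c.Legacy().Pods(namespace).Get(name)
}
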
@ -22,6 +22,8 @@ import (
|
|||
"k8s.io/kubernetes/pkg/admission"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/rest"
|
||||
"k8s.io/kubernetes/pkg/client/testing/core"
|
||||
"k8s.io/kubernetes/pkg/client/testing/fake"
|
||||
"k8s.io/kubernetes/pkg/client/unversioned/testclient"
|
||||
"k8s.io/kubernetes/pkg/runtime"
|
||||
)
|
||||
|
@ -87,8 +89,8 @@ func TestAdmission(t *testing.T) {
|
|||
}
|
||||
|
||||
func testAdmission(t *testing.T, pod *api.Pod, handler *denyExec, shouldAccept bool) {
|
||||
mockClient := &testclient.Fake{}
|
||||
mockClient.AddReactor("get", "pods", func(action testclient.Action) (bool, runtime.Object, error) {
|
||||
mockClient := &fake.Clientset{}
|
||||
mockClient.AddReactor("get", "pods", func(action core.Action) (bool, runtime.Object, error) {
|
||||
if action.(testclient.GetAction).GetName() == pod.Name {
|
||||
return true, pod, nil
|
||||
}
|
||||
|
|
|
@ -23,12 +23,13 @@ import (
|
|||
"strings"
|
||||
"time"
|
||||
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/kubernetes/pkg/admission"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
apierrors "k8s.io/kubernetes/pkg/api/errors"
|
||||
"k8s.io/kubernetes/pkg/api/resource"
|
||||
client "k8s.io/kubernetes/pkg/client/unversioned"
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -46,7 +47,7 @@ const (
|
|||
|
||||
// WARNING: this feature is experimental and will definitely change.
|
||||
func init() {
|
||||
admission.RegisterPlugin("InitialResources", func(client client.Interface, config io.Reader) (admission.Interface, error) {
|
||||
admission.RegisterPlugin("InitialResources", func(client clientset.Interface, config io.Reader) (admission.Interface, error) {
|
||||
s, err := newDataSource(*source)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|
|
@ -22,12 +22,13 @@ import (
|
|||
"sort"
|
||||
"strings"
|
||||
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
|
||||
|
||||
"k8s.io/kubernetes/pkg/admission"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/meta"
|
||||
"k8s.io/kubernetes/pkg/api/resource"
|
||||
"k8s.io/kubernetes/pkg/client/cache"
|
||||
client "k8s.io/kubernetes/pkg/client/unversioned"
|
||||
"k8s.io/kubernetes/pkg/runtime"
|
||||
utilerrors "k8s.io/kubernetes/pkg/util/errors"
|
||||
"k8s.io/kubernetes/pkg/watch"
|
||||
|
@ -38,7 +39,7 @@ const (
|
|||
)
|
||||
|
||||
func init() {
|
||||
admission.RegisterPlugin("LimitRanger", func(client client.Interface, config io.Reader) (admission.Interface, error) {
|
||||
admission.RegisterPlugin("LimitRanger", func(client clientset.Interface, config io.Reader) (admission.Interface, error) {
|
||||
return NewLimitRanger(client, Limit), nil
|
||||
})
|
||||
}
|
||||
|
@ -46,7 +47,7 @@ func init() {
|
|||
// limitRanger enforces usage limits on a per resource basis in the namespace
|
||||
type limitRanger struct {
|
||||
*admission.Handler
|
||||
client client.Interface
|
||||
client clientset.Interface
|
||||
limitFunc LimitFunc
|
||||
indexer cache.Indexer
|
||||
}
|
||||
|
@ -94,13 +95,13 @@ func (l *limitRanger) Admit(a admission.Attributes) (err error) {
|
|||
}
|
||||
|
||||
// NewLimitRanger returns an object that enforces limits based on the supplied limit function
|
||||
func NewLimitRanger(client client.Interface, limitFunc LimitFunc) admission.Interface {
|
||||
func NewLimitRanger(client clientset.Interface, limitFunc LimitFunc) admission.Interface {
|
||||
lw := &cache.ListWatch{
|
||||
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
|
||||
return client.LimitRanges(api.NamespaceAll).List(options)
|
||||
return client.Legacy().LimitRanges(api.NamespaceAll).List(options)
|
||||
},
|
||||
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
|
||||
return client.LimitRanges(api.NamespaceAll).Watch(options)
|
||||
return client.Legacy().LimitRanges(api.NamespaceAll).Watch(options)
|
||||
},
|
||||
}
|
||||
indexer, reflector := cache.NewNamespaceKeyedIndexerAndReflector(lw, &api.LimitRange{}, 0)
|
||||
|
|
|
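
The LimitRanger constructor above now routes its reflector's ListWatch through client.Legacy(). A minimal sketch of that pattern in isolation, assuming cache.NewNamespaceKeyedIndexerAndReflector returns (cache.Indexer, *cache.Reflector) as the call above suggests:

package example

import (
    "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/client/cache"
    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
    "k8s.io/kubernetes/pkg/runtime"
    "k8s.io/kubernetes/pkg/watch"
)

// newLimitRangeIndexer builds a namespace-keyed indexer and reflector over
// LimitRanges, listing and watching through the clientset's legacy group.
func newLimitRangeIndexer(client clientset.Interface) (cache.Indexer, *cache.Reflector) {
    lw := &cache.ListWatch{
        ListFunc: func(options api.ListOptions) (runtime.Object, error) {
            return client.Legacy().LimitRanges(api.NamespaceAll).List(options)
        },
        WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
            return client.Legacy().LimitRanges(api.NamespaceAll).Watch(options)
        },
    }
    // zero resync period, mirroring the call in the hunk above
    return cache.NewNamespaceKeyedIndexerAndReflector(lw, &api.LimitRange{}, 0)
}
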
@ -24,7 +24,7 @@ import (
|
|||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/resource"
|
||||
"k8s.io/kubernetes/pkg/client/cache"
|
||||
"k8s.io/kubernetes/pkg/client/unversioned/testclient"
|
||||
"k8s.io/kubernetes/pkg/client/testing/fake"
|
||||
)
|
||||
|
||||
func getResourceList(cpu, memory string) api.ResourceList {
|
||||
|
@ -429,7 +429,7 @@ func TestPodLimitFuncApplyDefault(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestLimitRangerIgnoresSubresource(t *testing.T) {
|
||||
client := testclient.NewSimpleFake()
|
||||
client := fake.NewSimpleClientset()
|
||||
indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
|
||||
handler := &limitRanger{
|
||||
Handler: admission.NewHandler(admission.Create, admission.Update),
|
||||
|
|
|
@ -19,17 +19,18 @@ package autoprovision
|
|||
import (
|
||||
"io"
|
||||
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
|
||||
|
||||
"k8s.io/kubernetes/pkg/admission"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/errors"
|
||||
"k8s.io/kubernetes/pkg/client/cache"
|
||||
client "k8s.io/kubernetes/pkg/client/unversioned"
|
||||
"k8s.io/kubernetes/pkg/runtime"
|
||||
"k8s.io/kubernetes/pkg/watch"
|
||||
)
|
||||
|
||||
func init() {
|
||||
admission.RegisterPlugin("NamespaceAutoProvision", func(client client.Interface, config io.Reader) (admission.Interface, error) {
|
||||
admission.RegisterPlugin("NamespaceAutoProvision", func(client clientset.Interface, config io.Reader) (admission.Interface, error) {
|
||||
return NewProvision(client), nil
|
||||
})
|
||||
}
|
||||
|
@ -39,7 +40,7 @@ func init() {
|
|||
// It is useful in deployments that do not want to restrict creation of a namespace prior to its usage.
|
||||
type provision struct {
|
||||
*admission.Handler
|
||||
client client.Interface
|
||||
client clientset.Interface
|
||||
store cache.Store
|
||||
}
|
||||
|
||||
|
@ -65,7 +66,7 @@ func (p *provision) Admit(a admission.Attributes) (err error) {
|
|||
if exists {
|
||||
return nil
|
||||
}
|
||||
_, err = p.client.Namespaces().Create(namespace)
|
||||
_, err = p.client.Legacy().Namespaces().Create(namespace)
|
||||
if err != nil && !errors.IsAlreadyExists(err) {
|
||||
return admission.NewForbidden(a, err)
|
||||
}
|
||||
|
@ -73,15 +74,15 @@ func (p *provision) Admit(a admission.Attributes) (err error) {
|
|||
}
|
||||
|
||||
// NewProvision creates a new namespace provision admission control handler
|
||||
func NewProvision(c client.Interface) admission.Interface {
|
||||
func NewProvision(c clientset.Interface) admission.Interface {
|
||||
store := cache.NewStore(cache.MetaNamespaceKeyFunc)
|
||||
reflector := cache.NewReflector(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
|
||||
return c.Namespaces().List(options)
|
||||
return c.Legacy().Namespaces().List(options)
|
||||
},
|
||||
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
|
||||
return c.Namespaces().Watch(options)
|
||||
return c.Legacy().Namespaces().Watch(options)
|
||||
},
|
||||
},
|
||||
&api.Namespace{},
|
||||
|
@ -92,7 +93,7 @@ func NewProvision(c client.Interface) admission.Interface {
|
|||
return createProvision(c, store)
|
||||
}
|
||||
|
||||
func createProvision(c client.Interface, store cache.Store) admission.Interface {
|
||||
func createProvision(c clientset.Interface, store cache.Store) admission.Interface {
|
||||
return &provision{
|
||||
Handler: admission.NewHandler(admission.Create),
|
||||
client: c,
|
||||
|
|
|
@ -23,14 +23,15 @@ import (
|
|||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/errors"
|
||||
"k8s.io/kubernetes/pkg/client/cache"
|
||||
"k8s.io/kubernetes/pkg/client/unversioned/testclient"
|
||||
"k8s.io/kubernetes/pkg/client/testing/core"
|
||||
"k8s.io/kubernetes/pkg/client/testing/fake"
|
||||
"k8s.io/kubernetes/pkg/runtime"
|
||||
)
|
||||
|
||||
// TestAdmission verifies a namespace is created on create requests for namespace-managed resources
|
||||
func TestAdmission(t *testing.T) {
|
||||
namespace := "test"
|
||||
mockClient := &testclient.Fake{}
|
||||
mockClient := &fake.Clientset{}
|
||||
handler := &provision{
|
||||
client: mockClient,
|
||||
store: cache.NewStore(cache.MetaNamespaceKeyFunc),
|
||||
|
@ -58,7 +59,7 @@ func TestAdmission(t *testing.T) {
|
|||
// TestAdmissionNamespaceExists verifies that no client call is made when a namespace already exists
|
||||
func TestAdmissionNamespaceExists(t *testing.T) {
|
||||
namespace := "test"
|
||||
mockClient := &testclient.Fake{}
|
||||
mockClient := &fake.Clientset{}
|
||||
store := cache.NewStore(cache.MetaNamespaceKeyFunc)
|
||||
store.Add(&api.Namespace{
|
||||
ObjectMeta: api.ObjectMeta{Name: namespace},
|
||||
|
@ -86,7 +87,7 @@ func TestAdmissionNamespaceExists(t *testing.T) {
|
|||
// TestIgnoreAdmission validates that a request is ignored if it's not a create
|
||||
func TestIgnoreAdmission(t *testing.T) {
|
||||
namespace := "test"
|
||||
mockClient := &testclient.Fake{}
|
||||
mockClient := &fake.Clientset{}
|
||||
handler := admission.NewChainHandler(createProvision(mockClient, nil))
|
||||
pod := api.Pod{
|
||||
ObjectMeta: api.ObjectMeta{Name: "123", Namespace: namespace},
|
||||
|
@ -107,8 +108,8 @@ func TestIgnoreAdmission(t *testing.T) {
|
|||
// TestAdmissionNamespaceExistsUnknownToHandler
|
||||
func TestAdmissionNamespaceExistsUnknownToHandler(t *testing.T) {
|
||||
namespace := "test"
|
||||
mockClient := &testclient.Fake{}
|
||||
mockClient.AddReactor("create", "namespaces", func(action testclient.Action) (bool, runtime.Object, error) {
|
||||
mockClient := &fake.Clientset{}
|
||||
mockClient.AddReactor("create", "namespaces", func(action core.Action) (bool, runtime.Object, error) {
|
||||
return true, nil, errors.NewAlreadyExists(api.Resource("namespaces"), namespace)
|
||||
})
|
||||
|
||||
|
|
|
@ -20,17 +20,18 @@ import (
|
|||
"io"
|
||||
"time"
|
||||
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
|
||||
|
||||
"k8s.io/kubernetes/pkg/admission"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/errors"
|
||||
"k8s.io/kubernetes/pkg/client/cache"
|
||||
client "k8s.io/kubernetes/pkg/client/unversioned"
|
||||
"k8s.io/kubernetes/pkg/runtime"
|
||||
"k8s.io/kubernetes/pkg/watch"
|
||||
)
|
||||
|
||||
func init() {
|
||||
admission.RegisterPlugin("NamespaceExists", func(client client.Interface, config io.Reader) (admission.Interface, error) {
|
||||
admission.RegisterPlugin("NamespaceExists", func(client clientset.Interface, config io.Reader) (admission.Interface, error) {
|
||||
return NewExists(client), nil
|
||||
})
|
||||
}
|
||||
|
@ -40,7 +41,7 @@ func init() {
|
|||
// It is useful in deployments that want to enforce pre-declaration of a Namespace resource.
|
||||
type exists struct {
|
||||
*admission.Handler
|
||||
client client.Interface
|
||||
client clientset.Interface
|
||||
store cache.Store
|
||||
}
|
||||
|
||||
|
@ -68,7 +69,7 @@ func (e *exists) Admit(a admission.Attributes) (err error) {
|
|||
}
|
||||
|
||||
// in case of latency in our caches, make a direct call to storage to verify that it truly exists or not
|
||||
_, err = e.client.Namespaces().Get(a.GetNamespace())
|
||||
_, err = e.client.Legacy().Namespaces().Get(a.GetNamespace())
|
||||
if err != nil {
|
||||
if errors.IsNotFound(err) {
|
||||
return err
|
||||
|
@ -80,15 +81,15 @@ func (e *exists) Admit(a admission.Attributes) (err error) {
|
|||
}
|
||||
|
||||
// NewExists creates a new namespace exists admission control handler
|
||||
func NewExists(c client.Interface) admission.Interface {
|
||||
func NewExists(c clientset.Interface) admission.Interface {
|
||||
store := cache.NewStore(cache.MetaNamespaceKeyFunc)
|
||||
reflector := cache.NewReflector(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
|
||||
return c.Namespaces().List(options)
|
||||
return c.Legacy().Namespaces().List(options)
|
||||
},
|
||||
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
|
||||
return c.Namespaces().Watch(options)
|
||||
return c.Legacy().Namespaces().Watch(options)
|
||||
},
|
||||
},
|
||||
&api.Namespace{},
|
||||
|
|
|
@ -21,18 +21,19 @@ import (
|
|||
"io"
|
||||
"time"
|
||||
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
|
||||
|
||||
"k8s.io/kubernetes/pkg/admission"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/errors"
|
||||
"k8s.io/kubernetes/pkg/client/cache"
|
||||
client "k8s.io/kubernetes/pkg/client/unversioned"
|
||||
"k8s.io/kubernetes/pkg/runtime"
|
||||
"k8s.io/kubernetes/pkg/util/sets"
|
||||
"k8s.io/kubernetes/pkg/watch"
|
||||
)
|
||||
|
||||
func init() {
|
||||
admission.RegisterPlugin("NamespaceLifecycle", func(client client.Interface, config io.Reader) (admission.Interface, error) {
|
||||
admission.RegisterPlugin("NamespaceLifecycle", func(client clientset.Interface, config io.Reader) (admission.Interface, error) {
|
||||
return NewLifecycle(client), nil
|
||||
})
|
||||
}
|
||||
|
@ -41,7 +42,7 @@ func init() {
|
|||
// It enforces life-cycle constraints around a Namespace depending on its Phase
|
||||
type lifecycle struct {
|
||||
*admission.Handler
|
||||
client client.Interface
|
||||
client clientset.Interface
|
||||
store cache.Store
|
||||
immortalNamespaces sets.String
|
||||
}
|
||||
|
@ -72,7 +73,7 @@ func (l *lifecycle) Admit(a admission.Attributes) (err error) {
|
|||
// refuse to operate on non-existent namespaces
|
||||
if !exists {
|
||||
// in case of latency in our caches, make a direct call to storage to verify that it truly exists or not
|
||||
namespaceObj, err = l.client.Namespaces().Get(a.GetNamespace())
|
||||
namespaceObj, err = l.client.Legacy().Namespaces().Get(a.GetNamespace())
|
||||
if err != nil {
|
||||
if errors.IsNotFound(err) {
|
||||
return err
|
||||
|
@ -96,15 +97,15 @@ func (l *lifecycle) Admit(a admission.Attributes) (err error) {
|
|||
}
|
||||
|
||||
// NewLifecycle creates a new namespace lifecycle admission control handler
|
||||
func NewLifecycle(c client.Interface) admission.Interface {
|
||||
func NewLifecycle(c clientset.Interface) admission.Interface {
|
||||
store := cache.NewStore(cache.MetaNamespaceKeyFunc)
|
||||
reflector := cache.NewReflector(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
|
||||
return c.Namespaces().List(options)
|
||||
return c.Legacy().Namespaces().List(options)
|
||||
},
|
||||
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
|
||||
return c.Namespaces().Watch(options)
|
||||
return c.Legacy().Namespaces().Watch(options)
|
||||
},
|
||||
},
|
||||
&api.Namespace{},
|
||||
|
|
|
@ -24,6 +24,8 @@ import (
|
|||
"k8s.io/kubernetes/pkg/admission"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/client/cache"
|
||||
"k8s.io/kubernetes/pkg/client/testing/core"
|
||||
"k8s.io/kubernetes/pkg/client/testing/fake"
|
||||
"k8s.io/kubernetes/pkg/client/unversioned/testclient"
|
||||
"k8s.io/kubernetes/pkg/runtime"
|
||||
)
|
||||
|
@ -43,8 +45,8 @@ func TestAdmission(t *testing.T) {
|
|||
|
||||
store := cache.NewStore(cache.MetaNamespaceKeyFunc)
|
||||
store.Add(namespaceObj)
|
||||
mockClient := testclient.NewSimpleFake()
|
||||
mockClient.PrependReactor("get", "namespaces", func(action testclient.Action) (bool, runtime.Object, error) {
|
||||
mockClient := fake.NewSimpleClientset()
|
||||
mockClient.PrependReactor("get", "namespaces", func(action core.Action) (bool, runtime.Object, error) {
|
||||
namespaceLock.RLock()
|
||||
defer namespaceLock.RUnlock()
|
||||
if getAction, ok := action.(testclient.GetAction); ok && getAction.GetName() == namespaceObj.Name {
|
||||
|
@ -52,7 +54,7 @@ func TestAdmission(t *testing.T) {
|
|||
}
|
||||
return true, nil, fmt.Errorf("No result for action %v", action)
|
||||
})
|
||||
mockClient.PrependReactor("list", "namespaces", func(action testclient.Action) (bool, runtime.Object, error) {
|
||||
mockClient.PrependReactor("list", "namespaces", func(action core.Action) (bool, runtime.Object, error) {
|
||||
namespaceLock.RLock()
|
||||
defer namespaceLock.RUnlock()
|
||||
return true, &api.NamespaceList{Items: []api.Namespace{*namespaceObj}}, nil
|
||||
|
|
|
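
The NamespaceLifecycle test above switches its reactor callbacks from testclient.Action to core.Action on a fake.NewSimpleClientset(). A small sketch of registering a reactor on the generated fake, assuming PrependReactor behaves as in the hunk; the helper name and namespace object are illustrative:

package example

import (
    "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/client/testing/core"
    "k8s.io/kubernetes/pkg/client/testing/fake"
    "k8s.io/kubernetes/pkg/runtime"
)

// newFakeWithNamespace returns a fake clientset whose "get namespaces" calls
// always succeed with the supplied namespace object.
func newFakeWithNamespace(ns *api.Namespace) *fake.Clientset {
    client := fake.NewSimpleClientset()
    client.PrependReactor("get", "namespaces", func(action core.Action) (bool, runtime.Object, error) {
        // handled=true short-circuits the default reactor chain
        return true, ns, nil
    })
    return client
}
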
@ -21,16 +21,17 @@ import (
|
|||
"io"
|
||||
"sync"
|
||||
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
|
||||
|
||||
"k8s.io/kubernetes/pkg/admission"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
client "k8s.io/kubernetes/pkg/client/unversioned"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
|
||||
)
|
||||
|
||||
func init() {
|
||||
admission.RegisterPlugin("PersistentVolumeLabel", func(client client.Interface, config io.Reader) (admission.Interface, error) {
|
||||
admission.RegisterPlugin("PersistentVolumeLabel", func(client clientset.Interface, config io.Reader) (admission.Interface, error) {
|
||||
persistentVolumeLabelAdmission := NewPersistentVolumeLabel()
|
||||
return persistentVolumeLabelAdmission, nil
|
||||
})
|
||||
|
|
|
@ -22,12 +22,13 @@ import (
|
|||
"math/rand"
|
||||
"time"
|
||||
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
|
||||
|
||||
"k8s.io/kubernetes/pkg/admission"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/resource"
|
||||
"k8s.io/kubernetes/pkg/api/unversioned"
|
||||
"k8s.io/kubernetes/pkg/client/cache"
|
||||
client "k8s.io/kubernetes/pkg/client/unversioned"
|
||||
resourcequotacontroller "k8s.io/kubernetes/pkg/controller/resourcequota"
|
||||
"k8s.io/kubernetes/pkg/runtime"
|
||||
utilerrors "k8s.io/kubernetes/pkg/util/errors"
|
||||
|
@ -35,25 +36,25 @@ import (
|
|||
)
|
||||
|
||||
func init() {
|
||||
admission.RegisterPlugin("ResourceQuota", func(client client.Interface, config io.Reader) (admission.Interface, error) {
|
||||
admission.RegisterPlugin("ResourceQuota", func(client clientset.Interface, config io.Reader) (admission.Interface, error) {
|
||||
return NewResourceQuota(client), nil
|
||||
})
|
||||
}
|
||||
|
||||
type quota struct {
|
||||
*admission.Handler
|
||||
client client.Interface
|
||||
client clientset.Interface
|
||||
indexer cache.Indexer
|
||||
}
|
||||
|
||||
// NewResourceQuota creates a new resource quota admission control handler
|
||||
func NewResourceQuota(client client.Interface) admission.Interface {
|
||||
func NewResourceQuota(client clientset.Interface) admission.Interface {
|
||||
lw := &cache.ListWatch{
|
||||
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
|
||||
return client.ResourceQuotas(api.NamespaceAll).List(options)
|
||||
return client.Legacy().ResourceQuotas(api.NamespaceAll).List(options)
|
||||
},
|
||||
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
|
||||
return client.ResourceQuotas(api.NamespaceAll).Watch(options)
|
||||
return client.Legacy().ResourceQuotas(api.NamespaceAll).Watch(options)
|
||||
},
|
||||
}
|
||||
indexer, reflector := cache.NewNamespaceKeyedIndexerAndReflector(lw, &api.ResourceQuota{}, 0)
|
||||
|
@ -61,7 +62,7 @@ func NewResourceQuota(client client.Interface) admission.Interface {
|
|||
return createResourceQuota(client, indexer)
|
||||
}
|
||||
|
||||
func createResourceQuota(client client.Interface, indexer cache.Indexer) admission.Interface {
|
||||
func createResourceQuota(client clientset.Interface, indexer cache.Indexer) admission.Interface {
|
||||
return "a{
|
||||
Handler: admission.NewHandler(admission.Create, admission.Update),
|
||||
client: client,
|
||||
|
@ -142,7 +143,7 @@ func (q *quota) Admit(a admission.Attributes) (err error) {
|
|||
Annotations: quota.Annotations},
|
||||
}
|
||||
usage.Status = *status
|
||||
_, err = q.client.ResourceQuotas(usage.Namespace).UpdateStatus(&usage)
|
||||
_, err = q.client.Legacy().ResourceQuotas(usage.Namespace).UpdateStatus(&usage)
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
|
@ -153,7 +154,7 @@ func (q *quota) Admit(a admission.Attributes) (err error) {
|
|||
}
|
||||
time.Sleep(interval)
|
||||
// manually get the latest quota
|
||||
quota, err = q.client.ResourceQuotas(usage.Namespace).Get(quota.Name)
|
||||
quota, err = q.client.Legacy().ResourceQuotas(usage.Namespace).Get(quota.Name)
|
||||
if err != nil {
|
||||
return admission.NewForbidden(a, err)
|
||||
}
|
||||
|
@ -166,7 +167,7 @@ func (q *quota) Admit(a admission.Attributes) (err error) {
|
|||
// IncrementUsage updates the supplied ResourceQuotaStatus object based on the incoming operation
|
||||
// Return true if the usage must be recorded prior to admitting the new resource
|
||||
// Return an error if the operation should not pass admission control
|
||||
func IncrementUsage(a admission.Attributes, status *api.ResourceQuotaStatus, client client.Interface) (bool, error) {
|
||||
func IncrementUsage(a admission.Attributes, status *api.ResourceQuotaStatus, client clientset.Interface) (bool, error) {
|
||||
// on update, the only resource that can modify the value of a quota is pods
|
||||
// so if you're not a pod, we exit quickly
|
||||
if a.GetOperation() == admission.Update && a.GetResource() != api.Resource("pods") {
|
||||
|
@ -227,7 +228,7 @@ func IncrementUsage(a admission.Attributes, status *api.ResourceQuotaStatus, cli
|
|||
|
||||
// if this operation is an update, we need to find the delta usage from the previous state
|
||||
if a.GetOperation() == admission.Update {
|
||||
oldPod, err := client.Pods(a.GetNamespace()).Get(pod.Name)
|
||||
oldPod, err := client.Legacy().Pods(a.GetNamespace()).Get(pod.Name)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
|
|
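
IncrementUsage now takes a clientset.Interface, so its pod lookups on update go through client.Legacy(). A hedged sketch of how a caller might invoke it after this change; the wrapper name is illustrative and the plugin's import path is assumed to be plugin/pkg/admission/resourcequota:

package example

import (
    "k8s.io/kubernetes/pkg/admission"
    "k8s.io/kubernetes/pkg/api"
    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
    // assumed location of the ResourceQuota admission plugin in this tree
    resourcequota "k8s.io/kubernetes/plugin/pkg/admission/resourcequota"
)

// admitPodAgainstQuota applies quota bookkeeping for a pod create:
// IncrementUsage mutates status and reports whether the new usage must be
// persisted before the request is admitted.
func admitPodAgainstQuota(client clientset.Interface, pod *api.Pod, namespace string, status *api.ResourceQuotaStatus) (bool, error) {
    attrs := admission.NewAttributesRecord(pod, api.Kind("Pod"), namespace, pod.Name, api.Resource("pods"), "", admission.Create, nil)
    return resourcequota.IncrementUsage(attrs, status, client)
}
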
@ -25,7 +25,7 @@ import (
|
|||
"k8s.io/kubernetes/pkg/api/resource"
|
||||
"k8s.io/kubernetes/pkg/api/unversioned"
|
||||
"k8s.io/kubernetes/pkg/client/cache"
|
||||
"k8s.io/kubernetes/pkg/client/unversioned/testclient"
|
||||
"k8s.io/kubernetes/pkg/client/testing/fake"
|
||||
resourcequotacontroller "k8s.io/kubernetes/pkg/controller/resourcequota"
|
||||
"k8s.io/kubernetes/pkg/runtime"
|
||||
)
|
||||
|
@ -65,7 +65,7 @@ func validPod(name string, numContainers int, resources api.ResourceRequirements
|
|||
|
||||
func TestAdmissionIgnoresDelete(t *testing.T) {
|
||||
namespace := "default"
|
||||
handler := createResourceQuota(&testclient.Fake{}, nil)
|
||||
handler := createResourceQuota(&fake.Clientset{}, nil)
|
||||
err := handler.Admit(admission.NewAttributesRecord(nil, api.Kind("Pod"), namespace, "name", api.Resource("pods"), "", admission.Delete, nil))
|
||||
if err != nil {
|
||||
t.Errorf("ResourceQuota should admit all deletes: %v", err)
|
||||
|
@ -74,7 +74,7 @@ func TestAdmissionIgnoresDelete(t *testing.T) {
|
|||
|
||||
func TestAdmissionIgnoresSubresources(t *testing.T) {
|
||||
indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
|
||||
handler := createResourceQuota(&testclient.Fake{}, indexer)
|
||||
handler := createResourceQuota(&fake.Clientset{}, indexer)
|
||||
|
||||
quota := &api.ResourceQuota{}
|
||||
quota.Name = "quota"
|
||||
|
@ -173,7 +173,7 @@ func TestIncrementUsagePodResources(t *testing.T) {
|
|||
}
|
||||
for _, item := range testCases {
|
||||
podList := &api.PodList{Items: []api.Pod{*item.existing}}
|
||||
client := testclient.NewSimpleFake(podList)
|
||||
client := fake.NewSimpleClientset(podList)
|
||||
status := &api.ResourceQuotaStatus{
|
||||
Hard: api.ResourceList{},
|
||||
Used: api.ResourceList{},
|
||||
|
@ -207,7 +207,7 @@ func TestIncrementUsagePodResources(t *testing.T) {
|
|||
func TestIncrementUsagePods(t *testing.T) {
|
||||
pod := validPod("123", 1, getResourceRequirements(getResourceList("100m", "1Gi"), getResourceList("", "")))
|
||||
podList := &api.PodList{Items: []api.Pod{*pod}}
|
||||
client := testclient.NewSimpleFake(podList)
|
||||
client := fake.NewSimpleClientset(podList)
|
||||
status := &api.ResourceQuotaStatus{
|
||||
Hard: api.ResourceList{},
|
||||
Used: api.ResourceList{},
|
||||
|
@ -231,7 +231,7 @@ func TestIncrementUsagePods(t *testing.T) {
|
|||
func TestExceedUsagePods(t *testing.T) {
|
||||
pod := validPod("123", 1, getResourceRequirements(getResourceList("100m", "1Gi"), getResourceList("", "")))
|
||||
podList := &api.PodList{Items: []api.Pod{*pod}}
|
||||
client := testclient.NewSimpleFake(podList)
|
||||
client := fake.NewSimpleClientset(podList)
|
||||
status := &api.ResourceQuotaStatus{
|
||||
Hard: api.ResourceList{},
|
||||
Used: api.ResourceList{},
|
||||
|
@ -247,7 +247,7 @@ func TestExceedUsagePods(t *testing.T) {
|
|||
|
||||
func TestIncrementUsageServices(t *testing.T) {
|
||||
namespace := "default"
|
||||
client := testclient.NewSimpleFake(&api.ServiceList{
|
||||
client := fake.NewSimpleClientset(&api.ServiceList{
|
||||
Items: []api.Service{
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Name: "123", Namespace: namespace},
|
||||
|
@ -276,7 +276,7 @@ func TestIncrementUsageServices(t *testing.T) {
|
|||
|
||||
func TestExceedUsageServices(t *testing.T) {
|
||||
namespace := "default"
|
||||
client := testclient.NewSimpleFake(&api.ServiceList{
|
||||
client := fake.NewSimpleClientset(&api.ServiceList{
|
||||
Items: []api.Service{
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Name: "123", Namespace: namespace},
|
||||
|
@ -298,7 +298,7 @@ func TestExceedUsageServices(t *testing.T) {
|
|||
|
||||
func TestIncrementUsageReplicationControllers(t *testing.T) {
|
||||
namespace := "default"
|
||||
client := testclient.NewSimpleFake(&api.ReplicationControllerList{
|
||||
client := fake.NewSimpleClientset(&api.ReplicationControllerList{
|
||||
Items: []api.ReplicationController{
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Name: "123", Namespace: namespace},
|
||||
|
@ -327,7 +327,7 @@ func TestIncrementUsageReplicationControllers(t *testing.T) {
|
|||
|
||||
func TestExceedUsageReplicationControllers(t *testing.T) {
|
||||
namespace := "default"
|
||||
client := testclient.NewSimpleFake(&api.ReplicationControllerList{
|
||||
client := fake.NewSimpleClientset(&api.ReplicationControllerList{
|
||||
Items: []api.ReplicationController{
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Name: "123", Namespace: namespace},
|
||||
|
@ -349,7 +349,7 @@ func TestExceedUsageReplicationControllers(t *testing.T) {
|
|||
|
||||
func TestExceedUsageSecrets(t *testing.T) {
|
||||
namespace := "default"
|
||||
client := testclient.NewSimpleFake(&api.SecretList{
|
||||
client := fake.NewSimpleClientset(&api.SecretList{
|
||||
Items: []api.Secret{
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Name: "123", Namespace: namespace},
|
||||
|
@ -371,7 +371,7 @@ func TestExceedUsageSecrets(t *testing.T) {
|
|||
|
||||
func TestExceedUsagePersistentVolumeClaims(t *testing.T) {
|
||||
namespace := "default"
|
||||
client := testclient.NewSimpleFake(&api.PersistentVolumeClaimList{
|
||||
client := fake.NewSimpleClientset(&api.PersistentVolumeClaimList{
|
||||
Items: []api.PersistentVolumeClaim{
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Name: "123", Namespace: namespace},
|
||||
|
@ -426,7 +426,7 @@ func TestIncrementUsageOnUpdateIgnoresNonPodResources(t *testing.T) {
|
|||
}
|
||||
|
||||
for _, testCase := range testCase {
|
||||
client := testclient.NewSimpleFake()
|
||||
client := fake.NewSimpleClientset()
|
||||
status := &api.ResourceQuotaStatus{
|
||||
Hard: api.ResourceList{},
|
||||
Used: api.ResourceList{},
|
||||
|
|
|
@ -20,14 +20,15 @@ import (
|
|||
"fmt"
|
||||
"io"
|
||||
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
|
||||
|
||||
"k8s.io/kubernetes/pkg/admission"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
apierrors "k8s.io/kubernetes/pkg/api/errors"
|
||||
client "k8s.io/kubernetes/pkg/client/unversioned"
|
||||
)
|
||||
|
||||
func init() {
|
||||
admission.RegisterPlugin("SecurityContextDeny", func(client client.Interface, config io.Reader) (admission.Interface, error) {
|
||||
admission.RegisterPlugin("SecurityContextDeny", func(client clientset.Interface, config io.Reader) (admission.Interface, error) {
|
||||
return NewSecurityContextDeny(client), nil
|
||||
})
|
||||
}
|
||||
|
@ -35,11 +36,11 @@ func init() {
|
|||
// plugin contains the client used by the SecurityContextDeny admission controller
|
||||
type plugin struct {
|
||||
*admission.Handler
|
||||
client client.Interface
|
||||
client clientset.Interface
|
||||
}
|
||||
|
||||
// NewSecurityContextDeny creates a new instance of the SecurityContextDeny admission controller
|
||||
func NewSecurityContextDeny(client client.Interface) admission.Interface {
|
||||
func NewSecurityContextDeny(client clientset.Interface) admission.Interface {
|
||||
return &plugin{
|
||||
Handler: admission.NewHandler(admission.Create, admission.Update),
|
||||
client: client,
|
||||
|
|
|
@ -23,6 +23,8 @@ import (
|
|||
"strconv"
|
||||
"time"
|
||||
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
|
||||
|
||||
"k8s.io/kubernetes/pkg/admission"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/errors"
|
||||
|
@ -51,7 +53,7 @@ const DefaultAPITokenMountPath = "/var/run/secrets/kubernetes.io/serviceaccount"
|
|||
const PluginName = "ServiceAccount"
|
||||
|
||||
func init() {
|
||||
admission.RegisterPlugin(PluginName, func(client client.Interface, config io.Reader) (admission.Interface, error) {
|
||||
admission.RegisterPlugin(PluginName, func(client clientset.Interface, config io.Reader) (admission.Interface, error) {
|
||||
serviceAccountAdmission := NewServiceAccount(client)
|
||||
serviceAccountAdmission.Run()
|
||||
return serviceAccountAdmission, nil
|
||||
|
@ -70,7 +72,7 @@ type serviceAccount struct {
|
|||
// MountServiceAccountToken creates Volume and VolumeMounts for the first referenced ServiceAccountToken for the pod's service account
|
||||
MountServiceAccountToken bool
|
||||
|
||||
client client.Interface
|
||||
client clientset.Interface
|
||||
|
||||
serviceAccounts cache.Indexer
|
||||
secrets cache.Indexer
|
||||
|
@ -86,14 +88,14 @@ type serviceAccount struct {
|
|||
// 3. If LimitSecretReferences is true, it rejects the pod if the pod references Secret objects which the pod's ServiceAccount does not reference
|
||||
// 4. If the pod does not contain any ImagePullSecrets, the ImagePullSecrets of the service account are added.
|
||||
// 5. If MountServiceAccountToken is true, it adds a VolumeMount with the pod's ServiceAccount's api token secret to containers
|
||||
func NewServiceAccount(cl client.Interface) *serviceAccount {
|
||||
func NewServiceAccount(cl clientset.Interface) *serviceAccount {
|
||||
serviceAccountsIndexer, serviceAccountsReflector := cache.NewNamespaceKeyedIndexerAndReflector(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
|
||||
return cl.ServiceAccounts(api.NamespaceAll).List(options)
|
||||
return cl.Legacy().ServiceAccounts(api.NamespaceAll).List(options)
|
||||
},
|
||||
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
|
||||
return cl.ServiceAccounts(api.NamespaceAll).Watch(options)
|
||||
return cl.Legacy().ServiceAccounts(api.NamespaceAll).Watch(options)
|
||||
},
|
||||
},
|
||||
&api.ServiceAccount{},
|
||||
|
@ -105,11 +107,11 @@ func NewServiceAccount(cl client.Interface) *serviceAccount {
|
|||
&cache.ListWatch{
|
||||
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
|
||||
options.FieldSelector = tokenSelector
|
||||
return cl.Secrets(api.NamespaceAll).List(options)
|
||||
return cl.Legacy().Secrets(api.NamespaceAll).List(options)
|
||||
},
|
||||
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
|
||||
options.FieldSelector = tokenSelector
|
||||
return cl.Secrets(api.NamespaceAll).Watch(options)
|
||||
return cl.Legacy().Secrets(api.NamespaceAll).Watch(options)
|
||||
},
|
||||
},
|
||||
&api.Secret{},
|
||||
|
@ -251,7 +253,7 @@ func (s *serviceAccount) getServiceAccount(namespace string, name string) (*api.
|
|||
if i != 0 {
|
||||
time.Sleep(retryInterval)
|
||||
}
|
||||
serviceAccount, err := s.client.ServiceAccounts(namespace).Get(name)
|
||||
serviceAccount, err := s.client.Legacy().ServiceAccounts(namespace).Get(name)
|
||||
if err == nil {
|
||||
return serviceAccount, nil
|
||||
}
|
||||
|
|
|
@ -22,7 +22,7 @@ import (
|
|||
|
||||
"k8s.io/kubernetes/pkg/admission"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/client/unversioned/testclient"
|
||||
"k8s.io/kubernetes/pkg/client/testing/fake"
|
||||
kubelet "k8s.io/kubernetes/pkg/kubelet/types"
|
||||
"k8s.io/kubernetes/pkg/types"
|
||||
)
|
||||
|
@ -176,7 +176,7 @@ func TestFetchesUncachedServiceAccount(t *testing.T) {
|
|||
ns := "myns"
|
||||
|
||||
// Build a test client that the admission plugin can use to look up the service account missing from its cache
|
||||
client := testclient.NewSimpleFake(&api.ServiceAccount{
|
||||
client := fake.NewSimpleClientset(&api.ServiceAccount{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Name: DefaultServiceAccountName,
|
||||
Namespace: ns,
|
||||
|
@ -201,7 +201,7 @@ func TestDeniesInvalidServiceAccount(t *testing.T) {
|
|||
ns := "myns"
|
||||
|
||||
// Build a test client that the admission plugin can use to look up the service account missing from its cache
|
||||
client := testclient.NewSimpleFake()
|
||||
client := fake.NewSimpleClientset()
|
||||
|
||||
admit := NewServiceAccount(client)
|
||||
|
||||
|
|
|
@ -38,6 +38,7 @@ import (
|
|||
"k8s.io/kubernetes/pkg/auth/authenticator/bearertoken"
|
||||
"k8s.io/kubernetes/pkg/auth/authorizer"
|
||||
"k8s.io/kubernetes/pkg/auth/user"
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
|
||||
client "k8s.io/kubernetes/pkg/client/unversioned"
|
||||
serviceaccountcontroller "k8s.io/kubernetes/pkg/controller/serviceaccount"
|
||||
"k8s.io/kubernetes/pkg/master"
|
||||
|
@ -68,7 +69,7 @@ func TestServiceAccountAutoCreate(t *testing.T) {
|
|||
ns := "test-service-account-creation"
|
||||
|
||||
// Create namespace
|
||||
_, err := c.Namespaces().Create(&api.Namespace{ObjectMeta: api.ObjectMeta{Name: ns}})
|
||||
_, err := c.Legacy().Namespaces().Create(&api.Namespace{ObjectMeta: api.ObjectMeta{Name: ns}})
|
||||
if err != nil {
|
||||
t.Fatalf("could not create namespace: %v", err)
|
||||
}
|
||||
|
@ -80,7 +81,7 @@ func TestServiceAccountAutoCreate(t *testing.T) {
|
|||
}
|
||||
|
||||
// Delete service account
|
||||
err = c.ServiceAccounts(ns).Delete(defaultUser.Name)
|
||||
err = c.Legacy().ServiceAccounts(ns).Delete(defaultUser.Name, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("Could not delete default serviceaccount: %v", err)
|
||||
}
|
||||
|
@ -103,13 +104,13 @@ func TestServiceAccountTokenAutoCreate(t *testing.T) {
|
|||
name := "my-service-account"
|
||||
|
||||
// Create namespace
|
||||
_, err := c.Namespaces().Create(&api.Namespace{ObjectMeta: api.ObjectMeta{Name: ns}})
|
||||
_, err := c.Legacy().Namespaces().Create(&api.Namespace{ObjectMeta: api.ObjectMeta{Name: ns}})
|
||||
if err != nil {
|
||||
t.Fatalf("could not create namespace: %v", err)
|
||||
}
|
||||
|
||||
// Create service account
|
||||
serviceAccount, err := c.ServiceAccounts(ns).Create(&api.ServiceAccount{ObjectMeta: api.ObjectMeta{Name: name}})
|
||||
serviceAccount, err := c.Legacy().ServiceAccounts(ns).Create(&api.ServiceAccount{ObjectMeta: api.ObjectMeta{Name: name}})
|
||||
if err != nil {
|
||||
t.Fatalf("Service Account not created: %v", err)
|
||||
}
|
||||
|
@ -121,7 +122,7 @@ func TestServiceAccountTokenAutoCreate(t *testing.T) {
|
|||
}
|
||||
|
||||
// Delete token
|
||||
err = c.Secrets(ns).Delete(token1Name)
|
||||
err = c.Legacy().Secrets(ns).Delete(token1Name, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("Could not delete token: %v", err)
|
||||
}
|
||||
|
@ -139,12 +140,12 @@ func TestServiceAccountTokenAutoCreate(t *testing.T) {
|
|||
}
|
||||
|
||||
// Trigger creation of a new referenced token
|
||||
serviceAccount, err = c.ServiceAccounts(ns).Get(name)
|
||||
serviceAccount, err = c.Legacy().ServiceAccounts(ns).Get(name)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
serviceAccount.Secrets = []api.ObjectReference{}
|
||||
_, err = c.ServiceAccounts(ns).Update(serviceAccount)
|
||||
_, err = c.Legacy().ServiceAccounts(ns).Update(serviceAccount)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
@ -162,7 +163,7 @@ func TestServiceAccountTokenAutoCreate(t *testing.T) {
|
|||
}
|
||||
|
||||
// Delete service account
|
||||
err = c.ServiceAccounts(ns).Delete(name)
|
||||
err = c.Legacy().ServiceAccounts(ns).Delete(name, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
@ -171,7 +172,7 @@ func TestServiceAccountTokenAutoCreate(t *testing.T) {
|
|||
tokensToCleanup := sets.NewString(token1Name, token2Name, token3Name)
|
||||
err = wait.Poll(time.Second, 10*time.Second, func() (bool, error) {
|
||||
// Get all secrets in the namespace
|
||||
secrets, err := c.Secrets(ns).List(api.ListOptions{})
|
||||
secrets, err := c.Legacy().Secrets(ns).List(api.ListOptions{})
|
||||
// Retrieval errors should fail
|
||||
if err != nil {
|
||||
return false, err
|
||||
|
@ -197,7 +198,7 @@ func TestServiceAccountTokenAutoMount(t *testing.T) {
|
|||
ns := "auto-mount-ns"
|
||||
|
||||
// Create "my" namespace
|
||||
_, err := c.Namespaces().Create(&api.Namespace{ObjectMeta: api.ObjectMeta{Name: ns}})
|
||||
_, err := c.Legacy().Namespaces().Create(&api.Namespace{ObjectMeta: api.ObjectMeta{Name: ns}})
|
||||
if err != nil && !errors.IsAlreadyExists(err) {
|
||||
t.Fatalf("could not create namespace: %v", err)
|
||||
}
|
||||
|
@ -249,7 +250,7 @@ func TestServiceAccountTokenAutoMount(t *testing.T) {
|
|||
}
|
||||
expectedContainer2VolumeMounts := protoPod.Spec.Containers[1].VolumeMounts
|
||||
|
||||
createdPod, err := c.Pods(ns).Create(&protoPod)
|
||||
createdPod, err := c.Legacy().Pods(ns).Create(&protoPod)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
@ -275,19 +276,19 @@ func TestServiceAccountTokenAuthentication(t *testing.T) {
|
|||
otherns := "other-ns"
|
||||
|
||||
// Create "my" namespace
|
||||
_, err := c.Namespaces().Create(&api.Namespace{ObjectMeta: api.ObjectMeta{Name: myns}})
|
||||
_, err := c.Legacy().Namespaces().Create(&api.Namespace{ObjectMeta: api.ObjectMeta{Name: myns}})
|
||||
if err != nil && !errors.IsAlreadyExists(err) {
|
||||
t.Fatalf("could not create namespace: %v", err)
|
||||
}
|
||||
|
||||
// Create "other" namespace
|
||||
_, err = c.Namespaces().Create(&api.Namespace{ObjectMeta: api.ObjectMeta{Name: otherns}})
|
||||
_, err = c.Legacy().Namespaces().Create(&api.Namespace{ObjectMeta: api.ObjectMeta{Name: otherns}})
|
||||
if err != nil && !errors.IsAlreadyExists(err) {
|
||||
t.Fatalf("could not create namespace: %v", err)
|
||||
}
|
||||
|
||||
// Create "ro" user in myns
|
||||
_, err = c.ServiceAccounts(myns).Create(&api.ServiceAccount{ObjectMeta: api.ObjectMeta{Name: readOnlyServiceAccountName}})
|
||||
_, err = c.Legacy().ServiceAccounts(myns).Create(&api.ServiceAccount{ObjectMeta: api.ObjectMeta{Name: readOnlyServiceAccountName}})
|
||||
if err != nil {
|
||||
t.Fatalf("Service Account not created: %v", err)
|
||||
}
|
||||
|
@ -297,17 +298,17 @@ func TestServiceAccountTokenAuthentication(t *testing.T) {
|
|||
}
|
||||
roClientConfig := config
|
||||
roClientConfig.BearerToken = roToken
|
||||
roClient := client.NewOrDie(&roClientConfig)
|
||||
roClient := clientset.NewForConfigOrDie(&roClientConfig)
|
||||
doServiceAccountAPIRequests(t, roClient, myns, true, true, false)
|
||||
doServiceAccountAPIRequests(t, roClient, otherns, true, false, false)
|
||||
err = c.Secrets(myns).Delete(roTokenName)
|
||||
err = c.Legacy().Secrets(myns).Delete(roTokenName, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("could not delete token: %v", err)
|
||||
}
|
||||
doServiceAccountAPIRequests(t, roClient, myns, false, false, false)
|
||||
|
||||
// Create "rw" user in myns
|
||||
_, err = c.ServiceAccounts(myns).Create(&api.ServiceAccount{ObjectMeta: api.ObjectMeta{Name: readWriteServiceAccountName}})
|
||||
_, err = c.Legacy().ServiceAccounts(myns).Create(&api.ServiceAccount{ObjectMeta: api.ObjectMeta{Name: readWriteServiceAccountName}})
|
||||
if err != nil {
|
||||
t.Fatalf("Service Account not created: %v", err)
|
||||
}
|
||||
|
@ -317,7 +318,7 @@ func TestServiceAccountTokenAuthentication(t *testing.T) {
|
|||
}
|
||||
rwClientConfig := config
|
||||
rwClientConfig.BearerToken = rwToken
|
||||
rwClient := client.NewOrDie(&rwClientConfig)
|
||||
rwClient := clientset.NewForConfigOrDie(&rwClientConfig)
|
||||
doServiceAccountAPIRequests(t, rwClient, myns, true, true, true)
|
||||
doServiceAccountAPIRequests(t, rwClient, otherns, true, false, false)
|
||||
|
||||
|
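
In the integration test above, per-token clients are now built with clientset.NewForConfigOrDie rather than client.NewOrDie. A small sketch of constructing a generated clientset from a rest config with a bearer token; the host and token values are placeholders, and NewForConfig is used instead of the OrDie variant so errors surface normally:

package example

import (
    "k8s.io/kubernetes/pkg/api/testapi"
    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
    client "k8s.io/kubernetes/pkg/client/unversioned"
)

// newTokenClientset builds a generated clientset that authenticates with the
// supplied bearer token.
func newTokenClientset(host, token string) (*clientset.Clientset, error) {
    config := client.Config{
        Host:          host,
        ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()},
        BearerToken:   token,
    }
    return clientset.NewForConfig(&config)
}
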
@ -328,13 +329,13 @@ func TestServiceAccountTokenAuthentication(t *testing.T) {
|
|||
}
|
||||
defaultClientConfig := config
|
||||
defaultClientConfig.BearerToken = defaultToken
|
||||
defaultClient := client.NewOrDie(&defaultClientConfig)
|
||||
defaultClient := clientset.NewForConfigOrDie(&defaultClientConfig)
|
||||
doServiceAccountAPIRequests(t, defaultClient, myns, true, false, false)
|
||||
}
|
||||
|
||||
// startServiceAccountTestServer returns a started server
|
||||
// It is the responsibility of the caller to ensure the returned stopFunc is called
|
||||
func startServiceAccountTestServer(t *testing.T) (*client.Client, client.Config, func()) {
|
||||
func startServiceAccountTestServer(t *testing.T) (*clientset.Clientset, client.Config, func()) {
|
||||
|
||||
deleteAllEtcdKeys()
|
||||
|
||||
|
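
startServiceAccountTestServer now returns the generated *clientset.Clientset along with the anonymous config and a stop function, and the comment above notes that the caller must run that stop function. A hedged sketch of the expected call pattern; the test name and namespace are illustrative and the package name is assumed:

package integration // assumed package of this integration test file

import (
    "testing"

    "k8s.io/kubernetes/pkg/api"
)

func TestExampleUsesTestServer(t *testing.T) {
    c, _, stop := startServiceAccountTestServer(t)
    defer stop() // the caller is responsible for tearing the test server down

    // c is now a *clientset.Clientset, so the v1 API is reached via Legacy().
    if _, err := c.Legacy().Namespaces().Create(&api.Namespace{ObjectMeta: api.ObjectMeta{Name: "example-ns"}}); err != nil {
        t.Fatal(err)
    }
}
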
@ -347,8 +348,8 @@ func startServiceAccountTestServer(t *testing.T) (*client.Client, client.Config,
|
|||
// Anonymous client config
|
||||
clientConfig := client.Config{Host: apiServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}
|
||||
// Root client
|
||||
rootClient := client.NewOrDie(&client.Config{Host: apiServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}, BearerToken: rootToken})
|
||||
|
||||
// TODO: remove rootClient after we refactor pkg/admission to use the clientset.
|
||||
rootClientset := clientset.NewForConfigOrDie(&client.Config{Host: apiServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}, BearerToken: rootToken})
|
||||
// Set up two authenticators:
|
||||
// 1. A token authenticator that maps the rootToken to the "root" user
|
||||
// 2. A ServiceAccountToken authenticator that validates ServiceAccount tokens
|
||||
|
@ -359,7 +360,7 @@ func startServiceAccountTestServer(t *testing.T) (*client.Client, client.Config,
|
|||
return nil, false, nil
|
||||
})
|
||||
serviceAccountKey, _ := rsa.GenerateKey(rand.Reader, 2048)
|
||||
serviceAccountTokenGetter := serviceaccountcontroller.NewGetterFromClient(rootClient)
|
||||
serviceAccountTokenGetter := serviceaccountcontroller.NewGetterFromClient(rootClientset)
|
||||
serviceAccountTokenAuth := serviceaccount.JWTTokenAuthenticator([]*rsa.PublicKey{&serviceAccountKey.PublicKey}, true, serviceAccountTokenGetter)
|
||||
authenticator := union.New(
|
||||
bearertoken.New(rootTokenAuth),
|
||||
|
@ -399,7 +400,7 @@ func startServiceAccountTestServer(t *testing.T) (*client.Client, client.Config,
|
|||
})
|
||||
|
||||
// Set up admission plugin to auto-assign serviceaccounts to pods
|
||||
serviceAccountAdmission := serviceaccountadmission.NewServiceAccount(rootClient)
|
||||
serviceAccountAdmission := serviceaccountadmission.NewServiceAccount(rootClientset)
|
||||
|
||||
masterConfig := framework.NewMasterConfig()
|
||||
masterConfig.EnableIndex = true
|
||||
|
@ -411,9 +412,9 @@ func startServiceAccountTestServer(t *testing.T) (*client.Client, client.Config,
|
|||
m = master.New(masterConfig)
|
||||
|
||||
// Start the service account and service account token controllers
|
||||
tokenController := serviceaccountcontroller.NewTokensController(rootClient, serviceaccountcontroller.TokensControllerOptions{TokenGenerator: serviceaccount.JWTTokenGenerator(serviceAccountKey)})
|
||||
tokenController := serviceaccountcontroller.NewTokensController(rootClientset, serviceaccountcontroller.TokensControllerOptions{TokenGenerator: serviceaccount.JWTTokenGenerator(serviceAccountKey)})
|
||||
tokenController.Run()
|
||||
serviceAccountController := serviceaccountcontroller.NewServiceAccountsController(rootClient, serviceaccountcontroller.DefaultServiceAccountsControllerOptions())
|
||||
serviceAccountController := serviceaccountcontroller.NewServiceAccountsController(rootClientset, serviceaccountcontroller.DefaultServiceAccountsControllerOptions())
|
||||
serviceAccountController.Run()
|
||||
// Start the admission plugin reflectors
|
||||
serviceAccountAdmission.Run()
|
||||
|
@ -426,18 +427,18 @@ func startServiceAccountTestServer(t *testing.T) (*client.Client, client.Config,
|
|||
// apiServer.Close()
|
||||
}
|
||||
|
||||
return rootClient, clientConfig, stop
|
||||
return rootClientset, clientConfig, stop
|
||||
}
|
||||
|
||||
func getServiceAccount(c *client.Client, ns string, name string, shouldWait bool) (*api.ServiceAccount, error) {
|
||||
func getServiceAccount(c *clientset.Clientset, ns string, name string, shouldWait bool) (*api.ServiceAccount, error) {
|
||||
if !shouldWait {
|
||||
return c.ServiceAccounts(ns).Get(name)
|
||||
return c.Legacy().ServiceAccounts(ns).Get(name)
|
||||
}
|
||||
|
||||
var user *api.ServiceAccount
|
||||
var err error
|
||||
err = wait.Poll(time.Second, 10*time.Second, func() (bool, error) {
|
||||
user, err = c.ServiceAccounts(ns).Get(name)
|
||||
user, err = c.Legacy().ServiceAccounts(ns).Get(name)
|
||||
if errors.IsNotFound(err) {
|
||||
return false, nil
|
||||
}
|
||||
|
@ -449,12 +450,12 @@ func getServiceAccount(c *client.Client, ns string, name string, shouldWait bool
|
|||
return user, err
|
||||
}
|
||||
|
||||
func getReferencedServiceAccountToken(c *client.Client, ns string, name string, shouldWait bool) (string, string, error) {
|
||||
func getReferencedServiceAccountToken(c *clientset.Clientset, ns string, name string, shouldWait bool) (string, string, error) {
|
||||
tokenName := ""
|
||||
token := ""
|
||||
|
||||
findToken := func() (bool, error) {
|
||||
user, err := c.ServiceAccounts(ns).Get(name)
|
||||
user, err := c.Legacy().ServiceAccounts(ns).Get(name)
|
||||
if errors.IsNotFound(err) {
|
||||
return false, nil
|
||||
}
|
||||
|
@ -463,7 +464,7 @@ func getReferencedServiceAccountToken(c *client.Client, ns string, name string,
|
|||
}
|
||||
|
||||
for _, ref := range user.Secrets {
|
||||
secret, err := c.Secrets(ns).Get(ref.Name)
|
||||
secret, err := c.Legacy().Secrets(ns).Get(ref.Name)
|
||||
if errors.IsNotFound(err) {
|
||||
continue
|
||||
}
|
||||
|
@ -505,7 +506,7 @@ func getReferencedServiceAccountToken(c *client.Client, ns string, name string,
|
|||
|
||||
type testOperation func() error
|
||||
|
||||
func doServiceAccountAPIRequests(t *testing.T, c *client.Client, ns string, authenticated bool, canRead bool, canWrite bool) {
|
||||
func doServiceAccountAPIRequests(t *testing.T, c *clientset.Clientset, ns string, authenticated bool, canRead bool, canWrite bool) {
|
||||
testSecret := &api.Secret{
|
||||
ObjectMeta: api.ObjectMeta{Name: "testSecret"},
|
||||
Data: map[string][]byte{"test": []byte("data")},
|
||||
|
@ -513,17 +514,17 @@ func doServiceAccountAPIRequests(t *testing.T, c *client.Client, ns string, auth
|
|||
|
||||
readOps := []testOperation{
|
||||
func() error {
|
||||
_, err := c.Secrets(ns).List(api.ListOptions{})
|
||||
_, err := c.Legacy().Secrets(ns).List(api.ListOptions{})
|
||||
return err
|
||||
},
|
||||
func() error {
|
||||
_, err := c.Pods(ns).List(api.ListOptions{})
|
||||
_, err := c.Legacy().Pods(ns).List(api.ListOptions{})
|
||||
return err
|
||||
},
|
||||
}
|
||||
writeOps := []testOperation{
|
||||
func() error { _, err := c.Secrets(ns).Create(testSecret); return err },
|
||||
func() error { return c.Secrets(ns).Delete(testSecret.Name) },
|
||||
func() error { _, err := c.Legacy().Secrets(ns).Create(testSecret); return err },
|
||||
func() error { return c.Legacy().Secrets(ns).Delete(testSecret.Name, nil) },
|
||||
}
|
||||
|
||||
for _, op := range readOps {
|
||||
|
|