mirror of https://github.com/k3s-io/k3s
Merge pull request #75616 from andrewsykim/e2e-remove-internal-clientset
remove usages of internal clientset in e2e framework
k3s-v1.15.3
commit 5d77a5217a
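The PR's single theme: the e2e helpers stop taking the deprecated internal clientset and rely on the versioned client-go clientset alone. As a rough sketch of what a test call site looks like after this change (the names, replica count, and surrounding imports are illustrative, not taken from the diff):

    // Hypothetical e2e snippet: RCConfig no longer has an InternalClient field,
    // so only the versioned clientset from the framework is wired in.
    config := testutils.RCConfig{
        Client:    f.ClientSet,                    // versioned client-go clientset
        Name:      "example-rc",                   // illustrative name
        Namespace: f.Namespace.Name,
        Image:     imageutils.GetPauseImageName(),
        Replicas:  3,                              // illustrative count
    }
    framework.ExpectNoError(framework.RunRC(config))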
@@ -57,6 +57,7 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library",
         "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
         "//staging/src/k8s.io/client-go/tools/watch:go_default_library",
         "//test/e2e/common:go_default_library",

@@ -30,7 +30,7 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/util/wait"
     clientset "k8s.io/client-go/kubernetes"
-    "k8s.io/kubernetes/pkg/api/legacyscheme"
+    "k8s.io/client-go/kubernetes/scheme"
     batchinternal "k8s.io/kubernetes/pkg/apis/batch"
     "k8s.io/kubernetes/pkg/controller/job"
     "k8s.io/kubernetes/test/e2e/framework"

@@ -464,7 +464,7 @@ func waitForEventWithReason(c clientset.Interface, ns, cronJobName string, reaso
         if err != nil {
             return false, err
         }
-        events, err := c.CoreV1().Events(ns).Search(legacyscheme.Scheme, sj)
+        events, err := c.CoreV1().Events(ns).Search(scheme.Scheme, sj)
         if err != nil {
             return false, err
         }

@@ -199,13 +199,12 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
         // All the restart tests need an rc and a watch on pods of the rc.
         // Additionally some of them might scale the rc during the test.
         config = testutils.RCConfig{
-            Client:         f.ClientSet,
-            InternalClient: f.InternalClientset,
-            Name:           rcName,
-            Namespace:      ns,
-            Image:          imageutils.GetPauseImageName(),
-            Replicas:       numPods,
-            CreatedPods:    &[]*v1.Pod{},
+            Client:      f.ClientSet,
+            Name:        rcName,
+            Namespace:   ns,
+            Image:       imageutils.GetPauseImageName(),
+            Replicas:    numPods,
+            CreatedPods: &[]*v1.Pod{},
         }
         Expect(framework.RunRC(config)).NotTo(HaveOccurred())
         replacePods(*config.CreatedPods, existingPods)

@@ -93,7 +93,7 @@ var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling"
     nodeMemoryMB := (&nodeMemoryBytes).Value() / 1024 / 1024
     memRequestMB := nodeMemoryMB / 10 // Ensure each pod takes not more than 10% of node's allocatable memory.
     replicas := 1
-    resourceConsumer := common.NewDynamicResourceConsumer("resource-consumer", f.Namespace.Name, common.KindDeployment, replicas, 0, 0, 0, cpuRequestMillis, memRequestMB, f.ClientSet, f.InternalClientset, f.ScalesGetter)
+    resourceConsumer := common.NewDynamicResourceConsumer("resource-consumer", f.Namespace.Name, common.KindDeployment, replicas, 0, 0, 0, cpuRequestMillis, memRequestMB, f.ClientSet, f.ScalesGetter)
     defer resourceConsumer.CleanUp()
     resourceConsumer.WaitForReplicas(replicas, 1*time.Minute) // Should finish ~immediately, so 1 minute is more than enough.

@@ -408,14 +408,13 @@ func simpleScaleUpTest(f *framework.Framework, config *scaleUpTestConfig) func()
 
 func reserveMemoryRCConfig(f *framework.Framework, id string, replicas, megabytes int, timeout time.Duration) *testutils.RCConfig {
     return &testutils.RCConfig{
-        Client:         f.ClientSet,
-        InternalClient: f.InternalClientset,
-        Name:           id,
-        Namespace:      f.Namespace.Name,
-        Timeout:        timeout,
-        Image:          imageutils.GetPauseImageName(),
-        Replicas:       replicas,
-        MemRequest:     int64(1024 * 1024 * megabytes / replicas),
+        Client:     f.ClientSet,
+        Name:       id,
+        Namespace:  f.Namespace.Name,
+        Timeout:    timeout,
+        Image:      imageutils.GetPauseImageName(),
+        Replicas:   replicas,
+        MemRequest: int64(1024 * 1024 * megabytes / replicas),
     }
 }
 

@@ -468,15 +467,14 @@ func createHostPortPodsWithMemory(f *framework.Framework, id string, replicas, p
     By(fmt.Sprintf("Running RC which reserves host port and memory"))
     request := int64(1024 * 1024 * megabytes / replicas)
     config := &testutils.RCConfig{
-        Client:         f.ClientSet,
-        InternalClient: f.InternalClientset,
-        Name:           id,
-        Namespace:      f.Namespace.Name,
-        Timeout:        timeout,
-        Image:          imageutils.GetPauseImageName(),
-        Replicas:       replicas,
-        HostPorts:      map[string]int{"port1": port},
-        MemRequest:     request,
+        Client:     f.ClientSet,
+        Name:       id,
+        Namespace:  f.Namespace.Name,
+        Timeout:    timeout,
+        Image:      imageutils.GetPauseImageName(),
+        Replicas:   replicas,
+        HostPorts:  map[string]int{"port1": port},
+        MemRequest: request,
     }
     err := framework.RunRC(*config)
     framework.ExpectNoError(err)

@@ -1322,7 +1322,6 @@ func reserveMemory(f *framework.Framework, id string, replicas, megabytes int, e
     request := int64(1024 * 1024 * megabytes / replicas)
     config := &testutils.RCConfig{
         Client:         f.ClientSet,
-        InternalClient: f.InternalClientset,
         Name:           id,
         Namespace:      f.Namespace.Name,
         Timeout:        timeout,

@@ -1564,15 +1563,14 @@ func ScheduleAnySingleGpuPod(f *framework.Framework, id string) error {
 // ScheduleGpuPod schedules a pod which requires a given number of gpus of given type
 func ScheduleGpuPod(f *framework.Framework, id string, gpuType string, gpuLimit int64) error {
     config := &testutils.RCConfig{
-        Client:         f.ClientSet,
-        InternalClient: f.InternalClientset,
-        Name:           id,
-        Namespace:      f.Namespace.Name,
-        Timeout:        3 * scaleUpTimeout, // spinning up GPU node is slow
-        Image:          imageutils.GetPauseImageName(),
-        Replicas:       1,
-        GpuLimit:       gpuLimit,
-        Labels:         map[string]string{"requires-gpu": "yes"},
+        Client:    f.ClientSet,
+        Name:      id,
+        Namespace: f.Namespace.Name,
+        Timeout:   3 * scaleUpTimeout, // spinning up GPU node is slow
+        Image:     imageutils.GetPauseImageName(),
+        Replicas:  1,
+        GpuLimit:  gpuLimit,
+        Labels:    map[string]string{"requires-gpu": "yes"},
     }
 
     if gpuType != "" {

@@ -1589,15 +1587,14 @@ func ScheduleGpuPod(f *framework.Framework, id string, gpuType string, gpuLimit
 // Create an RC running a given number of pods with anti-affinity
 func runAntiAffinityPods(f *framework.Framework, namespace string, pods int, id string, podLabels, antiAffinityLabels map[string]string) error {
     config := &testutils.RCConfig{
-        Affinity:       buildAntiAffinity(antiAffinityLabels),
-        Client:         f.ClientSet,
-        InternalClient: f.InternalClientset,
-        Name:           id,
-        Namespace:      namespace,
-        Timeout:        scaleUpTimeout,
-        Image:          imageutils.GetPauseImageName(),
-        Replicas:       pods,
-        Labels:         podLabels,
+        Affinity:  buildAntiAffinity(antiAffinityLabels),
+        Client:    f.ClientSet,
+        Name:      id,
+        Namespace: namespace,
+        Timeout:   scaleUpTimeout,
+        Image:     imageutils.GetPauseImageName(),
+        Replicas:  pods,
+        Labels:    podLabels,
     }
     err := framework.RunRC(*config)
     if err != nil {

@@ -1612,16 +1609,15 @@ func runAntiAffinityPods(f *framework.Framework, namespace string, pods int, id
 
 func runVolumeAntiAffinityPods(f *framework.Framework, namespace string, pods int, id string, podLabels, antiAffinityLabels map[string]string, volumes []v1.Volume) error {
     config := &testutils.RCConfig{
-        Affinity:       buildAntiAffinity(antiAffinityLabels),
-        Volumes:        volumes,
-        Client:         f.ClientSet,
-        InternalClient: f.InternalClientset,
-        Name:           id,
-        Namespace:      namespace,
-        Timeout:        scaleUpTimeout,
-        Image:          imageutils.GetPauseImageName(),
-        Replicas:       pods,
-        Labels:         podLabels,
+        Affinity:  buildAntiAffinity(antiAffinityLabels),
+        Volumes:   volumes,
+        Client:    f.ClientSet,
+        Name:      id,
+        Namespace: namespace,
+        Timeout:   scaleUpTimeout,
+        Image:     imageutils.GetPauseImageName(),
+        Replicas:  pods,
+        Labels:    podLabels,
     }
     err := framework.RunRC(*config)
     if err != nil {

@@ -1695,15 +1691,14 @@ func runReplicatedPodOnEachNode(f *framework.Framework, nodes []v1.Node, namespa
         }
     }
     config := &testutils.RCConfig{
-        Client:         f.ClientSet,
-        InternalClient: f.InternalClientset,
-        Name:           id,
-        Namespace:      namespace,
-        Timeout:        defaultTimeout,
-        Image:          imageutils.GetPauseImageName(),
-        Replicas:       0,
-        Labels:         labels,
-        MemRequest:     memRequest,
+        Client:     f.ClientSet,
+        Name:       id,
+        Namespace:  namespace,
+        Timeout:    defaultTimeout,
+        Image:      imageutils.GetPauseImageName(),
+        Replicas:   0,
+        Labels:     labels,
+        MemRequest: memRequest,
     }
     err := framework.RunRC(*config)
     if err != nil {

@@ -116,7 +116,7 @@ type HPAScaleTest struct {
 // TODO The use of 3 states is arbitrary, we could eventually make this test handle "n" states once this test stabilizes.
 func (scaleTest *HPAScaleTest) run(name string, kind schema.GroupVersionKind, rc *common.ResourceConsumer, f *framework.Framework) {
     const timeToWait = 15 * time.Minute
-    rc = common.NewDynamicResourceConsumer(name, f.Namespace.Name, kind, scaleTest.initPods, scaleTest.totalInitialCPUUsage, 0, 0, scaleTest.perPodCPURequest, 200, f.ClientSet, f.InternalClientset, f.ScalesGetter)
+    rc = common.NewDynamicResourceConsumer(name, f.Namespace.Name, kind, scaleTest.initPods, scaleTest.totalInitialCPUUsage, 0, 0, scaleTest.perPodCPURequest, 200, f.ClientSet, f.ScalesGetter)
     defer rc.CleanUp()
     hpa := common.CreateCPUHorizontalPodAutoscaler(rc, scaleTest.targetCPUUtilizationPercent, scaleTest.minPods, scaleTest.maxPods)
     defer common.DeleteHorizontalPodAutoscaler(rc, hpa.Name)

@@ -46,7 +46,6 @@ go_library(
     deps = [
         "//pkg/api/v1/pod:go_default_library",
         "//pkg/apis/core:go_default_library",
-        "//pkg/client/clientset_generated/internalclientset:go_default_library",
         "//pkg/client/conditions:go_default_library",
         "//pkg/kubelet:go_default_library",
         "//pkg/kubelet/events:go_default_library",

@@ -31,7 +31,6 @@ import (
     "k8s.io/apimachinery/pkg/util/wait"
     clientset "k8s.io/client-go/kubernetes"
     api "k8s.io/kubernetes/pkg/apis/core"
-    "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
     "k8s.io/kubernetes/test/e2e/framework"
     testutils "k8s.io/kubernetes/test/utils"
 

@@ -86,7 +85,6 @@ type ResourceConsumer struct {
     kind              schema.GroupVersionKind
     nsName            string
     clientSet         clientset.Interface
-    internalClientset *internalclientset.Clientset
     scaleClient       scaleclient.ScalesGetter
     cpu               chan int
     mem               chan int

@@ -106,20 +104,20 @@ func GetResourceConsumerImage() string {
     return resourceConsumerImage
 }
 
-func NewDynamicResourceConsumer(name, nsName string, kind schema.GroupVersionKind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuLimit, memLimit int64, clientset clientset.Interface, internalClientset *internalclientset.Clientset, scaleClient scaleclient.ScalesGetter) *ResourceConsumer {
+func NewDynamicResourceConsumer(name, nsName string, kind schema.GroupVersionKind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuLimit, memLimit int64, clientset clientset.Interface, scaleClient scaleclient.ScalesGetter) *ResourceConsumer {
     return newResourceConsumer(name, nsName, kind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, dynamicConsumptionTimeInSeconds,
-        dynamicRequestSizeInMillicores, dynamicRequestSizeInMegabytes, dynamicRequestSizeCustomMetric, cpuLimit, memLimit, clientset, internalClientset, scaleClient, nil, nil)
+        dynamicRequestSizeInMillicores, dynamicRequestSizeInMegabytes, dynamicRequestSizeCustomMetric, cpuLimit, memLimit, clientset, scaleClient, nil, nil)
 }
 
 // TODO this still defaults to replication controller
-func NewStaticResourceConsumer(name, nsName string, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuLimit, memLimit int64, clientset clientset.Interface, internalClientset *internalclientset.Clientset, scaleClient scaleclient.ScalesGetter) *ResourceConsumer {
+func NewStaticResourceConsumer(name, nsName string, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuLimit, memLimit int64, clientset clientset.Interface, scaleClient scaleclient.ScalesGetter) *ResourceConsumer {
     return newResourceConsumer(name, nsName, KindRC, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, staticConsumptionTimeInSeconds,
-        initCPUTotal/replicas, initMemoryTotal/replicas, initCustomMetric/replicas, cpuLimit, memLimit, clientset, internalClientset, scaleClient, nil, nil)
+        initCPUTotal/replicas, initMemoryTotal/replicas, initCustomMetric/replicas, cpuLimit, memLimit, clientset, scaleClient, nil, nil)
 }
 
-func NewMetricExporter(name, nsName string, podAnnotations, serviceAnnotations map[string]string, metricValue int, clientset clientset.Interface, internalClientset *internalclientset.Clientset, scaleClient scaleclient.ScalesGetter) *ResourceConsumer {
+func NewMetricExporter(name, nsName string, podAnnotations, serviceAnnotations map[string]string, metricValue int, clientset clientset.Interface, scaleClient scaleclient.ScalesGetter) *ResourceConsumer {
     return newResourceConsumer(name, nsName, KindDeployment, 1, 0, 0, metricValue, dynamicConsumptionTimeInSeconds,
-        dynamicRequestSizeInMillicores, dynamicRequestSizeInMegabytes, dynamicRequestSizeCustomMetric, 100, 100, clientset, internalClientset, scaleClient, podAnnotations, serviceAnnotations)
+        dynamicRequestSizeInMillicores, dynamicRequestSizeInMegabytes, dynamicRequestSizeCustomMetric, 100, 100, clientset, scaleClient, podAnnotations, serviceAnnotations)
 }
 
 /*

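NewDynamicResourceConsumer, NewStaticResourceConsumer, and NewMetricExporter now accept the versioned clientset and a ScalesGetter only. A hedged sketch of a caller against the new signature (the numeric values and timeout are placeholders):

    // Hypothetical call after this change; argument order follows the new signature:
    // name, namespace, kind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric,
    // cpuLimit, memLimit, clientset, scaleClient.
    rc := common.NewDynamicResourceConsumer("consumer", f.Namespace.Name, common.KindDeployment,
        1, 250, 0, 0, 500, 200, f.ClientSet, f.ScalesGetter)
    defer rc.CleanUp()
    rc.WaitForReplicas(1, 5*time.Minute) // placeholder timeout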
@@ -130,21 +128,20 @@ memLimit argument is in megabytes, memLimit is a maximum amount of memory that c
 cpuLimit argument is in millicores, cpuLimit is a maximum amount of cpu that can be consumed by a single pod
 */
 func newResourceConsumer(name, nsName string, kind schema.GroupVersionKind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, consumptionTimeInSeconds, requestSizeInMillicores,
-    requestSizeInMegabytes int, requestSizeCustomMetric int, cpuLimit, memLimit int64, clientset clientset.Interface, internalClientset *internalclientset.Clientset, scaleClient scaleclient.ScalesGetter, podAnnotations, serviceAnnotations map[string]string) *ResourceConsumer {
+    requestSizeInMegabytes int, requestSizeCustomMetric int, cpuLimit, memLimit int64, clientset clientset.Interface, scaleClient scaleclient.ScalesGetter, podAnnotations, serviceAnnotations map[string]string) *ResourceConsumer {
     if podAnnotations == nil {
         podAnnotations = make(map[string]string)
     }
     if serviceAnnotations == nil {
         serviceAnnotations = make(map[string]string)
     }
-    runServiceAndWorkloadForResourceConsumer(clientset, internalClientset, nsName, name, kind, replicas, cpuLimit, memLimit, podAnnotations, serviceAnnotations)
+    runServiceAndWorkloadForResourceConsumer(clientset, nsName, name, kind, replicas, cpuLimit, memLimit, podAnnotations, serviceAnnotations)
     rc := &ResourceConsumer{
         name:              name,
         controllerName:    name + "-ctrl",
         kind:              kind,
         nsName:            nsName,
         clientSet:         clientset,
-        internalClientset: internalClientset,
         scaleClient:       scaleClient,
         cpu:               make(chan int),
         mem:               make(chan int),

@@ -436,7 +433,7 @@ func (rc *ResourceConsumer) CleanUp() {
     framework.ExpectNoError(rc.clientSet.CoreV1().Services(rc.nsName).Delete(rc.controllerName, nil))
 }
 
-func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, internalClient internalclientset.Interface, ns, name string, kind schema.GroupVersionKind, replicas int, cpuLimitMillis, memLimitMb int64, podAnnotations, serviceAnnotations map[string]string) {
+func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, ns, name string, kind schema.GroupVersionKind, replicas int, cpuLimitMillis, memLimitMb int64, podAnnotations, serviceAnnotations map[string]string) {
     By(fmt.Sprintf("Running consuming RC %s via %s with %v replicas", name, kind, replicas))
     _, err := c.CoreV1().Services(ns).Create(&v1.Service{
         ObjectMeta: metav1.ObjectMeta{

@@ -457,18 +454,17 @@ func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, internalCli
     framework.ExpectNoError(err)
 
     rcConfig := testutils.RCConfig{
-        Client:         c,
-        InternalClient: internalClient,
-        Image:          resourceConsumerImage,
-        Name:           name,
-        Namespace:      ns,
-        Timeout:        timeoutRC,
-        Replicas:       replicas,
-        CpuRequest:     cpuLimitMillis,
-        CpuLimit:       cpuLimitMillis,
-        MemRequest:     memLimitMb * 1024 * 1024, // MemLimit is in bytes
-        MemLimit:       memLimitMb * 1024 * 1024,
-        Annotations:    podAnnotations,
+        Client:      c,
+        Image:       resourceConsumerImage,
+        Name:        name,
+        Namespace:   ns,
+        Timeout:     timeoutRC,
+        Replicas:    replicas,
+        CpuRequest:  cpuLimitMillis,
+        CpuLimit:    cpuLimitMillis,
+        MemRequest:  memLimitMb * 1024 * 1024, // MemLimit is in bytes
+        MemLimit:    memLimitMb * 1024 * 1024,
+        Annotations: podAnnotations,
     }
 
     switch kind {

@@ -47,7 +47,6 @@ go_library(
         "//pkg/apis/batch:go_default_library",
         "//pkg/apis/core:go_default_library",
         "//pkg/apis/extensions:go_default_library",
-        "//pkg/client/clientset_generated/internalclientset:go_default_library",
         "//pkg/client/conditions:go_default_library",
         "//pkg/controller:go_default_library",
         "//pkg/controller/deployment/util:go_default_library",

@@ -47,7 +47,6 @@ import (
     scaleclient "k8s.io/client-go/scale"
     aggregatorclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
     "k8s.io/kubernetes/pkg/api/legacyscheme"
-    "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
     "k8s.io/kubernetes/test/e2e/framework/metrics"
     testutils "k8s.io/kubernetes/test/utils"
 

@@ -75,9 +74,8 @@ type Framework struct {
     ClientSet                        clientset.Interface
     KubemarkExternalClusterClientSet clientset.Interface
 
-    InternalClientset *internalclientset.Clientset
-    AggregatorClient  *aggregatorclient.Clientset
-    DynamicClient     dynamic.Interface
+    AggregatorClient *aggregatorclient.Clientset
+    DynamicClient    dynamic.Interface
 
     ScalesGetter scaleclient.ScalesGetter
 

@@ -180,8 +178,6 @@ func (f *Framework) BeforeEach() {
     }
     f.ClientSet, err = clientset.NewForConfig(config)
     ExpectNoError(err)
-    f.InternalClientset, err = internalclientset.NewForConfig(config)
-    ExpectNoError(err)
     f.AggregatorClient, err = aggregatorclient.NewForConfig(config)
     ExpectNoError(err)
     f.DynamicClient, err = dynamic.NewForConfig(config)

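With InternalClientset dropped from the Framework struct and from BeforeEach, tests reach the API server through f.ClientSet (and f.DynamicClient / f.ScalesGetter where needed). An illustrative fragment from inside a test body, assuming the usual metav1 import:

    // Hypothetical snippet: the versioned clientset on the framework is now the only clientset.
    pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{})
    framework.ExpectNoError(err)
    framework.Logf("found %d pods in %s", len(pods.Items), f.Namespace.Name)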
@@ -39,7 +39,6 @@ import (
     clientset "k8s.io/client-go/kubernetes"
     "k8s.io/client-go/util/retry"
     api "k8s.io/kubernetes/pkg/apis/core"
-    "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
     "k8s.io/kubernetes/pkg/registry/core/service/portallocator"
     testutils "k8s.io/kubernetes/test/utils"
     imageutils "k8s.io/kubernetes/test/utils/image"

@@ -1321,7 +1320,7 @@ func ValidateEndpointsOrFail(c clientset.Interface, namespace, serviceName strin
 
 // StartServeHostnameService creates a replication controller that serves its
 // hostname and a service on top of it.
-func StartServeHostnameService(c clientset.Interface, internalClient internalclientset.Interface, svc *v1.Service, ns string, replicas int) ([]string, string, error) {
+func StartServeHostnameService(c clientset.Interface, svc *v1.Service, ns string, replicas int) ([]string, string, error) {
     podNames := make([]string, replicas)
     name := svc.ObjectMeta.Name
     By("creating service " + name + " in namespace " + ns)

@@ -1334,7 +1333,6 @@ func StartServeHostnameService(c clientset.Interface, internalClient internalcli
     maxContainerFailures := 0
     config := testutils.RCConfig{
         Client:         c,
-        InternalClient: internalClient,
         Image:          ServeHostnameImage,
         Name:           name,
         Namespace:      ns,

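StartServeHostnameService loses its internalClient parameter, so callers pass only the versioned clientset plus the service, namespace, and replica count. A sketch of the updated call pattern (mirroring the call sites further down in this diff; the service name is illustrative):

    // Hypothetical call site after the signature change.
    svc := getServeHostnameService("service1")
    podNames, svcIP, err := framework.StartServeHostnameService(cs, svc, ns, numPods)
    Expect(err).NotTo(HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svcIP, ns)
    defer framework.StopServeHostnameService(cs, ns, svc.Name)
    framework.Logf("pods serving hostname: %v", podNames)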
@@ -80,7 +80,6 @@ import (
     batchinternal "k8s.io/kubernetes/pkg/apis/batch"
     api "k8s.io/kubernetes/pkg/apis/core"
     extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
-    "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
     "k8s.io/kubernetes/pkg/client/conditions"
     "k8s.io/kubernetes/pkg/controller"
     nodectlr "k8s.io/kubernetes/pkg/controller/nodelifecycle"

@@ -2133,13 +2132,6 @@ func LoadConfig() (*restclient.Config, error) {
 
     return clientcmd.NewDefaultClientConfig(*c, &clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: TestContext.Host}}).ClientConfig()
 }
-func LoadInternalClientset() (*internalclientset.Clientset, error) {
-    config, err := LoadConfig()
-    if err != nil {
-        return nil, fmt.Errorf("error creating client: %v", err.Error())
-    }
-    return internalclientset.NewForConfig(config)
-}
 
 func LoadClientset() (*clientset.Clientset, error) {
     config, err := LoadConfig()

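LoadInternalClientset is deleted outright; the remaining LoadClientset helper covers the same need with the versioned clientset. A sketch of the replacement, assuming it is called from a test package that imports the e2e framework:

    // Hypothetical replacement for a former LoadInternalClientset caller.
    c, err := framework.LoadClientset()
    if err != nil {
        framework.Failf("error loading clientset: %v", err)
    }
    nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
    framework.ExpectNoError(err)
    framework.Logf("cluster has %d nodes", len(nodes.Items))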
@@ -105,7 +105,7 @@ func prometheusPodCustomMetricQuery(namespace, podNamePrefix string) string {
 
 func consumeCPUResources(f *framework.Framework, consumerName string, cpuUsage int) *common.ResourceConsumer {
     return common.NewDynamicResourceConsumer(consumerName, f.Namespace.Name, common.KindDeployment, 1, cpuUsage,
-        memoryUsed, 0, int64(cpuUsage), memoryLimit, f.ClientSet, f.InternalClientset, f.ScalesGetter)
+        memoryUsed, 0, int64(cpuUsage), memoryLimit, f.ClientSet, f.ScalesGetter)
 }
 
 func exportCustomMetricFromPod(f *framework.Framework, consumerName string, metricValue int) *common.ResourceConsumer {

@@ -114,7 +114,7 @@ func exportCustomMetricFromPod(f *framework.Framework, consumerName string, metr
         "prometheus.io/path": "/metrics",
         "prometheus.io/port": "8080",
     }
-    return common.NewMetricExporter(consumerName, f.Namespace.Name, podAnnotations, nil, metricValue, f.ClientSet, f.InternalClientset, f.ScalesGetter)
+    return common.NewMetricExporter(consumerName, f.Namespace.Name, podAnnotations, nil, metricValue, f.ClientSet, f.ScalesGetter)
 }
 
 func exportCustomMetricFromService(f *framework.Framework, consumerName string, metricValue int) *common.ResourceConsumer {

@@ -123,7 +123,7 @@ func exportCustomMetricFromService(f *framework.Framework, consumerName string,
         "prometheus.io/path": "/metrics",
         "prometheus.io/port": "8080",
     }
-    return common.NewMetricExporter(consumerName, f.Namespace.Name, nil, serviceAnnotations, metricValue, f.ClientSet, f.InternalClientset, f.ScalesGetter)
+    return common.NewMetricExporter(consumerName, f.Namespace.Name, nil, serviceAnnotations, metricValue, f.ClientSet, f.ScalesGetter)
 }
 
 func validateMetricAvailableForAllNodes(c clientset.Interface, metric string, expectedNodesNames []string) error {

@@ -101,7 +101,7 @@ func testStackdriverMonitoring(f *framework.Framework, pods, allPodsCPU int, per
 
     framework.ExpectNoError(err)
 
-    rc := common.NewDynamicResourceConsumer(rcName, f.Namespace.Name, common.KindDeployment, pods, allPodsCPU, memoryUsed, 0, perPodCPU, memoryLimit, f.ClientSet, f.InternalClientset, f.ScalesGetter)
+    rc := common.NewDynamicResourceConsumer(rcName, f.Namespace.Name, common.KindDeployment, pods, allPodsCPU, memoryUsed, 0, perPodCPU, memoryLimit, f.ClientSet, f.ScalesGetter)
     defer rc.CleanUp()
 
     rc.WaitForReplicas(pods, 15*time.Minute)

@@ -32,7 +32,6 @@ go_library(
     importpath = "k8s.io/kubernetes/test/e2e/network",
     deps = [
         "//pkg/apis/core:go_default_library",
-        "//pkg/client/clientset_generated/internalclientset:go_default_library",
         "//pkg/cloudprovider/providers/gce:go_default_library",
         "//pkg/controller/endpoint:go_default_library",
         "//pkg/master/ports:go_default_library",

@@ -122,13 +122,12 @@ var _ = SIGDescribe("Proxy", func() {
             By("starting an echo server on multiple ports")
             pods := []*v1.Pod{}
             cfg := testutils.RCConfig{
-                Client:         f.ClientSet,
-                InternalClient: f.InternalClientset,
-                Image:          imageutils.GetE2EImage(imageutils.Porter),
-                Name:           service.Name,
-                Namespace:      f.Namespace.Name,
-                Replicas:       1,
-                PollInterval:   time.Second,
+                Client:       f.ClientSet,
+                Image:        imageutils.GetE2EImage(imageutils.Porter),
+                Name:         service.Name,
+                Namespace:    f.Namespace.Name,
+                Replicas:     1,
+                PollInterval: time.Second,
                 Env: map[string]string{
                     "SERVE_PORT_80":   `<a href="/rewriteme">test</a>`,
                     "SERVE_PORT_1080": `<a href="/rewriteme">test</a>`,

@@ -35,7 +35,6 @@ import (
     "k8s.io/apimachinery/pkg/util/wait"
     clientset "k8s.io/client-go/kubernetes"
     cloudprovider "k8s.io/cloud-provider"
-    "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
     gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
     "k8s.io/kubernetes/pkg/controller/endpoint"
     "k8s.io/kubernetes/test/e2e/framework"

@@ -80,12 +79,10 @@ var _ = SIGDescribe("Services", func() {
     f := framework.NewDefaultFramework("services")
 
     var cs clientset.Interface
-    var internalClientset internalclientset.Interface
     serviceLBNames := []string{}
 
     BeforeEach(func() {
         cs = f.ClientSet
-        internalClientset = f.InternalClientset
     })
 
     AfterEach(func() {

@@ -322,10 +319,10 @@ var _ = SIGDescribe("Services", func() {
         numPods, servicePort := 3, defaultServeHostnameServicePort
 
         By("creating service1 in namespace " + ns)
-        podNames1, svc1IP, err := framework.StartServeHostnameService(cs, internalClientset, getServeHostnameService("service1"), ns, numPods)
+        podNames1, svc1IP, err := framework.StartServeHostnameService(cs, getServeHostnameService("service1"), ns, numPods)
         Expect(err).NotTo(HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svc1IP, ns)
         By("creating service2 in namespace " + ns)
-        podNames2, svc2IP, err := framework.StartServeHostnameService(cs, internalClientset, getServeHostnameService("service2"), ns, numPods)
+        podNames2, svc2IP, err := framework.StartServeHostnameService(cs, getServeHostnameService("service2"), ns, numPods)
         Expect(err).NotTo(HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svc2IP, ns)
 
         hosts, err := framework.NodeSSHHosts(cs)

@@ -352,7 +349,7 @@ var _ = SIGDescribe("Services", func() {
 
         // Start another service and verify both are up.
         By("creating service3 in namespace " + ns)
-        podNames3, svc3IP, err := framework.StartServeHostnameService(cs, internalClientset, getServeHostnameService("service3"), ns, numPods)
+        podNames3, svc3IP, err := framework.StartServeHostnameService(cs, getServeHostnameService("service3"), ns, numPods)
         Expect(err).NotTo(HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svc3IP, ns)
 
         if svc2IP == svc3IP {

@@ -379,13 +376,13 @@ var _ = SIGDescribe("Services", func() {
         defer func() {
             framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, ns, svc1))
         }()
-        podNames1, svc1IP, err := framework.StartServeHostnameService(cs, internalClientset, getServeHostnameService(svc1), ns, numPods)
+        podNames1, svc1IP, err := framework.StartServeHostnameService(cs, getServeHostnameService(svc1), ns, numPods)
         Expect(err).NotTo(HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svc1IP, ns)
 
         defer func() {
             framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, ns, svc2))
         }()
-        podNames2, svc2IP, err := framework.StartServeHostnameService(cs, internalClientset, getServeHostnameService(svc2), ns, numPods)
+        podNames2, svc2IP, err := framework.StartServeHostnameService(cs, getServeHostnameService(svc2), ns, numPods)
         Expect(err).NotTo(HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svc2IP, ns)
 
         if svc1IP == svc2IP {

@@ -432,7 +429,7 @@ var _ = SIGDescribe("Services", func() {
         defer func() {
             framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, ns, "service1"))
         }()
-        podNames1, svc1IP, err := framework.StartServeHostnameService(cs, internalClientset, getServeHostnameService("service1"), ns, numPods)
+        podNames1, svc1IP, err := framework.StartServeHostnameService(cs, getServeHostnameService("service1"), ns, numPods)
         Expect(err).NotTo(HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svc1IP, ns)
 
         hosts, err := framework.NodeSSHHosts(cs)

@@ -459,7 +456,7 @@ var _ = SIGDescribe("Services", func() {
         defer func() {
             framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, ns, "service2"))
         }()
-        podNames2, svc2IP, err := framework.StartServeHostnameService(cs, internalClientset, getServeHostnameService("service2"), ns, numPods)
+        podNames2, svc2IP, err := framework.StartServeHostnameService(cs, getServeHostnameService("service2"), ns, numPods)
         Expect(err).NotTo(HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svc2IP, ns)
 
         if svc1IP == svc2IP {

@@ -1743,12 +1740,12 @@ var _ = SIGDescribe("Services", func() {
         By("creating service-disabled in namespace " + ns)
         svcDisabled := getServeHostnameService("service-disabled")
         svcDisabled.ObjectMeta.Labels = serviceProxyNameLabels
-        _, svcDisabledIP, err := framework.StartServeHostnameService(cs, internalClientset, svcDisabled, ns, numPods)
+        _, svcDisabledIP, err := framework.StartServeHostnameService(cs, svcDisabled, ns, numPods)
         Expect(err).NotTo(HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svcDisabledIP, ns)
 
         By("creating service in namespace " + ns)
         svcToggled := getServeHostnameService("service")
-        podToggledNames, svcToggledIP, err := framework.StartServeHostnameService(cs, internalClientset, svcToggled, ns, numPods)
+        podToggledNames, svcToggledIP, err := framework.StartServeHostnameService(cs, svcToggled, ns, numPods)
         Expect(err).NotTo(HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svcToggledIP, ns)
 
         jig := framework.NewServiceTestJig(cs, svcToggled.ObjectMeta.Name)

@@ -2211,7 +2208,7 @@ func execAffinityTestForNonLBService(f *framework.Framework, cs clientset.Interf
     By("creating service in namespace " + ns)
     serviceType := svc.Spec.Type
     svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP
-    _, _, err := framework.StartServeHostnameService(cs, f.InternalClientset, svc, ns, numPods)
+    _, _, err := framework.StartServeHostnameService(cs, svc, ns, numPods)
     Expect(err).NotTo(HaveOccurred(), "failed to create replication controller with service in the namespace: %s", ns)
     defer func() {
         framework.StopServeHostnameService(cs, ns, serviceName)

@@ -2262,7 +2259,7 @@ func execAffinityTestForLBService(f *framework.Framework, cs clientset.Interface
 
     By("creating service in namespace " + ns)
     svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP
-    _, _, err := framework.StartServeHostnameService(cs, f.InternalClientset, svc, ns, numPods)
+    _, _, err := framework.StartServeHostnameService(cs, svc, ns, numPods)
     Expect(err).NotTo(HaveOccurred(), "failed to create replication controller with service in the namespace: %s", ns)
     jig := framework.NewServiceTestJig(cs, serviceName)
     By("waiting for loadbalancer for service " + ns + "/" + serviceName)

@@ -128,13 +128,12 @@ var _ = SIGDescribe("Service endpoints latency", func() {
 
 func runServiceLatencies(f *framework.Framework, inParallel, total int, acceptableFailureRatio float32) (output []time.Duration, err error) {
     cfg := testutils.RCConfig{
-        Client:         f.ClientSet,
-        InternalClient: f.InternalClientset,
-        Image:          imageutils.GetPauseImageName(),
-        Name:           "svc-latency-rc",
-        Namespace:      f.Namespace.Name,
-        Replicas:       1,
-        PollInterval:   time.Second,
+        Client:       f.ClientSet,
+        Image:        imageutils.GetPauseImageName(),
+        Name:         "svc-latency-rc",
+        Namespace:    f.Namespace.Name,
+        Replicas:     1,
+        PollInterval: time.Second,
     }
     if err := framework.RunRC(cfg); err != nil {
         return nil, err

@@ -315,13 +315,12 @@ var _ = SIGDescribe("kubelet", func() {
             rcName := fmt.Sprintf("cleanup%d-%s", totalPods, string(uuid.NewUUID()))
 
             Expect(framework.RunRC(testutils.RCConfig{
-                Client:         f.ClientSet,
-                InternalClient: f.InternalClientset,
-                Name:           rcName,
-                Namespace:      f.Namespace.Name,
-                Image:          imageutils.GetPauseImageName(),
-                Replicas:       totalPods,
-                NodeSelector:   nodeLabels,
+                Client:       f.ClientSet,
+                Name:         rcName,
+                Namespace:    f.Namespace.Name,
+                Image:        imageutils.GetPauseImageName(),
+                Replicas:     totalPods,
+                NodeSelector: nodeLabels,
             })).NotTo(HaveOccurred())
             // Perform a sanity check so that we know all desired pods are
             // running on the nodes according to kubelet. The timeout is set to

@@ -70,12 +70,11 @@ func runResourceTrackingTest(f *framework.Framework, podsPerNode int, nodeNames
 
     // TODO: Use a more realistic workload
     Expect(framework.RunRC(testutils.RCConfig{
-        Client:         f.ClientSet,
-        InternalClient: f.InternalClientset,
-        Name:           rcName,
-        Namespace:      f.Namespace.Name,
-        Image:          imageutils.GetPauseImageName(),
-        Replicas:       totalPods,
+        Client:    f.ClientSet,
+        Name:      rcName,
+        Namespace: f.Namespace.Name,
+        Image:     imageutils.GetPauseImageName(),
+        Replicas:  totalPods,
     })).NotTo(HaveOccurred())
 
     // Log once and flush the stats.

@@ -15,7 +15,6 @@ go_library(
         "//pkg/apis/batch:go_default_library",
         "//pkg/apis/core:go_default_library",
         "//pkg/apis/extensions:go_default_library",
-        "//pkg/client/clientset_generated/internalclientset:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",

@@ -42,7 +42,6 @@ import (
     "k8s.io/kubernetes/pkg/apis/batch"
     api "k8s.io/kubernetes/pkg/apis/core"
     "k8s.io/kubernetes/pkg/apis/extensions"
-    "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
     "k8s.io/kubernetes/test/e2e/framework"
     "k8s.io/kubernetes/test/e2e/framework/timer"
     testutils "k8s.io/kubernetes/test/utils"

@@ -70,12 +69,11 @@ var MaxMissingPodStartupMeasurements = 0
 var nodeCount = 0
 
 type DensityTestConfig struct {
-    Configs            []testutils.RunObjectConfig
-    ClientSets         []clientset.Interface
-    InternalClientsets []internalclientset.Interface
-    ScaleClients       []scaleclient.ScalesGetter
-    PollInterval       time.Duration
-    PodCount           int
+    Configs      []testutils.RunObjectConfig
+    ClientSets   []clientset.Interface
+    ScaleClients []scaleclient.ScalesGetter
+    PollInterval time.Duration
+    PodCount     int
     // What kind of resource we want to create
     kind          schema.GroupKind
     SecretConfigs []*testutils.SecretConfig

@@ -644,7 +642,7 @@ var _ = SIGDescribe("Density", func() {
             }
             timeout += 3 * time.Minute
             // createClients is defined in load.go
-            clients, internalClients, scalesClients, err := createClients(numberOfCollections)
+            clients, scalesClients, err := createClients(numberOfCollections)
             framework.ExpectNoError(err)
             for i := 0; i < numberOfCollections; i++ {
                 nsName := namespaces[i].Name

@@ -675,7 +673,6 @@ var _ = SIGDescribe("Density", func() {
                 name := fmt.Sprintf("density%v-%v-%v", totalPods, i, uuid)
                 baseConfig := &testutils.RCConfig{
                     Client:         clients[i],
-                    InternalClient: internalClients[i],
                     ScalesGetter:   scalesClients[i],
                     Image:          imageutils.GetPauseImageName(),
                     Name:           name,

@@ -722,18 +719,17 @@ var _ = SIGDescribe("Density", func() {
             }
 
             // Single client is running out of http2 connections in delete phase, hence we need more.
-            clients, internalClients, scalesClients, err = createClients(2)
+            clients, scalesClients, err = createClients(2)
             framework.ExpectNoError(err)
             dConfig := DensityTestConfig{
-                ClientSets:         clients,
-                InternalClientsets: internalClients,
-                ScaleClients:       scalesClients,
-                Configs:            configs,
-                PodCount:           totalPods,
-                PollInterval:       DensityPollInterval,
-                kind:               itArg.kind,
-                SecretConfigs:      secretConfigs,
-                ConfigMapConfigs:   configMapConfigs,
+                ClientSets:       clients,
+                ScaleClients:     scalesClients,
+                Configs:          configs,
+                PodCount:         totalPods,
+                PollInterval:     DensityPollInterval,
+                kind:             itArg.kind,
+                SecretConfigs:    secretConfigs,
+                ConfigMapConfigs: configMapConfigs,
             }
 
             for i := 0; i < itArg.daemonsPerNode; i++ {

@@ -49,7 +49,6 @@ import (
     "k8s.io/kubernetes/pkg/apis/batch"
     api "k8s.io/kubernetes/pkg/apis/core"
     "k8s.io/kubernetes/pkg/apis/extensions"
-    "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
     "k8s.io/kubernetes/test/e2e/framework"
     "k8s.io/kubernetes/test/e2e/framework/timer"
     testutils "k8s.io/kubernetes/test/utils"

@@ -341,9 +340,8 @@ var _ = SIGDescribe("Load capacity", func() {
         }
     })
 
-func createClients(numberOfClients int) ([]clientset.Interface, []internalclientset.Interface, []scaleclient.ScalesGetter, error) {
+func createClients(numberOfClients int) ([]clientset.Interface, []scaleclient.ScalesGetter, error) {
     clients := make([]clientset.Interface, numberOfClients)
-    internalClients := make([]internalclientset.Interface, numberOfClients)
     scalesClients := make([]scaleclient.ScalesGetter, numberOfClients)
 
     for i := 0; i < numberOfClients; i++ {

@@ -361,11 +359,11 @@ func createClients(numberOfClients int) ([]clientset.Interface, []internalclient
         // each client here.
         transportConfig, err := config.TransportConfig()
         if err != nil {
-            return nil, nil, nil, err
+            return nil, nil, err
         }
         tlsConfig, err := transport.TLSConfigFor(transportConfig)
         if err != nil {
-            return nil, nil, nil, err
+            return nil, nil, err
         }
         config.Transport = utilnet.SetTransportDefaults(&http.Transport{
             Proxy: http.ProxyFromEnvironment,

@@ -387,14 +385,9 @@ func createClients(numberOfClients int) ([]clientset.Interface, []internalclient
 
         c, err := clientset.NewForConfig(config)
         if err != nil {
-            return nil, nil, nil, err
+            return nil, nil, err
         }
         clients[i] = c
-        internalClient, err := internalclientset.NewForConfig(config)
-        if err != nil {
-            return nil, nil, nil, err
-        }
-        internalClients[i] = internalClient
 
         // create scale client, if GroupVersion or NegotiatedSerializer are not set
         // assign default values - these fields are mandatory (required by RESTClientFor).

@@ -406,11 +399,11 @@ func createClients(numberOfClients int) ([]clientset.Interface, []internalclient
         }
         restClient, err := restclient.RESTClientFor(config)
         if err != nil {
-            return nil, nil, nil, err
+            return nil, nil, err
         }
         discoClient, err := discovery.NewDiscoveryClientForConfig(config)
         if err != nil {
-            return nil, nil, nil, err
+            return nil, nil, err
         }
         cachedDiscoClient := cacheddiscovery.NewMemCacheClient(discoClient)
         restMapper := restmapper.NewDeferredDiscoveryRESTMapper(cachedDiscoClient)

@@ -418,7 +411,7 @@ func createClients(numberOfClients int) ([]clientset.Interface, []internalclient
         resolver := scaleclient.NewDiscoveryScaleKindResolver(cachedDiscoClient)
         scalesClients[i] = scaleclient.New(restClient, restMapper, dynamic.LegacyAPIPathResolverFunc, resolver)
     }
-    return clients, internalClients, scalesClients, nil
+    return clients, scalesClients, nil
 }
 
 func computePodCounts(total int) (int, int, int) {

@@ -478,12 +471,11 @@ func generateConfigs(
     // Create a number of clients to better simulate real usecase
     // where not everyone is using exactly the same client.
     rcsPerClient := 20
-    clients, internalClients, scalesClients, err := createClients((len(configs) + rcsPerClient - 1) / rcsPerClient)
+    clients, scalesClients, err := createClients((len(configs) + rcsPerClient - 1) / rcsPerClient)
     framework.ExpectNoError(err)
 
     for i := 0; i < len(configs); i++ {
         configs[i].SetClient(clients[i%len(clients)])
-        configs[i].SetInternalClient(internalClients[i%len(internalClients)])
         configs[i].SetScalesClient(scalesClients[i%len(clients)])
     }
     for i := 0; i < len(secretConfigs); i++ {

@@ -542,7 +534,6 @@ func GenerateConfigsForGroup(
 
     baseConfig := &testutils.RCConfig{
         Client:         nil, // this will be overwritten later
-        InternalClient: nil, // this will be overwritten later
         Name:           groupName + "-" + strconv.Itoa(i),
         Namespace:      namespace,
         Timeout:        UnreadyNodeToleration,

@@ -272,15 +272,14 @@ func CreateNodeSelectorPods(f *framework.Framework, id string, replicas int, nod
     By(fmt.Sprintf("Running RC which reserves host port and defines node selector"))
 
     config := &testutils.RCConfig{
-        Client:         f.ClientSet,
-        InternalClient: f.InternalClientset,
-        Name:           id,
-        Namespace:      f.Namespace.Name,
-        Timeout:        defaultTimeout,
-        Image:          imageutils.GetPauseImageName(),
-        Replicas:       replicas,
-        HostPorts:      map[string]int{"port1": 4321},
-        NodeSelector:   nodeSelector,
+        Client:       f.ClientSet,
+        Name:         id,
+        Namespace:    f.Namespace.Name,
+        Timeout:      defaultTimeout,
+        Image:        imageutils.GetPauseImageName(),
+        Replicas:     replicas,
+        HostPorts:    map[string]int{"port1": 4321},
+        NodeSelector: nodeSelector,
     }
     err := framework.RunRC(*config)
     if expectRunning {

@@ -787,14 +787,13 @@ func getNodeThatCanRunPodWithoutToleration(f *framework.Framework) string {
 func CreateHostPortPods(f *framework.Framework, id string, replicas int, expectRunning bool) {
     By(fmt.Sprintf("Running RC which reserves host port"))
     config := &testutils.RCConfig{
-        Client:         f.ClientSet,
-        InternalClient: f.InternalClientset,
-        Name:           id,
-        Namespace:      f.Namespace.Name,
-        Timeout:        defaultTimeout,
-        Image:          imageutils.GetPauseImageName(),
-        Replicas:       replicas,
-        HostPorts:      map[string]int{"port1": 4321},
+        Client:    f.ClientSet,
+        Name:      id,
+        Namespace: f.Namespace.Name,
+        Timeout:   defaultTimeout,
+        Image:     imageutils.GetPauseImageName(),
+        Replicas:  replicas,
+        HostPorts: map[string]int{"port1": 4321},
     }
     err := framework.RunRC(*config)
     if expectRunning {

@@ -49,7 +49,6 @@ func (t *HPAUpgradeTest) Setup(f *framework.Framework) {
         500, /* cpuLimit */
         200, /* memLimit */
         f.ClientSet,
-        f.InternalClientset,
         f.ScalesGetter)
     t.hpa = common.CreateCPUHorizontalPodAutoscaler(
         t.rc,

@@ -30,7 +30,6 @@ go_library(
         "//pkg/apis/batch:go_default_library",
         "//pkg/apis/core:go_default_library",
         "//pkg/apis/extensions:go_default_library",
-        "//pkg/client/clientset_generated/internalclientset:go_default_library",
         "//pkg/controller/deployment/util:go_default_library",
         "//pkg/kubectl:go_default_library",
         "//pkg/util/labels:go_default_library",

@@ -45,7 +45,6 @@ import (
     batchinternal "k8s.io/kubernetes/pkg/apis/batch"
     api "k8s.io/kubernetes/pkg/apis/core"
     extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
-    "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 
     "k8s.io/klog"
 )

@@ -99,10 +98,8 @@ type RunObjectConfig interface {
     GetNamespace() string
     GetKind() schema.GroupKind
     GetClient() clientset.Interface
-    GetInternalClient() internalclientset.Interface
     GetScalesGetter() scaleclient.ScalesGetter
     SetClient(clientset.Interface)
-    SetInternalClient(internalclientset.Interface)
     SetScalesClient(scaleclient.ScalesGetter)
     GetReplicas() int
     GetLabelValue(string) (string, bool)

@@ -112,7 +109,6 @@ type RCConfig struct {
 type RCConfig struct {
     Affinity       *v1.Affinity
     Client         clientset.Interface
-    InternalClient internalclientset.Interface
     ScalesGetter   scaleclient.ScalesGetter
     Image          string
     Command        []string

@@ -527,10 +523,6 @@ func (config *RCConfig) GetClient() clientset.Interface {
     return config.Client
 }
 
-func (config *RCConfig) GetInternalClient() internalclientset.Interface {
-    return config.InternalClient
-}
-
 func (config *RCConfig) GetScalesGetter() scaleclient.ScalesGetter {
     return config.ScalesGetter
 }

@@ -539,10 +531,6 @@ func (config *RCConfig) SetClient(c clientset.Interface) {
     config.Client = c
 }
 
-func (config *RCConfig) SetInternalClient(c internalclientset.Interface) {
-    config.InternalClient = c
-}
-
 func (config *RCConfig) SetScalesClient(getter scaleclient.ScalesGetter) {
     config.ScalesGetter = getter
 }

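With GetInternalClient/SetInternalClient removed from RunObjectConfig and InternalClient removed from RCConfig, wiring a config programmatically needs only the two remaining setters. An illustrative sketch (c, scales, and ns are assumed to exist in the caller; the name and replica count are placeholders):

    // Hypothetical wiring of a RunObjectConfig after the interface shrank.
    var cfg testutils.RunObjectConfig = &testutils.RCConfig{
        Name:      "load-rc", // illustrative
        Namespace: ns,
        Image:     imageutils.GetPauseImageName(),
        Replicas:  5,
    }
    cfg.SetClient(c)            // versioned clientset
    cfg.SetScalesClient(scales) // scale client for scale subresource operations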