/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e

import (
	"fmt"
	"math"
	"os"
	"sort"
	"strconv"
	"sync"
	"time"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/resource"
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/apis/batch"
	"k8s.io/kubernetes/pkg/apis/extensions"
	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/client/cache"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/fields"
	"k8s.io/kubernetes/pkg/labels"
	"k8s.io/kubernetes/pkg/runtime"
	"k8s.io/kubernetes/pkg/runtime/schema"
	"k8s.io/kubernetes/pkg/util/sets"
	utiluuid "k8s.io/kubernetes/pkg/util/uuid"
	"k8s.io/kubernetes/pkg/util/workqueue"
	"k8s.io/kubernetes/pkg/watch"
	"k8s.io/kubernetes/test/e2e/framework"
	testutils "k8s.io/kubernetes/test/utils"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)
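
// MinSaturationThreshold and MinPodsPerSecondThroughput bound the pod-startup
// throughput check in AfterEach; DensityPollInterval is how often pod startup
// status is polled and logged while a test is running.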
const (
	MinSaturationThreshold     = 2 * time.Minute
	MinPodsPerSecondThroughput = 8
	DensityPollInterval        = 10 * time.Second
)
// Maximum container failures this test tolerates before failing.
var MaxContainerFailures = 0
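
// DensityTestConfig bundles everything runDensityTest and cleanupDensityTest
// need: the controller configs to run, the clients, the poll interval, the
// expected pod count, the resource kind being exercised, and any secrets and
// daemon sets to create alongside the pods.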
type DensityTestConfig struct {
	Configs           []testutils.RunObjectConfig
	ClientSet         clientset.Interface
	InternalClientset internalclientset.Interface
	PollInterval      time.Duration
	PodCount          int
	// What kind of resource we want to create
	kind          schema.GroupKind
	SecretConfigs []*testutils.SecretConfig
	DaemonConfigs []*testutils.DaemonConfig
}
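
// density30AddonResourceVerifier returns per-component resource constraints
// used to verify that control-plane and addon resource usage stays within
// empirically tuned bounds for a cluster of the given size.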
func density30AddonResourceVerifier(numNodes int) map[string]framework.ResourceConstraint {
	var apiserverMem uint64
	var controllerMem uint64
	var schedulerMem uint64
	apiserverCPU := math.MaxFloat32
	apiserverMem = math.MaxUint64
	controllerCPU := math.MaxFloat32
	controllerMem = math.MaxUint64
	schedulerCPU := math.MaxFloat32
	schedulerMem = math.MaxUint64
	framework.Logf("Setting resource constraints for provider: %s", framework.TestContext.Provider)
	if framework.ProviderIs("kubemark") {
		if numNodes <= 5 {
			apiserverCPU = 0.35
			apiserverMem = 150 * (1024 * 1024)
			controllerCPU = 0.15
			controllerMem = 100 * (1024 * 1024)
			schedulerCPU = 0.05
			schedulerMem = 50 * (1024 * 1024)
		} else if numNodes <= 100 {
			apiserverCPU = 1.5
			apiserverMem = 1500 * (1024 * 1024)
			controllerCPU = 0.75
			controllerMem = 750 * (1024 * 1024)
			schedulerCPU = 0.75
			schedulerMem = 500 * (1024 * 1024)
		} else if numNodes <= 500 {
			apiserverCPU = 3.5
			apiserverMem = 3400 * (1024 * 1024)
			controllerCPU = 1.3
			controllerMem = 1100 * (1024 * 1024)
			schedulerCPU = 1.5
			schedulerMem = 500 * (1024 * 1024)
		} else if numNodes <= 1000 {
			apiserverCPU = 5.5
			apiserverMem = 4000 * (1024 * 1024)
			controllerCPU = 3
			controllerMem = 2000 * (1024 * 1024)
			schedulerCPU = 1.5
			schedulerMem = 750 * (1024 * 1024)
		}
	} else {
		if numNodes <= 100 {
			// TODO: Investigate higher apiserver consumption and
			// potentially revert to 1.5cpu and 1.3GB - see #30871
			apiserverCPU = 1.8
			apiserverMem = 2200 * (1024 * 1024)
			controllerCPU = 0.5
			controllerMem = 300 * (1024 * 1024)
			schedulerCPU = 0.4
			schedulerMem = 150 * (1024 * 1024)
		}
	}
	constraints := make(map[string]framework.ResourceConstraint)
	constraints["fluentd-elasticsearch"] = framework.ResourceConstraint{
		CPUConstraint:    0.2,
		MemoryConstraint: 250 * (1024 * 1024),
	}
	constraints["elasticsearch-logging"] = framework.ResourceConstraint{
		CPUConstraint: 2,
		// TODO: bring it down to 750MB again, when we lower Kubelet verbosity level. I.e. revert #19164
		MemoryConstraint: 5000 * (1024 * 1024),
	}
	constraints["heapster"] = framework.ResourceConstraint{
		CPUConstraint:    2,
		MemoryConstraint: 1800 * (1024 * 1024),
	}
	constraints["kibana-logging"] = framework.ResourceConstraint{
		CPUConstraint:    0.2,
		MemoryConstraint: 100 * (1024 * 1024),
	}
	constraints["kube-proxy"] = framework.ResourceConstraint{
		CPUConstraint:    0.15,
		MemoryConstraint: 30 * (1024 * 1024),
	}
	constraints["l7-lb-controller"] = framework.ResourceConstraint{
		CPUConstraint:    0.15,
		MemoryConstraint: 60 * (1024 * 1024),
	}
	constraints["influxdb"] = framework.ResourceConstraint{
		CPUConstraint:    2,
		MemoryConstraint: 500 * (1024 * 1024),
	}
	constraints["kube-apiserver"] = framework.ResourceConstraint{
		CPUConstraint:    apiserverCPU,
		MemoryConstraint: apiserverMem,
	}
	constraints["kube-controller-manager"] = framework.ResourceConstraint{
		CPUConstraint:    controllerCPU,
		MemoryConstraint: controllerMem,
	}
	constraints["kube-scheduler"] = framework.ResourceConstraint{
		CPUConstraint:    schedulerCPU,
		MemoryConstraint: schedulerMem,
	}
	return constraints
}
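
// logPodStartupStatus periodically polls the pods matching observedLabels and
// logs how many of the expectedPods have started, until stopCh is closed.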
func logPodStartupStatus(c clientset.Interface, expectedPods int, observedLabels map[string]string, period time.Duration, stopCh chan struct{}) {
	label := labels.SelectorFromSet(labels.Set(observedLabels))
	podStore := testutils.NewPodStore(c, v1.NamespaceAll, label, fields.Everything())
	defer podStore.Stop()
	ticker := time.NewTicker(period)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			pods := podStore.List()
			startupStatus := testutils.ComputeRCStartupStatus(pods, expectedPods)
			framework.Logf(startupStatus.String("Density"))
		case <-stopCh:
			pods := podStore.List()
			startupStatus := testutils.ComputeRCStartupStatus(pods, expectedPods)
			framework.Logf(startupStatus.String("Density"))
			return
		}
	}
}
// runDensityTest will perform a density test and return the time it took for
// all pods to start
func runDensityTest(dtc DensityTestConfig) time.Duration {
	defer GinkgoRecover()

	// Create all secrets
	for i := range dtc.SecretConfigs {
		dtc.SecretConfigs[i].Run()
	}

	for i := range dtc.DaemonConfigs {
		dtc.DaemonConfigs[i].Run()
	}

	// Start all replication controllers.
	startTime := time.Now()
	wg := sync.WaitGroup{}
	wg.Add(len(dtc.Configs))
	for i := range dtc.Configs {
		config := dtc.Configs[i]
		go func() {
			defer GinkgoRecover()
			// Call wg.Done() in defer to avoid blocking whole test
			// in case of error from RunRC.
			defer wg.Done()
			framework.ExpectNoError(config.Run())
		}()
	}
	logStopCh := make(chan struct{})
	go logPodStartupStatus(dtc.ClientSet, dtc.PodCount, map[string]string{"type": "densityPod"}, dtc.PollInterval, logStopCh)
	wg.Wait()
	startupTime := time.Now().Sub(startTime)
	close(logStopCh)
	framework.Logf("E2E startup time for %d pods: %v", dtc.PodCount, startupTime)
	framework.Logf("Throughput (pods/s) during cluster saturation phase: %v", float32(dtc.PodCount)/float32(startupTime/time.Second))

	// Print some data about Pod to Node allocation
	By("Printing Pod to Node allocation data")
	podList, err := dtc.ClientSet.Core().Pods(v1.NamespaceAll).List(v1.ListOptions{})
	framework.ExpectNoError(err)
	pausePodAllocation := make(map[string]int)
	systemPodAllocation := make(map[string][]string)
	for _, pod := range podList.Items {
		if pod.Namespace == api.NamespaceSystem {
			systemPodAllocation[pod.Spec.NodeName] = append(systemPodAllocation[pod.Spec.NodeName], pod.Name)
		} else {
			pausePodAllocation[pod.Spec.NodeName]++
		}
	}
	nodeNames := make([]string, 0)
	for k := range pausePodAllocation {
		nodeNames = append(nodeNames, k)
	}
	sort.Strings(nodeNames)
	for _, node := range nodeNames {
		framework.Logf("%v: %v pause pods, system pods: %v", node, pausePodAllocation[node], systemPodAllocation[node])
	}
	return startupTime
}
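
// cleanupDensityTest deletes everything runDensityTest created: the
// controllers (letting the garbage collector remove their pods where
// supported), the secrets, and the daemon sets.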
func cleanupDensityTest(dtc DensityTestConfig) {
	defer GinkgoRecover()
	By("Deleting created Collections")
	// We explicitly delete all pods to have API calls necessary for deletion accounted in metrics.
	for i := range dtc.Configs {
		name := dtc.Configs[i].GetName()
		namespace := dtc.Configs[i].GetNamespace()
		kind := dtc.Configs[i].GetKind()
		if framework.TestContext.GarbageCollectorEnabled && kindSupportsGarbageCollector(kind) {
			By(fmt.Sprintf("Cleaning up only the %v, garbage collector will clean up the pods", kind))
			err := framework.DeleteResourceAndWaitForGC(dtc.ClientSet, kind, namespace, name)
			framework.ExpectNoError(err)
		} else {
			By(fmt.Sprintf("Cleaning up the %v and pods", kind))
			err := framework.DeleteResourceAndPods(dtc.ClientSet, dtc.InternalClientset, kind, namespace, name)
			framework.ExpectNoError(err)
		}
	}

	// Delete all secrets
	for i := range dtc.SecretConfigs {
		dtc.SecretConfigs[i].Stop()
	}

	for i := range dtc.DaemonConfigs {
		framework.ExpectNoError(framework.DeleteResourceAndPods(
			dtc.ClientSet,
			dtc.InternalClientset,
			extensions.Kind("DaemonSet"),
			dtc.DaemonConfigs[i].Namespace,
			dtc.DaemonConfigs[i].Name,
		))
	}
}
// This test suite can take a long time to run, and can affect or be affected by other tests.
// So by default it is added to the ginkgo.skip list (see driver.go).
// To run this suite you must explicitly ask for it by setting the
// -t/--test flag or ginkgo.focus flag.
// IMPORTANT: This test is designed to work on large (>= 100 Nodes) clusters. For smaller ones
// results will not be representative for control-plane performance as we'll start hitting
// limits on Docker's concurrent container startup.
var _ = framework.KubeDescribe("Density", func() {
	var c clientset.Interface
	var nodeCount int
	var name string
	var additionalPodsPrefix string
	var ns string
	var uuid string
	var e2eStartupTime time.Duration
	var totalPods int
	var nodeCpuCapacity int64
	var nodeMemCapacity int64
	var nodes *v1.NodeList
	var masters sets.String

	// Gathers data prior to framework namespace teardown
	AfterEach(func() {
		saturationThreshold := time.Duration(totalPods/MinPodsPerSecondThroughput) * time.Second
		if saturationThreshold < MinSaturationThreshold {
			saturationThreshold = MinSaturationThreshold
		}
		Expect(e2eStartupTime).NotTo(BeNumerically(">", saturationThreshold))
		saturationData := framework.SaturationTime{
			TimeToSaturate: e2eStartupTime,
			NumberOfNodes:  nodeCount,
			NumberOfPods:   totalPods,
			Throughput:     float32(totalPods) / float32(e2eStartupTime/time.Second),
		}
		framework.Logf("Cluster saturation time: %s", framework.PrettyPrintJSON(saturationData))

		// Verify latency metrics.
		highLatencyRequests, err := framework.HighLatencyRequests(c)
		framework.ExpectNoError(err)
		Expect(highLatencyRequests).NotTo(BeNumerically(">", 0), "There should be no high-latency requests")

		// Verify scheduler metrics.
		// TODO: Reset metrics at the beginning of the test.
		// We should do something similar to how we do it for APIserver.
		if err = framework.VerifySchedulerLatency(c); err != nil {
			framework.Logf("Warning: Scheduler latency not calculated, %v", err)
		}
	})

	// Explicitly put here, to delete namespace at the end of the test
	// (after measuring latency metrics, etc.).
	f := framework.NewDefaultFramework("density")
	f.NamespaceDeletionTimeout = time.Hour

	BeforeEach(func() {
		c = f.ClientSet
		ns = f.Namespace.Name

		masters, nodes = framework.GetMasterAndWorkerNodesOrDie(c)
		nodeCount = len(nodes.Items)
		Expect(nodeCount).NotTo(BeZero())

		nodeCpuCapacity = nodes.Items[0].Status.Allocatable.Cpu().MilliValue()
		nodeMemCapacity = nodes.Items[0].Status.Allocatable.Memory().Value()

		// Terminating a namespace (deleting the remaining objects from it - which
		// generally means events) can affect the current run. Thus we wait for all
		// terminating namespaces to be finally deleted before starting this test.
		err := framework.CheckTestingNSDeletedExcept(c, ns)
		framework.ExpectNoError(err)

		uuid = string(utiluuid.NewUUID())

		framework.ExpectNoError(framework.ResetMetrics(c))
		framework.ExpectNoError(os.Mkdir(fmt.Sprintf(framework.TestContext.OutputDir+"/%s", uuid), 0777))

		framework.Logf("Listing nodes for easy debugging:\n")
		for _, node := range nodes.Items {
			var internalIP, externalIP string
			for _, address := range node.Status.Addresses {
				if address.Type == v1.NodeInternalIP {
					internalIP = address.Address
				}
				if address.Type == v1.NodeExternalIP {
					externalIP = address.Address
				}
			}
framework . Logf ( "Name: %v, clusterIP: %v, externalIP: %v" , node . ObjectMeta . Name , internalIP , externalIP )
		}
	})
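
	// Density parameterizes a single density test case.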
	type Density struct {
		// Controls if e2e latency tests should be run (they are slow)
		runLatencyTest bool
		podsPerNode    int
		// Controls how often the apiserver is polled for pods
		interval time.Duration
		// What kind of resource we should be creating. Default: ReplicationController
		kind           schema.GroupKind
		secretsPerPod  int
		daemonsPerNode int
	}

	densityTests := []Density{
		// TODO: Expose runLatencyTest as ginkgo flag.
		{podsPerNode: 3, runLatencyTest: false, kind: api.Kind("ReplicationController")},
		{podsPerNode: 30, runLatencyTest: true, kind: api.Kind("ReplicationController")},
		{podsPerNode: 50, runLatencyTest: false, kind: api.Kind("ReplicationController")},
		{podsPerNode: 95, runLatencyTest: true, kind: api.Kind("ReplicationController")},
		{podsPerNode: 100, runLatencyTest: false, kind: api.Kind("ReplicationController")},
	}

	for _, testArg := range densityTests {
		feature := "ManualPerformance"
		switch testArg.podsPerNode {
		case 30:
			if testArg.kind == api.Kind("ReplicationController") {
				feature = "Performance"
			}
		case 95:
			feature = "HighDensityPerformance"
		}

		name := fmt.Sprintf("[Feature:%s] should allow starting %d pods per node using %v with %v secrets", feature, testArg.podsPerNode, testArg.kind, testArg.secretsPerPod)
		itArg := testArg
		It(name, func() {
			nodePreparer := framework.NewE2ETestNodePreparer(
				f.ClientSet,
				[]testutils.CountToStrategy{{Count: nodeCount, Strategy: &testutils.TrivialNodePrepareStrategy{}}},
			)
			framework.ExpectNoError(nodePreparer.PrepareNodes())
			defer nodePreparer.CleanupNodes()

			podsPerNode := itArg.podsPerNode
			if podsPerNode == 30 {
				f.AddonResourceConstraints = func() map[string]framework.ResourceConstraint { return density30AddonResourceVerifier(nodeCount) }()
			}
			totalPods = (podsPerNode - itArg.daemonsPerNode) * nodeCount
			fileHndl, err := os.Create(fmt.Sprintf(framework.TestContext.OutputDir+"/%s/pod_states.csv", uuid))
			framework.ExpectNoError(err)
			defer fileHndl.Close()

			// nodeCountPerNamespace and CreateNamespaces are defined in load.go
			numberOfCollections := (nodeCount + nodeCountPerNamespace - 1) / nodeCountPerNamespace
			namespaces, err := CreateNamespaces(f, numberOfCollections, fmt.Sprintf("density-%v", testArg.podsPerNode))
			framework.ExpectNoError(err)

			configs := make([]testutils.RunObjectConfig, numberOfCollections)
			secretConfigs := make([]*testutils.SecretConfig, 0, numberOfCollections*itArg.secretsPerPod)
			// Since all RCs are created at the same time, timeout for each config
			// has to assume that it will be run at the very end.
			podThroughput := 20
			timeout := time.Duration(totalPods/podThroughput)*time.Second + 3*time.Minute
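			// For example, with 3000 pods and a throughput of 20 pods/s, each
			// config gets 150s of base timeout plus 3min of slack, i.e. 5.5
			// minutes in total.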

			// createClients is defined in load.go
			clients, internalClients, err := createClients(numberOfCollections)
			framework.ExpectNoError(err)
			for i := 0; i < numberOfCollections; i++ {
				nsName := namespaces[i].Name
				secretNames := []string{}
				for j := 0; j < itArg.secretsPerPod; j++ {
					secretName := fmt.Sprintf("density-secret-%v-%v", i, j)
					secretConfigs = append(secretConfigs, &testutils.SecretConfig{
						Content:   map[string]string{"foo": "bar"},
						Client:    clients[i],
						Name:      secretName,
						Namespace: nsName,
						LogFunc:   framework.Logf,
					})
					secretNames = append(secretNames, secretName)
				}
				name := fmt.Sprintf("density%v-%v-%v", totalPods, i, uuid)
				baseConfig := &testutils.RCConfig{
					Client:               clients[i],
					InternalClient:       internalClients[i],
					Image:                framework.GetPauseImageName(f.ClientSet),
					Name:                 name,
					Namespace:            nsName,
					Labels:               map[string]string{"type": "densityPod"},
					PollInterval:         DensityPollInterval,
					Timeout:              timeout,
					PodStatusFile:        fileHndl,
					Replicas:             (totalPods + numberOfCollections - 1) / numberOfCollections,
					CpuRequest:           nodeCpuCapacity / 100,
					MemRequest:           nodeMemCapacity / 100,
					MaxContainerFailures: &MaxContainerFailures,
					Silent:               true,
					LogFunc:              framework.Logf,
					SecretNames:          secretNames,
				}
				switch itArg.kind {
				case api.Kind("ReplicationController"):
					configs[i] = baseConfig
				case extensions.Kind("ReplicaSet"):
					configs[i] = &testutils.ReplicaSetConfig{RCConfig: *baseConfig}
				case extensions.Kind("Deployment"):
					configs[i] = &testutils.DeploymentConfig{RCConfig: *baseConfig}
				case batch.Kind("Job"):
					configs[i] = &testutils.JobConfig{RCConfig: *baseConfig}
				default:
					framework.Failf("Unsupported kind: %v", itArg.kind)
				}
			}

			dConfig := DensityTestConfig{
				ClientSet:         f.ClientSet,
				InternalClientset: f.InternalClientset,
				Configs:           configs,
				PodCount:          totalPods,
				PollInterval:      DensityPollInterval,
				kind:              itArg.kind,
				SecretConfigs:     secretConfigs,
			}
			for i := 0; i < itArg.daemonsPerNode; i++ {
				dConfig.DaemonConfigs = append(dConfig.DaemonConfigs,
					&testutils.DaemonConfig{
						Client:    f.ClientSet,
						Name:      fmt.Sprintf("density-daemon-%v", i),
						Namespace: f.Namespace.Name,
						LogFunc:   framework.Logf,
					})
			}

			e2eStartupTime = runDensityTest(dConfig)
			if itArg.runLatencyTest {
				By("Scheduling additional Pods to measure startup latencies")
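
				// Four per-pod timestamps are collected to derive the latency
				// distributions below: creation (the API object's
				// CreationTimestamp), scheduling (scheduler event), running
				// (container start), and the moment the watch first observed
				// the pod Running.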
				createTimes := make(map[string]metav1.Time)
				nodeNames := make(map[string]string)
				scheduleTimes := make(map[string]metav1.Time)
				runTimes := make(map[string]metav1.Time)
				watchTimes := make(map[string]metav1.Time)
				var mutex sync.Mutex
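
				// checkPod records, under the mutex, the first time each pod is
				// seen Running, together with its creation time, node, and the
				// earliest container start time.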
				checkPod := func(p *v1.Pod) {
					mutex.Lock()
					defer mutex.Unlock()
					defer GinkgoRecover()

					if p.Status.Phase == v1.PodRunning {
						if _, found := watchTimes[p.Name]; !found {
							watchTimes[p.Name] = metav1.Now()
							createTimes[p.Name] = p.CreationTimestamp
							nodeNames[p.Name] = p.Spec.NodeName
							var startTime metav1.Time
							for _, cs := range p.Status.ContainerStatuses {
								if cs.State.Running != nil {
									if startTime.Before(cs.State.Running.StartedAt) {
										startTime = cs.State.Running.StartedAt
									}
								}
							}
							if startTime != metav1.NewTime(time.Time{}) {
								runTimes[p.Name] = startTime
							} else {
								framework.Failf("Pod %v is reported to be running, but none of its containers is", p.Name)
							}
						}
					}
				}

				additionalPodsPrefix = "density-latency-pod"
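				// Watch latency pods in every namespace via informers so that
				// watchTimes reflects when the watch (rather than polling)
				// first saw each pod running.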
				stopCh := make(chan struct{})
				latencyPodStores := make([]cache.Store, len(namespaces))
				for i := 0; i < len(namespaces); i++ {
					nsName := namespaces[i].Name
					latencyPodsStore, controller := cache.NewInformer(
						&cache.ListWatch{
							ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
								options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": additionalPodsPrefix}).String()
								obj, err := c.Core().Pods(nsName).List(options)
								return runtime.Object(obj), err
							},
							WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
								options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": additionalPodsPrefix}).String()
								return c.Core().Pods(nsName).Watch(options)
							},
						},
						&v1.Pod{},
						0,
						cache.ResourceEventHandlerFuncs{
							AddFunc: func(obj interface{}) {
								p, ok := obj.(*v1.Pod)
								if !ok {
									framework.Logf("Failed to cast observed object to *v1.Pod.")
								}
								Expect(ok).To(Equal(true))
								go checkPod(p)
							},
							UpdateFunc: func(oldObj, newObj interface{}) {
								p, ok := newObj.(*v1.Pod)
								if !ok {
									framework.Logf("Failed to cast observed object to *v1.Pod.")
								}
								Expect(ok).To(Equal(true))
								go checkPod(p)
							},
						},
					)
					latencyPodStores[i] = latencyPodsStore

					go controller.Run(stopCh)
				}

				// Create some additional pods with throughput ~5 pods/sec.
				var wg sync.WaitGroup
				wg.Add(nodeCount)
				// Explicitly set requests here.
				// Thanks to it we trigger increasing priority function by scheduling
				// a pod to a node, which in turn will result in spreading latency pods
				// more evenly between nodes.
				cpuRequest := *resource.NewMilliQuantity(nodeCpuCapacity/5, resource.DecimalSI)
				memRequest := *resource.NewQuantity(nodeMemCapacity/5, resource.DecimalSI)
				if podsPerNode > 30 {
					// This is to make them schedulable on high-density tests
					// (e.g. 100 pods/node kubemark).
					cpuRequest = *resource.NewMilliQuantity(0, resource.DecimalSI)
					memRequest = *resource.NewQuantity(0, resource.DecimalSI)
				}
				rcNameToNsMap := map[string]string{}
				for i := 1; i <= nodeCount; i++ {
					name := additionalPodsPrefix + "-" + strconv.Itoa(i)
					nsName := namespaces[i%len(namespaces)].Name
					rcNameToNsMap[name] = nsName
					go createRunningPodFromRC(&wg, c, name, nsName, framework.GetPauseImageName(f.ClientSet), additionalPodsPrefix, cpuRequest, memRequest)
					time.Sleep(200 * time.Millisecond)
				}
				wg.Wait()

				By("Waiting for all Pods to be observed by the watch...")
				waitTimeout := 10 * time.Minute
				for start := time.Now(); len(watchTimes) < nodeCount; time.Sleep(10 * time.Second) {
					if time.Since(start) > waitTimeout {
						framework.Failf("Timeout reached waiting for all Pods to be observed by the watch.")
					}
				}
				close(stopCh)

				nodeToLatencyPods := make(map[string]int)
				for i := range latencyPodStores {
					for _, item := range latencyPodStores[i].List() {
						pod := item.(*v1.Pod)
						nodeToLatencyPods[pod.Spec.NodeName]++
					}
				}
				for node, count := range nodeToLatencyPods {
					if count > 1 {
						framework.Logf("%d latency pods scheduled on %s", count, node)
					}
				}
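
				// The schedule time of each pod is taken from the
				// FirstTimestamp of the scheduler's event for that pod.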
				for i := 0; i < len(namespaces); i++ {
					nsName := namespaces[i].Name
					selector := fields.Set{
						"involvedObject.kind":      "Pod",
						"involvedObject.namespace": nsName,
						"source":                   v1.DefaultSchedulerName,
					}.AsSelector().String()
					options := v1.ListOptions{FieldSelector: selector}
					schedEvents, err := c.Core().Events(nsName).List(options)
					framework.ExpectNoError(err)
					for k := range createTimes {
						for _, event := range schedEvents.Items {
							if event.InvolvedObject.Name == k {
								scheduleTimes[k] = event.FirstTimestamp
								break
							}
						}
					}
				}
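
				// Five latency distributions are derived from the collected
				// timestamps: create->schedule, schedule->run, run->watch,
				// schedule->watch, and create->watch (end-to-end).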
				scheduleLag := make([]framework.PodLatencyData, 0)
				startupLag := make([]framework.PodLatencyData, 0)
				watchLag := make([]framework.PodLatencyData, 0)
				schedToWatchLag := make([]framework.PodLatencyData, 0)
				e2eLag := make([]framework.PodLatencyData, 0)

				for name, create := range createTimes {
					sched, ok := scheduleTimes[name]
					if !ok {
						framework.Logf("Failed to find schedule time for %v", name)
					}
					Expect(ok).To(Equal(true))
					run, ok := runTimes[name]
					if !ok {
						framework.Logf("Failed to find run time for %v", name)
					}
					Expect(ok).To(Equal(true))
					watch, ok := watchTimes[name]
					if !ok {
						framework.Logf("Failed to find watch time for %v", name)
					}
					Expect(ok).To(Equal(true))
					node, ok := nodeNames[name]
					if !ok {
						framework.Logf("Failed to find node for %v", name)
					}
					Expect(ok).To(Equal(true))

					scheduleLag = append(scheduleLag, framework.PodLatencyData{Name: name, Node: node, Latency: sched.Time.Sub(create.Time)})
					startupLag = append(startupLag, framework.PodLatencyData{Name: name, Node: node, Latency: run.Time.Sub(sched.Time)})
					watchLag = append(watchLag, framework.PodLatencyData{Name: name, Node: node, Latency: watch.Time.Sub(run.Time)})
					schedToWatchLag = append(schedToWatchLag, framework.PodLatencyData{Name: name, Node: node, Latency: watch.Time.Sub(sched.Time)})
					e2eLag = append(e2eLag, framework.PodLatencyData{Name: name, Node: node, Latency: watch.Time.Sub(create.Time)})
				}

				sort.Sort(framework.LatencySlice(scheduleLag))
				sort.Sort(framework.LatencySlice(startupLag))
				sort.Sort(framework.LatencySlice(watchLag))
				sort.Sort(framework.LatencySlice(schedToWatchLag))
				sort.Sort(framework.LatencySlice(e2eLag))

				framework.PrintLatencies(scheduleLag, "worst schedule latencies")
				framework.PrintLatencies(startupLag, "worst run-after-schedule latencies")
				framework.PrintLatencies(watchLag, "worst watch latencies")
				framework.PrintLatencies(schedToWatchLag, "worst scheduled-to-end total latencies")
				framework.PrintLatencies(e2eLag, "worst e2e total latencies")

				// Test whether e2e pod startup time is acceptable.
				podStartupLatency := framework.PodStartupLatency{Latency: framework.ExtractLatencyMetrics(e2eLag)}
				framework.ExpectNoError(framework.VerifyPodStartupLatency(podStartupLatency))

				framework.LogSuspiciousLatency(startupLag, e2eLag, nodeCount, c)

				By("Removing additional replication controllers")
				deleteRC := func(i int) {
					defer GinkgoRecover()
					name := additionalPodsPrefix + "-" + strconv.Itoa(i+1)
					framework.ExpectNoError(framework.DeleteRCAndWaitForGC(c, rcNameToNsMap[name], name))
				}
				workqueue.Parallelize(16, nodeCount, deleteRC)
			}

			cleanupDensityTest(dConfig)
		})
	}

	// Calculate total number of pods from each node's max-pods capacity.
	It("[Feature:ManualPerformance] should allow running maximum capacity pods on nodes", func() {
		totalPods = 0
		for _, n := range nodes.Items {
			totalPods += int(n.Status.Capacity.Pods().Value())
		}
		totalPods -= framework.WaitForStableCluster(c, masters)
		fileHndl, err := os.Create(fmt.Sprintf(framework.TestContext.OutputDir+"/%s/pod_states.csv", uuid))
		framework.ExpectNoError(err)
		defer fileHndl.Close()
		collectionCount := 1
		configs := make([]testutils.RunObjectConfig, collectionCount)
		podsPerCollection := totalPods / collectionCount
		for i := 0; i < collectionCount; i++ {
			if i == collectionCount-1 {
				podsPerCollection += totalPods % collectionCount
			}
			name = "density" + strconv.Itoa(totalPods) + "-" + strconv.Itoa(i) + "-" + uuid
			configs[i] = &testutils.RCConfig{
				Client:               c,
				Image:                framework.GetPauseImageName(f.ClientSet),
				Name:                 name,
				Namespace:            ns,
				Labels:               map[string]string{"type": "densityPod"},
				PollInterval:         DensityPollInterval,
				PodStatusFile:        fileHndl,
				Replicas:             podsPerCollection,
				MaxContainerFailures: &MaxContainerFailures,
				Silent:               true,
				LogFunc:              framework.Logf,
			}
		}
		dConfig := DensityTestConfig{
			ClientSet:    f.ClientSet,
			Configs:      configs,
			PodCount:     totalPods,
			PollInterval: DensityPollInterval,
		}
		e2eStartupTime = runDensityTest(dConfig)
		cleanupDensityTest(dConfig)
	})
})
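
// createRunningPodFromRC creates a single-replica ReplicationController
// running the given image with the given resource requests, and blocks until
// its pod is running; wg.Done() is called when it finishes.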
func createRunningPodFromRC(wg *sync.WaitGroup, c clientset.Interface, name, ns, image, podType string, cpuRequest, memRequest resource.Quantity) {
	defer GinkgoRecover()
	defer wg.Done()
	labels := map[string]string{
		"type": podType,
		"name": name,
	}
	rc := &v1.ReplicationController{
		ObjectMeta: v1.ObjectMeta{
			Name:   name,
			Labels: labels,
		},
		Spec: v1.ReplicationControllerSpec{
			Replicas: func(i int) *int32 { x := int32(i); return &x }(1),
			Selector: labels,
			Template: &v1.PodTemplateSpec{
				ObjectMeta: v1.ObjectMeta{
					Labels: labels,
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{
							Name:  name,
							Image: image,
							Resources: v1.ResourceRequirements{
								Requests: v1.ResourceList{
									v1.ResourceCPU:    cpuRequest,
									v1.ResourceMemory: memRequest,
								},
							},
						},
					},
					DNSPolicy: v1.DNSDefault,
				},
			},
		},
	}
	_, err := c.Core().ReplicationControllers(ns).Create(rc)
	framework.ExpectNoError(err)
	framework.ExpectNoError(framework.WaitForControlledPodsRunning(c, ns, name, api.Kind("ReplicationController")))
	framework.Logf("Found pod '%s' running", name)
}
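
// kindSupportsGarbageCollector reports whether this test trusts the garbage
// collector to delete the pods owned by the given kind; Deployments and Jobs
// are instead deleted together with their pods.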
func kindSupportsGarbageCollector(kind schema.GroupKind) bool {
	return kind != extensions.Kind("Deployment") && kind != batch.Kind("Job")
}