/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e

import (
    "fmt"
    "math"
    "os"
    "sort"
    "strconv"
    "sync"
    "time"

    "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/api/resource"
    "k8s.io/kubernetes/pkg/api/unversioned"
    "k8s.io/kubernetes/pkg/client/cache"
    client "k8s.io/kubernetes/pkg/client/unversioned"
    controllerframework "k8s.io/kubernetes/pkg/controller/framework"
    "k8s.io/kubernetes/pkg/fields"
    "k8s.io/kubernetes/pkg/labels"
    "k8s.io/kubernetes/pkg/runtime"
    "k8s.io/kubernetes/pkg/util/sets"
    utiluuid "k8s.io/kubernetes/pkg/util/uuid"
    "k8s.io/kubernetes/pkg/watch"
    "k8s.io/kubernetes/test/e2e/framework"

    . "github.com/onsi/ginkgo"
    . "github.com/onsi/gomega"
)
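
// MinSaturationThreshold is the lower bound on the allowed cluster-saturation
// time checked in AfterEach (the test always tolerates at least this much),
// and MinPodsPerSecondThroughput is the minimum pod-startup throughput
// expected while saturating the cluster.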
const (
    MinSaturationThreshold     = 2 * time.Minute
    MinPodsPerSecondThroughput = 8
)

// Maximum container failures this test tolerates before failing.
var MaxContainerFailures = 0
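
// DensityTestConfig bundles the parameters runDensityTest needs: the RC
// configs to create, the client and namespace to use, how often to poll pod
// startup status, the expected total pod count, and how long to wait for all
// events and pod updates to be recorded.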
type DensityTestConfig struct {
    Configs      []framework.RCConfig
    Client       *client.Client
    Namespace    string
    PollInterval time.Duration
    PodCount     int
    Timeout      time.Duration
}
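
// density30AddonResourceVerifier returns per-component CPU and memory
// constraints for the 30 pods/node density test: control-plane limits are
// scaled with the cluster size (with a separate table for kubemark), while
// add-on limits are fixed.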
func density30AddonResourceVerifier(numNodes int) map[string]framework.ResourceConstraint {
    var apiserverMem uint64
    var controllerMem uint64
    var schedulerMem uint64
    apiserverCPU := math.MaxFloat32
    apiserverMem = math.MaxUint64
    controllerCPU := math.MaxFloat32
    controllerMem = math.MaxUint64
    schedulerCPU := math.MaxFloat32
    schedulerMem = math.MaxUint64
    if framework.ProviderIs("kubemark") {
        if numNodes <= 5 {
            apiserverCPU = 0.15
            apiserverMem = 150 * (1024 * 1024)
            controllerCPU = 0.1
            controllerMem = 100 * (1024 * 1024)
            schedulerCPU = 0.05
            schedulerMem = 50 * (1024 * 1024)
        } else if numNodes <= 100 {
            apiserverCPU = 1.5
            apiserverMem = 1500 * (1024 * 1024)
            controllerCPU = 0.75
            controllerMem = 750 * (1024 * 1024)
            schedulerCPU = 0.75
            schedulerMem = 500 * (1024 * 1024)
        } else if numNodes <= 500 {
            apiserverCPU = 2.25
            apiserverMem = 2500 * (1024 * 1024)
            controllerCPU = 1.0
            controllerMem = 1100 * (1024 * 1024)
            schedulerCPU = 0.8
            schedulerMem = 500 * (1024 * 1024)
        } else if numNodes <= 1000 {
            apiserverCPU = 4
            apiserverMem = 4000 * (1024 * 1024)
            controllerCPU = 3
            controllerMem = 2000 * (1024 * 1024)
            schedulerCPU = 1.5
            schedulerMem = 750 * (1024 * 1024)
        }
    } else {
        if numNodes <= 100 {
            apiserverCPU = 1.5
            apiserverMem = 1300 * (1024 * 1024)
            controllerCPU = 0.5
            controllerMem = 300 * (1024 * 1024)
            schedulerCPU = 0.4
            schedulerMem = 150 * (1024 * 1024)
        }
    }

    constraints := make(map[string]framework.ResourceConstraint)
    constraints["fluentd-elasticsearch"] = framework.ResourceConstraint{
        CPUConstraint:    0.2,
        MemoryConstraint: 250 * (1024 * 1024),
    }
    constraints["elasticsearch-logging"] = framework.ResourceConstraint{
        CPUConstraint: 2,
        // TODO: bring it down to 750MB again, when we lower Kubelet verbosity level. I.e. revert #19164
        MemoryConstraint: 5000 * (1024 * 1024),
    }
    constraints["heapster"] = framework.ResourceConstraint{
        CPUConstraint:    2,
        MemoryConstraint: 1800 * (1024 * 1024),
    }
    constraints["kibana-logging"] = framework.ResourceConstraint{
        CPUConstraint:    0.2,
        MemoryConstraint: 100 * (1024 * 1024),
    }
    constraints["kube-proxy"] = framework.ResourceConstraint{
        CPUConstraint:    0.05,
        MemoryConstraint: 20 * (1024 * 1024),
    }
    constraints["l7-lb-controller"] = framework.ResourceConstraint{
        CPUConstraint:    0.05,
        MemoryConstraint: 20 * (1024 * 1024),
    }
    constraints["influxdb"] = framework.ResourceConstraint{
        CPUConstraint:    2,
        MemoryConstraint: 500 * (1024 * 1024),
    }
    constraints["kube-apiserver"] = framework.ResourceConstraint{
        CPUConstraint:    apiserverCPU,
        MemoryConstraint: apiserverMem,
    }
    constraints["kube-controller-manager"] = framework.ResourceConstraint{
        CPUConstraint:    controllerCPU,
        MemoryConstraint: controllerMem,
    }
    constraints["kube-scheduler"] = framework.ResourceConstraint{
        CPUConstraint:    schedulerCPU,
        MemoryConstraint: schedulerMem,
    }
    return constraints
}
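
// logPodStartupStatus periodically lists the pods matching observedLabels in
// namespace ns and logs how many of the expectedPods have already started,
// until stopCh is closed.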
func logPodStartupStatus(c *client.Client, expectedPods int, ns string, observedLabels map[string]string, period time.Duration, stopCh chan struct{}) {
    label := labels.SelectorFromSet(labels.Set(observedLabels))
    podStore := framework.NewPodStore(c, ns, label, fields.Everything())
    defer podStore.Stop()
    ticker := time.NewTicker(period)
    defer ticker.Stop()
    for {
        select {
        case <-ticker.C:
            pods := podStore.List()
            startupStatus := framework.ComputeRCStartupStatus(pods, expectedPods)
            startupStatus.Print("Density")
        case <-stopCh:
            pods := podStore.List()
            startupStatus := framework.ComputeRCStartupStatus(pods, expectedPods)
            startupStatus.Print("Density")
            return
        }
    }
}

// runDensityTest will perform a density test and return the time it took for
// all pods to start
func runDensityTest(dtc DensityTestConfig) time.Duration {
    defer GinkgoRecover()
    // Create a listener for events.
    // eLock protects the events slice.
    var eLock sync.Mutex
    events := make([]*api.Event, 0)
    _, controller := controllerframework.NewInformer(
        &cache.ListWatch{
            ListFunc: func(options api.ListOptions) (runtime.Object, error) {
                return dtc.Client.Events(dtc.Namespace).List(options)
            },
            WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
                return dtc.Client.Events(dtc.Namespace).Watch(options)
            },
        },
        &api.Event{},
        0,
        controllerframework.ResourceEventHandlerFuncs{
            AddFunc: func(obj interface{}) {
                eLock.Lock()
                defer eLock.Unlock()
                events = append(events, obj.(*api.Event))
            },
        },
    )
    stop := make(chan struct{})
    go controller.Run(stop)

    // Create a listener for api updates.
    // uLock protects updateCount.
    var uLock sync.Mutex
    updateCount := 0
    label := labels.SelectorFromSet(labels.Set(map[string]string{"type": "densityPod"}))
    _, updateController := controllerframework.NewInformer(
        &cache.ListWatch{
            ListFunc: func(options api.ListOptions) (runtime.Object, error) {
                options.LabelSelector = label
                return dtc.Client.Pods(dtc.Namespace).List(options)
            },
            WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
                options.LabelSelector = label
                return dtc.Client.Pods(dtc.Namespace).Watch(options)
            },
        },
        &api.Pod{},
        0,
        controllerframework.ResourceEventHandlerFuncs{
            UpdateFunc: func(_, _ interface{}) {
                uLock.Lock()
                defer uLock.Unlock()
                updateCount++
            },
        },
    )
    go updateController.Run(stop)

    // Start all replication controllers.
    startTime := time.Now()
    wg := sync.WaitGroup{}
    wg.Add(len(dtc.Configs))
    for i := range dtc.Configs {
        rcConfig := dtc.Configs[i]
        go func() {
            framework.ExpectNoError(framework.RunRC(rcConfig))
            wg.Done()
        }()
    }
    logStopCh := make(chan struct{})
    go logPodStartupStatus(dtc.Client, dtc.PodCount, dtc.Namespace, map[string]string{"type": "densityPod"}, dtc.PollInterval, logStopCh)
    wg.Wait()
    startupTime := time.Since(startTime)
    close(logStopCh)
    framework.Logf("E2E startup time for %d pods: %v", dtc.PodCount, startupTime)
    framework.Logf("Throughput (pods/s) during cluster saturation phase: %v", float32(dtc.PodCount)/float32(startupTime/time.Second))

    By("Waiting for all events to be recorded")
    last := -1
    current := len(events)
    lastCount := -1
    currentCount := updateCount
    for start := time.Now(); (last < current || lastCount < currentCount) && time.Since(start) < dtc.Timeout; time.Sleep(10 * time.Second) {
        func() {
            eLock.Lock()
            defer eLock.Unlock()
            last = current
            current = len(events)
        }()
        func() {
            uLock.Lock()
            defer uLock.Unlock()
            lastCount = currentCount
            currentCount = updateCount
        }()
    }
    close(stop)

    if current != last {
        framework.Logf("Warning: Not all events were recorded after waiting %.2f minutes", dtc.Timeout.Minutes())
    }
    framework.Logf("Found %d events", current)
    if currentCount != lastCount {
        framework.Logf("Warning: Not all updates were recorded after waiting %.2f minutes", dtc.Timeout.Minutes())
    }
    framework.Logf("Found %d updates", currentCount)

    // Tune the threshold for allowed failures.
    badEvents := framework.BadEvents(events)
    Expect(badEvents).NotTo(BeNumerically(">", int(math.Floor(0.01*float64(dtc.PodCount)))))
    // Print some data about Pod to Node allocation
    By("Printing Pod to Node allocation data")
    podList, err := dtc.Client.Pods(api.NamespaceAll).List(api.ListOptions{})
    framework.ExpectNoError(err)
    pausePodAllocation := make(map[string]int)
    systemPodAllocation := make(map[string][]string)
    for _, pod := range podList.Items {
        if pod.Namespace == api.NamespaceSystem {
            systemPodAllocation[pod.Spec.NodeName] = append(systemPodAllocation[pod.Spec.NodeName], pod.Name)
        } else {
            pausePodAllocation[pod.Spec.NodeName]++
        }
    }
    nodeNames := make([]string, 0)
    for k := range pausePodAllocation {
        nodeNames = append(nodeNames, k)
    }
    sort.Strings(nodeNames)
    for _, node := range nodeNames {
        framework.Logf("%v: %v pause pods, system pods: %v", node, pausePodAllocation[node], systemPodAllocation[node])
    }
    return startupTime
}
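
// cleanupDensityTest deletes the replication controllers created by the
// density test; the deletion is done explicitly so that the API calls it
// generates are reflected in the collected metrics.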
func cleanupDensityTest(dtc DensityTestConfig) {
    defer GinkgoRecover()
    By("Deleting ReplicationController")
    // We explicitly delete all pods so that the API calls needed for deletion are accounted for in metrics.
    for i := range dtc.Configs {
        rcName := dtc.Configs[i].Name
        rc, err := dtc.Client.ReplicationControllers(dtc.Namespace).Get(rcName)
        if err == nil && rc.Spec.Replicas != 0 {
            By("Cleaning up the replication controller")
            err := framework.DeleteRC(dtc.Client, dtc.Namespace, rcName)
            framework.ExpectNoError(err)
        }
    }
}

// This test suite can take a long time to run, and can affect or be affected by other tests.
// So by default it is added to the ginkgo.skip list (see driver.go).
// To run this suite you must explicitly ask for it by setting the
// -t/--test flag or ginkgo.focus flag.
// IMPORTANT: This test is designed to work on large (>= 100 Nodes) clusters. For smaller ones
// the results will not be representative of control-plane performance, as we will start hitting
// limits on Docker's concurrent container startup.
|
2016-04-07 17:21:31 +00:00
|
|
|
var _ = framework.KubeDescribe("Density", func() {
|
2015-02-23 14:50:40 +00:00
|
|
|
var c *client.Client
|
2015-09-10 08:40:22 +00:00
|
|
|
var nodeCount int
|
2015-02-23 14:50:40 +00:00
|
|
|
var RCName string
|
2015-06-18 12:35:07 +00:00
|
|
|
var additionalPodsPrefix string
|
2015-02-23 14:50:40 +00:00
|
|
|
var ns string
|
2015-05-26 14:24:46 +00:00
|
|
|
var uuid string
|
2016-02-26 14:37:35 +00:00
|
|
|
var e2eStartupTime time.Duration
|
|
|
|
var totalPods int
|
2016-03-01 14:04:08 +00:00
|
|
|
var nodeCpuCapacity int64
|
|
|
|
var nodeMemCapacity int64
|
2016-05-20 14:14:02 +00:00
|
|
|
var nodes *api.NodeList
|
|
|
|
var masters sets.String
|
2015-10-27 13:07:51 +00:00
|
|
|
|
|
|
|
// Gathers data prior to framework namespace teardown
|
|
|
|
AfterEach(func() {
|
2016-03-04 08:07:52 +00:00
|
|
|
saturationThreshold := time.Duration((totalPods / MinPodsPerSecondThroughput)) * time.Second
|
|
|
|
if saturationThreshold < MinSaturationThreshold {
|
|
|
|
saturationThreshold = MinSaturationThreshold
|
|
|
|
}
|
|
|
|
Expect(e2eStartupTime).NotTo(BeNumerically(">", saturationThreshold))
|
2016-04-07 17:21:31 +00:00
|
|
|
saturationData := framework.SaturationTime{
|
2016-02-26 14:37:35 +00:00
|
|
|
TimeToSaturate: e2eStartupTime,
|
|
|
|
NumberOfNodes: nodeCount,
|
|
|
|
NumberOfPods: totalPods,
|
|
|
|
Throughput: float32(totalPods) / float32(e2eStartupTime/time.Second),
|
|
|
|
}
|
2016-04-07 17:21:31 +00:00
|
|
|
framework.Logf("Cluster saturation time: %s", framework.PrettyPrintJSON(saturationData))
|
2016-02-26 14:37:35 +00:00
|
|
|
|
2015-11-18 16:07:26 +00:00
|
|
|
// Verify latency metrics.
|
2016-04-07 17:21:31 +00:00
|
|
|
highLatencyRequests, err := framework.HighLatencyRequests(c)
|
|
|
|
framework.ExpectNoError(err)
|
2015-10-27 13:07:51 +00:00
|
|
|
Expect(highLatencyRequests).NotTo(BeNumerically(">", 0), "There should be no high-latency requests")
|
2015-11-18 16:07:26 +00:00
|
|
|
|
|
|
|
// Verify scheduler metrics.
|
|
|
|
// TODO: Reset metrics at the beginning of the test.
|
|
|
|
// We should do something similar to how we do it for APIserver.
|
2016-04-07 17:21:31 +00:00
|
|
|
framework.ExpectNoError(framework.VerifySchedulerLatency(c))
|
2015-10-27 13:07:51 +00:00
|
|
|
})

    // Explicitly put here, to delete namespace at the end of the test
    // (after measuring latency metrics, etc.).
    f := framework.NewDefaultFramework("density")
    f.NamespaceDeletionTimeout = time.Hour

    BeforeEach(func() {
        c = f.Client
        ns = f.Namespace.Name

        // In large clusters we may get to this point but still have a bunch
        // of nodes without Routes created. Since this would make a node
        // unschedulable, we need to wait until all of them are schedulable.
        framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c))
        masters, nodes = framework.GetMasterAndWorkerNodesOrDie(c)
        nodeCount = len(nodes.Items)
        Expect(nodeCount).NotTo(BeZero())
        if nodeCount == 30 {
            f.AddonResourceConstraints = density30AddonResourceVerifier(nodeCount)
        }

        nodeCpuCapacity = nodes.Items[0].Status.Allocatable.Cpu().MilliValue()
        nodeMemCapacity = nodes.Items[0].Status.Allocatable.Memory().Value()

        // Terminating a namespace (deleting the remaining objects from it - which
        // generally means events) can affect the current run. Thus we wait for all
        // terminating namespaces to be finally deleted before starting this test.
        err := framework.CheckTestingNSDeletedExcept(c, ns)
        framework.ExpectNoError(err)

        uuid = string(utiluuid.NewUUID())

        framework.ExpectNoError(framework.ResetMetrics(c))
        framework.ExpectNoError(os.Mkdir(fmt.Sprintf(framework.TestContext.OutputDir+"/%s", uuid), 0777))

        framework.Logf("Listing nodes for easy debugging:\n")
        for _, node := range nodes.Items {
            var internalIP, externalIP string
            for _, address := range node.Status.Addresses {
                if address.Type == api.NodeInternalIP {
                    internalIP = address.Address
                }
                if address.Type == api.NodeExternalIP {
                    externalIP = address.Address
                }
            }
            framework.Logf("Name: %v, internalIP: %v, externalIP: %v", node.ObjectMeta.Name, internalIP, externalIP)
        }
    })

    type Density struct {
        // Controls if e2e latency tests should be run (they are slow)
        runLatencyTest bool
        podsPerNode    int
        // Controls how often the apiserver is polled for pods
        interval time.Duration
    }

    densityTests := []Density{
        // TODO: Expose runLatencyTest as ginkgo flag.
        {podsPerNode: 3, runLatencyTest: false, interval: 10 * time.Second},
        {podsPerNode: 30, runLatencyTest: true, interval: 10 * time.Second},
        {podsPerNode: 50, runLatencyTest: false, interval: 10 * time.Second},
        {podsPerNode: 95, runLatencyTest: true, interval: 10 * time.Second},
        {podsPerNode: 100, runLatencyTest: false, interval: 10 * time.Second},
    }

    for _, testArg := range densityTests {
        name := fmt.Sprintf("should allow starting %d pods per node", testArg.podsPerNode)
        switch testArg.podsPerNode {
        case 30:
            name = "[Feature:Performance] " + name
        case 95:
            name = "[Feature:HighDensityPerformance] " + name
        default:
            name = "[Feature:ManualPerformance] " + name
        }
        itArg := testArg
        It(name, func() {
            podsPerNode := itArg.podsPerNode
            totalPods = podsPerNode * nodeCount
            fileHndl, err := os.Create(fmt.Sprintf(framework.TestContext.OutputDir+"/%s/pod_states.csv", uuid))
            framework.ExpectNoError(err)
            defer fileHndl.Close()
            timeout := 10 * time.Minute

            // TODO: loop to podsPerNode instead of 1 when we're ready.
            numberOfRCs := 1
            RCConfigs := make([]framework.RCConfig, numberOfRCs)
            for i := 0; i < numberOfRCs; i++ {
                RCName := "density" + strconv.Itoa(totalPods) + "-" + strconv.Itoa(i) + "-" + uuid
                RCConfigs[i] = framework.RCConfig{Client: c,
                    Image:                framework.GetPauseImageName(f.Client),
                    Name:                 RCName,
                    Namespace:            ns,
                    Labels:               map[string]string{"type": "densityPod"},
                    PollInterval:         itArg.interval,
                    PodStatusFile:        fileHndl,
                    Replicas:             (totalPods + numberOfRCs - 1) / numberOfRCs,
                    CpuRequest:           nodeCpuCapacity / 100,
                    MemRequest:           nodeMemCapacity / 100,
                    MaxContainerFailures: &MaxContainerFailures,
                    Silent:               true,
                }
            }

            dConfig := DensityTestConfig{Client: c,
                Configs:      RCConfigs,
                PodCount:     totalPods,
                Namespace:    ns,
                PollInterval: itArg.interval,
                Timeout:      timeout,
            }
            e2eStartupTime = runDensityTest(dConfig)
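
            // When requested, measure pod startup latency on top of the saturated
            // cluster: create one additional latency pod per node at ~5 pods/sec,
            // watch them, and record create/schedule/run/watch timestamps for each.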
            if itArg.runLatencyTest {
                By("Scheduling additional Pods to measure startup latencies")

                createTimes := make(map[string]unversioned.Time, 0)
                nodes := make(map[string]string, 0)
                scheduleTimes := make(map[string]unversioned.Time, 0)
                runTimes := make(map[string]unversioned.Time, 0)
                watchTimes := make(map[string]unversioned.Time, 0)

                var mutex sync.Mutex
                checkPod := func(p *api.Pod) {
                    mutex.Lock()
                    defer mutex.Unlock()
                    defer GinkgoRecover()

                    if p.Status.Phase == api.PodRunning {
                        if _, found := watchTimes[p.Name]; !found {
                            watchTimes[p.Name] = unversioned.Now()
                            createTimes[p.Name] = p.CreationTimestamp
                            nodes[p.Name] = p.Spec.NodeName
                            var startTime unversioned.Time
                            for _, cs := range p.Status.ContainerStatuses {
                                if cs.State.Running != nil {
                                    if startTime.Before(cs.State.Running.StartedAt) {
                                        startTime = cs.State.Running.StartedAt
                                    }
                                }
                            }
                            if startTime != unversioned.NewTime(time.Time{}) {
                                runTimes[p.Name] = startTime
                            } else {
                                framework.Failf("Pod %v is reported to be running, but none of its containers is", p.Name)
                            }
                        }
                    }
                }

                additionalPodsPrefix = "density-latency-pod"
                latencyPodsStore, controller := controllerframework.NewInformer(
                    &cache.ListWatch{
                        ListFunc: func(options api.ListOptions) (runtime.Object, error) {
                            options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": additionalPodsPrefix})
                            return c.Pods(ns).List(options)
                        },
                        WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
                            options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": additionalPodsPrefix})
                            return c.Pods(ns).Watch(options)
                        },
                    },
                    &api.Pod{},
                    0,
                    controllerframework.ResourceEventHandlerFuncs{
                        AddFunc: func(obj interface{}) {
                            p, ok := obj.(*api.Pod)
                            Expect(ok).To(Equal(true))
                            go checkPod(p)
                        },
                        UpdateFunc: func(oldObj, newObj interface{}) {
                            p, ok := newObj.(*api.Pod)
                            Expect(ok).To(Equal(true))
                            go checkPod(p)
                        },
                    },
                )

                stopCh := make(chan struct{})
                go controller.Run(stopCh)

                // Create some additional pods with throughput ~5 pods/sec.
                var wg sync.WaitGroup
                wg.Add(nodeCount)
                // Explicitly set requests here, so that the scheduler's priority
                // functions take resource usage into account and spread the
                // latency pods more evenly between nodes.
                cpuRequest := *resource.NewMilliQuantity(nodeCpuCapacity/5, resource.DecimalSI)
                memRequest := *resource.NewQuantity(nodeMemCapacity/5, resource.DecimalSI)
                if podsPerNode > 30 {
                    // This is to make them schedulable on high-density tests
                    // (e.g. 100 pods/node kubemark).
                    cpuRequest = *resource.NewMilliQuantity(0, resource.DecimalSI)
                    memRequest = *resource.NewQuantity(0, resource.DecimalSI)
                }
                for i := 1; i <= nodeCount; i++ {
                    name := additionalPodsPrefix + "-" + strconv.Itoa(i)
                    go createRunningPodFromRC(&wg, c, name, ns, framework.GetPauseImageName(f.Client), additionalPodsPrefix, cpuRequest, memRequest)
                    time.Sleep(200 * time.Millisecond)
                }
                wg.Wait()

                By("Waiting for all Pods to be observed by the watch...")
                for start := time.Now(); len(watchTimes) < nodeCount; time.Sleep(10 * time.Second) {
                    if time.Since(start) > timeout {
                        framework.Failf("Timeout reached waiting for all Pods to be observed by the watch.")
                    }
                }
                close(stopCh)

                nodeToLatencyPods := make(map[string]int)
                for _, item := range latencyPodsStore.List() {
                    pod := item.(*api.Pod)
                    nodeToLatencyPods[pod.Spec.NodeName]++
                }
                for node, count := range nodeToLatencyPods {
                    if count > 1 {
                        framework.Logf("%d latency pods scheduled on %s", count, node)
                    }
                }

                selector := fields.Set{
                    "involvedObject.kind":      "Pod",
                    "involvedObject.namespace": ns,
                    "source":                   api.DefaultSchedulerName,
                }.AsSelector()
                options := api.ListOptions{FieldSelector: selector}
                schedEvents, err := c.Events(ns).List(options)
                framework.ExpectNoError(err)
                for k := range createTimes {
                    for _, event := range schedEvents.Items {
                        if event.InvolvedObject.Name == k {
                            scheduleTimes[k] = event.FirstTimestamp
                            break
                        }
                    }
                }

                scheduleLag := make([]framework.PodLatencyData, 0)
                startupLag := make([]framework.PodLatencyData, 0)
                watchLag := make([]framework.PodLatencyData, 0)
                schedToWatchLag := make([]framework.PodLatencyData, 0)
                e2eLag := make([]framework.PodLatencyData, 0)

                for name, create := range createTimes {
                    sched, ok := scheduleTimes[name]
                    Expect(ok).To(Equal(true))
                    run, ok := runTimes[name]
                    Expect(ok).To(Equal(true))
                    watch, ok := watchTimes[name]
                    Expect(ok).To(Equal(true))
                    node, ok := nodes[name]
                    Expect(ok).To(Equal(true))

                    scheduleLag = append(scheduleLag, framework.PodLatencyData{Name: name, Node: node, Latency: sched.Time.Sub(create.Time)})
                    startupLag = append(startupLag, framework.PodLatencyData{Name: name, Node: node, Latency: run.Time.Sub(sched.Time)})
                    watchLag = append(watchLag, framework.PodLatencyData{Name: name, Node: node, Latency: watch.Time.Sub(run.Time)})
                    schedToWatchLag = append(schedToWatchLag, framework.PodLatencyData{Name: name, Node: node, Latency: watch.Time.Sub(sched.Time)})
                    e2eLag = append(e2eLag, framework.PodLatencyData{Name: name, Node: node, Latency: watch.Time.Sub(create.Time)})
                }

                sort.Sort(framework.LatencySlice(scheduleLag))
                sort.Sort(framework.LatencySlice(startupLag))
                sort.Sort(framework.LatencySlice(watchLag))
                sort.Sort(framework.LatencySlice(schedToWatchLag))
                sort.Sort(framework.LatencySlice(e2eLag))

                framework.PrintLatencies(scheduleLag, "worst schedule latencies")
                framework.PrintLatencies(startupLag, "worst run-after-schedule latencies")
                framework.PrintLatencies(watchLag, "worst watch latencies")
                framework.PrintLatencies(schedToWatchLag, "worst scheduled-to-end total latencies")
                framework.PrintLatencies(e2eLag, "worst e2e total latencies")

                // Test whether e2e pod startup time is acceptable.
                podStartupLatency := framework.PodStartupLatency{Latency: framework.ExtractLatencyMetrics(e2eLag)}
                framework.ExpectNoError(framework.VerifyPodStartupLatency(podStartupLatency))

                framework.LogSuspiciousLatency(startupLag, e2eLag, nodeCount, c)
            }

            cleanupDensityTest(dConfig)

            By("Removing additional replication controllers if any")
            for i := 1; i <= nodeCount; i++ {
                name := additionalPodsPrefix + "-" + strconv.Itoa(i)
                c.ReplicationControllers(ns).Delete(name, nil)
            }
        })
    }

    // Calculate total number of pods from each node's max-pods.
    It("[Feature:ManualPerformance] should allow running maximum capacity pods on nodes", func() {
        totalPods = 0
        for _, n := range nodes.Items {
            totalPods += int(n.Status.Capacity.Pods().Value())
        }
        totalPods -= framework.WaitForStableCluster(c, masters)

        fileHndl, err := os.Create(fmt.Sprintf(framework.TestContext.OutputDir+"/%s/pod_states.csv", uuid))
        framework.ExpectNoError(err)
        defer fileHndl.Close()
        rcCnt := 1
        RCConfigs := make([]framework.RCConfig, rcCnt)
        podsPerRC := int(totalPods / rcCnt)
        for i := 0; i < rcCnt; i++ {
            if i == rcCnt-1 {
                podsPerRC += int(math.Mod(float64(totalPods), float64(rcCnt)))
            }
            RCName = "density" + strconv.Itoa(totalPods) + "-" + strconv.Itoa(i) + "-" + uuid
            RCConfigs[i] = framework.RCConfig{Client: c,
                Image:                "gcr.io/google_containers/pause-amd64:3.0",
                Name:                 RCName,
                Namespace:            ns,
                Labels:               map[string]string{"type": "densityPod"},
                PollInterval:         10 * time.Second,
                PodStatusFile:        fileHndl,
                Replicas:             podsPerRC,
                MaxContainerFailures: &MaxContainerFailures,
                Silent:               true,
            }
        }
        dConfig := DensityTestConfig{Client: c,
            Configs:      RCConfigs,
            PodCount:     totalPods,
            Namespace:    ns,
            PollInterval: 10 * time.Second,
            Timeout:      10 * time.Minute,
        }
        e2eStartupTime = runDensityTest(dConfig)
        cleanupDensityTest(dConfig)
    })
})
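
// createRunningPodFromRC creates a single-replica ReplicationController with
// the given name, image and resource requests, waits until its pod is
// running, and then signals wg.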
func createRunningPodFromRC(wg *sync.WaitGroup, c *client.Client, name, ns, image, podType string, cpuRequest, memRequest resource.Quantity) {
    defer GinkgoRecover()
    defer wg.Done()
    labels := map[string]string{
        "type": podType,
        "name": name,
    }
    rc := &api.ReplicationController{
        ObjectMeta: api.ObjectMeta{
            Name:   name,
            Labels: labels,
        },
        Spec: api.ReplicationControllerSpec{
            Replicas: 1,
            Selector: labels,
            Template: &api.PodTemplateSpec{
                ObjectMeta: api.ObjectMeta{
                    Labels: labels,
                },
                Spec: api.PodSpec{
                    Containers: []api.Container{
                        {
                            Name:  name,
                            Image: image,
                            Resources: api.ResourceRequirements{
                                Requests: api.ResourceList{
                                    api.ResourceCPU:    cpuRequest,
                                    api.ResourceMemory: memRequest,
                                },
                            },
                        },
                    },
                    DNSPolicy: api.DNSDefault,
                },
            },
        },
    }
    _, err := c.ReplicationControllers(ns).Create(rc)
    framework.ExpectNoError(err)
    framework.ExpectNoError(framework.WaitForRCPodsRunning(c, ns, name))
    framework.Logf("Found pod '%s' running", name)
}