/*
Copyright 2016 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"net/http"
	"os/exec"
	"strconv"
	"strings"
	"time"

	"k8s.io/kubernetes/pkg/api"
	client "k8s.io/kubernetes/pkg/client/unversioned"
	"k8s.io/kubernetes/pkg/fields"
	"k8s.io/kubernetes/test/e2e/framework"

	"github.com/golang/glog"
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

const (
	defaultTimeout   = 3 * time.Minute
	resizeTimeout    = 5 * time.Minute
	scaleUpTimeout   = 5 * time.Minute
	scaleDownTimeout = 15 * time.Minute

	gkeEndpoint      = "https://test-container.sandbox.googleapis.com"
	gkeUpdateTimeout = 10 * time.Minute
)

var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
	f := framework.NewDefaultFramework("autoscaling")
	var c *client.Client
	var nodeCount int
	var coresPerNode int
	var memCapacityMb int
	var originalSizes map[string]int

	BeforeEach(func() {
		c = f.Client
		framework.SkipUnlessProviderIs("gce", "gke")

		nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
		nodeCount = len(nodes.Items)
		Expect(nodeCount).NotTo(BeZero())
		cpu := nodes.Items[0].Status.Capacity[api.ResourceCPU]
		mem := nodes.Items[0].Status.Capacity[api.ResourceMemory]
		coresPerNode = int((&cpu).MilliValue() / 1000)
		memCapacityMb = int((&mem).Value() / 1024 / 1024)

		originalSizes = make(map[string]int)
		sum := 0
		for _, mig := range strings.Split(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") {
			size, err := GroupSize(mig)
			framework.ExpectNoError(err)
			By(fmt.Sprintf("Initial size of %s: %d", mig, size))
			originalSizes[mig] = size
			sum += size
		}
		Expect(nodeCount).Should(Equal(sum))

		if framework.ProviderIs("gke") {
			val, err := isAutoscalerEnabled(3)
			framework.ExpectNoError(err)
			if !val {
				err = enableAutoscaler("default-pool", 3, 5)
				framework.ExpectNoError(err)
			}
		}
	})

	AfterEach(func() {
		By("Restoring initial size of the cluster")
		setMigSizes(originalSizes)
		framework.ExpectNoError(framework.WaitForClusterSize(c, nodeCount, scaleDownTimeout))
	})

	It("shouldn't increase cluster size if pending pod is too large [Feature:ClusterSizeAutoscalingScaleUp]", func() {
		By("Creating unschedulable pod")
		ReserveMemory(f, "memory-reservation", 1, memCapacityMb, false)
		defer framework.DeleteRC(f.Client, f.Namespace.Name, "memory-reservation")

		By("Waiting for scale up, hoping it won't happen")
		// Verify that the appropriate event was generated.
		eventFound := false
	EventsLoop:
		for start := time.Now(); time.Since(start) < scaleUpTimeout; time.Sleep(20 * time.Second) {
			By("Waiting for NotTriggerScaleUp event")
			events, err := f.Client.Events(f.Namespace.Name).List(api.ListOptions{})
			framework.ExpectNoError(err)

			for _, e := range events.Items {
				if e.InvolvedObject.Kind == "Pod" && e.Reason == "NotTriggerScaleUp" && strings.Contains(e.Message, "it wouldn't fit if a new node is added") {
					By("NotTriggerScaleUp event found")
					eventFound = true
					break EventsLoop
				}
			}
		}
		Expect(eventFound).Should(Equal(true))
		// Verify that the cluster size has not changed.
		framework.ExpectNoError(WaitForClusterSizeFunc(f.Client,
			func(size int) bool { return size <= nodeCount }, time.Second))
	})

	It("should increase cluster size if pending pods are small [Feature:ClusterSizeAutoscalingScaleUp]", func() {
		ReserveMemory(f, "memory-reservation", 100, nodeCount*memCapacityMb, false)
		defer framework.DeleteRC(f.Client, f.Namespace.Name, "memory-reservation")

		// Verify that the cluster size has increased.
		framework.ExpectNoError(WaitForClusterSizeFunc(f.Client,
			func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout))
		framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
	})

	It("should increase cluster size if pods are pending due to host port conflict [Feature:ClusterSizeAutoscalingScaleUp]", func() {
		CreateHostPortPods(f, "host-port", nodeCount+2, false)
		defer framework.DeleteRC(f.Client, f.Namespace.Name, "host-port")

		framework.ExpectNoError(WaitForClusterSizeFunc(f.Client,
			func(size int) bool { return size >= nodeCount+2 }, scaleUpTimeout))
		framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
	})

	It("should add node to the particular mig [Feature:ClusterSizeAutoscalingScaleUp]", func() {
		labels := map[string]string{"cluster-autoscaling-test.special-node": "true"}

		By("Finding the smallest MIG")
		minMig := ""
		minSize := nodeCount
		for mig, size := range originalSizes {
			if size <= minSize {
				minMig = mig
				minSize = size
			}
		}

		removeLabels := func(nodesToClean []string) {
			By("Removing labels from nodes")
			for _, node := range nodesToClean {
				updateLabelsForNode(f, node, map[string]string{}, []string{"cluster-autoscaling-test.special-node"})
			}
		}

		nodes, err := GetGroupNodes(minMig)
		defer removeLabels(nodes)
		nodesMap := map[string]struct{}{}
		ExpectNoError(err)
		By(fmt.Sprintf("Labeling nodes of the smallest MIG (%s): %v", minMig, nodes))
		for _, node := range nodes {
			updateLabelsForNode(f, node, labels, nil)
			nodesMap[node] = struct{}{}
		}

		CreateNodeSelectorPods(f, "node-selector", minSize+1, labels, false)

		By("Waiting for new node to appear and labeling it")
		WaitForGroupSize(minMig, int32(minSize+1))
		// Verify that the cluster size has increased.
		framework.ExpectNoError(WaitForClusterSizeFunc(f.Client,
			func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout))

		newNodes, err := GetGroupNodes(minMig)
		defer removeLabels(newNodes)
		ExpectNoError(err)
		By(fmt.Sprintf("Setting labels for new nodes: %v", newNodes))
		for _, node := range newNodes {
			if _, old := nodesMap[node]; !old {
				updateLabelsForNode(f, node, labels, nil)
			}
		}

		framework.ExpectNoError(WaitForClusterSizeFunc(f.Client,
			func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout))

		framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
		framework.ExpectNoError(framework.DeleteRC(f.Client, f.Namespace.Name, "node-selector"))
	})

	It("should scale up correct target pool [Feature:ClusterSizeAutoscalingScaleUp]", func() {
		framework.SkipUnlessProviderIs("gke")

		By("Creating new node-pool with one n1-standard-4 machine")
		const extraPoolName = "extra-pool"
		output, err := exec.Command("gcloud", "alpha", "container", "node-pools", "create", extraPoolName, "--quiet",
			"--machine-type=n1-standard-4",
			"--num-nodes=1",
			"--project="+framework.TestContext.CloudConfig.ProjectID,
			"--zone="+framework.TestContext.CloudConfig.Zone,
			"--cluster="+framework.TestContext.CloudConfig.Cluster).CombinedOutput()
		defer func() {
			glog.Infof("Deleting node pool %s", extraPoolName)
			output, err := exec.Command("gcloud", "alpha", "container", "node-pools", "delete", extraPoolName, "--quiet",
				"--project="+framework.TestContext.CloudConfig.ProjectID,
				"--zone="+framework.TestContext.CloudConfig.Zone,
				"--cluster="+framework.TestContext.CloudConfig.Cluster).CombinedOutput()
			if err != nil {
				glog.Infof("Error: %v", err)
			}
			glog.Infof("Node-pool deletion output: %s", output)
		}()
		framework.ExpectNoError(err)
		glog.Infof("Creating node-pool: %s", output)
		framework.ExpectNoError(framework.WaitForClusterSize(c, nodeCount+1, resizeTimeout))
		framework.ExpectNoError(enableAutoscaler(extraPoolName, 1, 2))

		By("Creating rc with 2 pods too big to fit default-pool but fitting extra-pool")
		ReserveMemory(f, "memory-reservation", 2, 2*memCapacityMb, false)
		defer framework.DeleteRC(f.Client, f.Namespace.Name, "memory-reservation")
		framework.ExpectNoError(framework.WaitForClusterSize(c, nodeCount+2, scaleUpTimeout))
	})

	It("should correctly scale down after a node is not needed [Feature:ClusterSizeAutoscalingScaleDown]", func() {
		By("Manually increase cluster size")
		increasedSize := 0
		newSizes := make(map[string]int)
		for key, val := range originalSizes {
			newSizes[key] = val + 2
			increasedSize += val + 2
		}
		setMigSizes(newSizes)
		framework.ExpectNoError(WaitForClusterSizeFunc(f.Client,
			func(size int) bool { return size >= increasedSize }, scaleUpTimeout))

		By("Some node should be removed")
		framework.ExpectNoError(WaitForClusterSizeFunc(f.Client,
			func(size int) bool { return size < increasedSize }, scaleDownTimeout))
	})
})
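
// getGKEClusterUrl builds the URL of the GKE API endpoint for the cluster under
// test, appending a fresh access token obtained via gcloud.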
func getGKEClusterUrl() string {
	out, err := exec.Command("gcloud", "auth", "print-access-token").Output()
	framework.ExpectNoError(err)
	token := strings.Replace(string(out), "\n", "", -1)

	return fmt.Sprintf("%s/v1/projects/%s/zones/%s/clusters/%s?access_token=%s",
		gkeEndpoint,
		framework.TestContext.CloudConfig.ProjectID,
		framework.TestContext.CloudConfig.Zone,
		framework.TestContext.CloudConfig.Cluster,
		token)
}
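
// isAutoscalerEnabled fetches the cluster config from the GKE API and reports
// whether it already contains a node pool autoscaler with the expected minNodeCount.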
func isAutoscalerEnabled(expectedMinNodeCountInTargetPool int) (bool, error) {
	resp, err := http.Get(getGKEClusterUrl())
	if err != nil {
		return false, err
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return false, err
	}
	strBody := string(body)
	glog.Infof("Cluster config %s", strBody)

	if strings.Contains(strBody, "\"minNodeCount\": "+strconv.Itoa(expectedMinNodeCountInTargetPool)) {
		return true, nil
	}
	return false, nil
}
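
// enableAutoscaler turns on autoscaling for the given node pool via the GKE API
// and polls until the change is visible in the cluster config or gkeUpdateTimeout passes.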
func enableAutoscaler(nodePool string, minCount, maxCount int) error {
	updateRequest := "{" +
		" \"update\": {" +
		"  \"desiredNodePoolId\": \"" + nodePool + "\"," +
		"  \"desiredNodePoolAutoscaling\": {" +
		"   \"enabled\": \"true\"," +
		"   \"minNodeCount\": \"" + strconv.Itoa(minCount) + "\"," +
		"   \"maxNodeCount\": \"" + strconv.Itoa(maxCount) + "\"" +
		"  }" +
		" }" +
		"}"

	url := getGKEClusterUrl()
	glog.Infof("Using gke api url %s", url)
	putResult, err := doPut(url, updateRequest)
	if err != nil {
		return fmt.Errorf("Failed to put %s: %v", url, err)
	}
	glog.Infof("Config update result: %s", putResult)

	for startTime := time.Now(); startTime.Add(gkeUpdateTimeout).After(time.Now()); time.Sleep(30 * time.Second) {
		if val, err := isAutoscalerEnabled(minCount); err == nil && val {
			return nil
		}
	}
	return fmt.Errorf("autoscaler not enabled")
}
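
// doPut sends an HTTP PUT request with a JSON payload and returns the response body.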
func doPut(url, content string) (string, error) {
	req, err := http.NewRequest("PUT", url, bytes.NewBuffer([]byte(content)))
	if err != nil {
		return "", err
	}
	req.Header.Set("Content-Type", "application/json")
	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}
	strBody := string(body)
	return strBody, nil
}
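
// CreateNodeSelectorPods starts an RC whose pods reserve a host port and only
// schedule onto nodes matching the given node selector.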
func CreateNodeSelectorPods(f *framework.Framework, id string, replicas int, nodeSelector map[string]string, expectRunning bool) {
	By("Running RC which reserves host port and defines node selector")

	config := &framework.RCConfig{
		Client:       f.Client,
		Name:         id,
		Namespace:    f.Namespace.Name,
		Timeout:      defaultTimeout,
		Image:        "gcr.io/google_containers/pause-amd64:3.0",
		Replicas:     replicas,
		HostPorts:    map[string]int{"port1": 4321},
		NodeSelector: nodeSelector,
	}
	err := framework.RunRC(*config)
	if expectRunning {
		framework.ExpectNoError(err)
	}
}
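
// CreateHostPortPods starts an RC whose pods all request the same host port,
// so they cannot all be scheduled onto the same node.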
func CreateHostPortPods(f *framework.Framework, id string, replicas int, expectRunning bool) {
	By("Running RC which reserves host port")
	config := &framework.RCConfig{
		Client:    f.Client,
		Name:      id,
		Namespace: f.Namespace.Name,
		Timeout:   defaultTimeout,
		Image:     framework.GetPauseImageName(f.Client),
		Replicas:  replicas,
		HostPorts: map[string]int{"port1": 4321},
	}
	err := framework.RunRC(*config)
	if expectRunning {
		framework.ExpectNoError(err)
	}
}
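
// ReserveCpu starts an RC of pause pods that together request the given number of millicores.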
func ReserveCpu(f *framework.Framework, id string, replicas, millicores int) {
	By(fmt.Sprintf("Running RC which reserves %v millicores", millicores))
	request := int64(millicores / replicas)
	config := &framework.RCConfig{
		Client:     f.Client,
		Name:       id,
		Namespace:  f.Namespace.Name,
		Timeout:    defaultTimeout,
		Image:      framework.GetPauseImageName(f.Client),
		Replicas:   replicas,
		CpuRequest: request,
	}
	framework.ExpectNoError(framework.RunRC(*config))
}
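
// ReserveMemory starts an RC of pause pods that together request the given amount of memory.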
func ReserveMemory(f *framework.Framework, id string, replicas, megabytes int, expectRunning bool) {
	By(fmt.Sprintf("Running RC which reserves %v MB of memory", megabytes))
	request := int64(1024 * 1024 * megabytes / replicas)
	config := &framework.RCConfig{
		Client:     f.Client,
		Name:       id,
		Namespace:  f.Namespace.Name,
		Timeout:    defaultTimeout,
		Image:      framework.GetPauseImageName(f.Client),
		Replicas:   replicas,
		MemRequest: request,
	}
	err := framework.RunRC(*config)
	if expectRunning {
		framework.ExpectNoError(err)
	}
}

// WaitForClusterSizeFunc waits until the number of schedulable, ready nodes satisfies the given function.
func WaitForClusterSizeFunc(c *client.Client, sizeFunc func(int) bool, timeout time.Duration) error {
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
		nodes, err := c.Nodes().List(api.ListOptions{FieldSelector: fields.Set{
			"spec.unschedulable": "false",
		}.AsSelector()})
		if err != nil {
			glog.Warningf("Failed to list nodes: %v", err)
			continue
		}
		numNodes := len(nodes.Items)

		// Filter out not-ready nodes.
		framework.FilterNodes(nodes, func(node api.Node) bool {
			return framework.IsNodeConditionSetAsExpected(&node, api.NodeReady, true)
		})
		numReady := len(nodes.Items)

		if numNodes == numReady && sizeFunc(numReady) {
			glog.Infof("Cluster has reached the desired size")
			return nil
		}
		glog.Infof("Waiting for cluster, current size %d, not ready nodes %d", numNodes, numNodes-numReady)
	}
	return fmt.Errorf("timeout waiting %v for appropriate cluster size", timeout)
}
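
// waitForAllCaPodsReadyInNamespace waits until every pod in the test namespace
// reports a Ready condition, or returns an error after scaleUpTimeout.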
func waitForAllCaPodsReadyInNamespace(f *framework.Framework, c *client.Client) error {
	var notready []string
	for start := time.Now(); time.Now().Before(start.Add(scaleUpTimeout)); time.Sleep(20 * time.Second) {
		pods, err := c.Pods(f.Namespace.Name).List(api.ListOptions{})
		if err != nil {
			return fmt.Errorf("failed to get pods: %v", err)
		}
		notready = make([]string, 0)
		for _, pod := range pods.Items {
			ready := false
			for _, cond := range pod.Status.Conditions {
				if cond.Type == api.PodReady && cond.Status == api.ConditionTrue {
					ready = true
				}
			}
			if !ready {
				notready = append(notready, pod.Name)
			}
		}
		if len(notready) == 0 {
			glog.Infof("All pods ready")
			return nil
		}
		glog.Infof("Some pods are not ready yet: %v", notready)
	}
	glog.Info("Timeout on waiting for pods being ready")
	glog.Info(framework.RunKubectlOrDie("get", "pods", "-o json", "--all-namespaces"))
	glog.Info(framework.RunKubectlOrDie("get", "nodes", "-o json"))

	// Some pods are still not running.
	return fmt.Errorf("Some pods are still not running: %v", notready)
}
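
// setMigSizes resizes each managed instance group to the requested size,
// skipping groups that already have the desired size.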
func setMigSizes(sizes map[string]int) {
	for mig, desiredSize := range sizes {
		currentSize, err := GroupSize(mig)
		framework.ExpectNoError(err)
		if desiredSize != currentSize {
			By(fmt.Sprintf("Setting size of %s to %d", mig, desiredSize))
			err = ResizeGroup(mig, int32(desiredSize))
			framework.ExpectNoError(err)
		}
	}
}
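
// updateLabelsForNode adds and removes the given labels on a node and updates it via the API.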
func updateLabelsForNode(f *framework.Framework, node string, addLabels map[string]string, rmLabels []string) {
	n, err := f.Client.Nodes().Get(node)
	ExpectNoError(err)
	for _, label := range rmLabels {
		delete(n.Labels, label)
	}
	for label, value := range addLabels {
		n.Labels[label] = value
	}
	_, err = f.Client.Nodes().Update(n)
	ExpectNoError(err)
	By(fmt.Sprintf("Labels successfully updated for node %s", node))
}