2015-07-21 14:15:55 +00:00
|
|
|
/*
|
|
|
|
Copyright 2015 The Kubernetes Authors All rights reserved.
|
|
|
|
|
|
|
|
Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
you may not use this file except in compliance with the License.
|
|
|
|
You may obtain a copy of the License at
|
|
|
|
|
|
|
|
http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
|
|
|
|
Unless required by applicable law or agreed to in writing, software
|
|
|
|
distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
See the License for the specific language governing permissions and
|
|
|
|
limitations under the License.
|
|
|
|
*/
|
|
|
|
|
|
|
|
package e2e
|
|
|
|
|
|
|
|
import (
|
|
|
|
"fmt"
|
|
|
|
"time"
|
|
|
|
|
2015-08-14 09:50:19 +00:00
|
|
|
"k8s.io/kubernetes/pkg/api"
|
2016-05-30 14:45:47 +00:00
|
|
|
client "k8s.io/kubernetes/pkg/client/unversioned"
|
|
|
|
"k8s.io/kubernetes/pkg/fields"
|
2016-04-07 17:21:31 +00:00
|
|
|
"k8s.io/kubernetes/test/e2e/framework"
|
2015-08-14 09:50:19 +00:00
|
|
|
|
2016-05-30 14:45:47 +00:00
|
|
|
"github.com/golang/glog"
|
2015-07-21 14:15:55 +00:00
|
|
|
. "github.com/onsi/ginkgo"
|
2015-08-14 09:50:19 +00:00
|
|
|
. "github.com/onsi/gomega"
|
2015-07-21 14:15:55 +00:00
|
|
|
)
|
|
|
|
|
2015-10-07 09:15:58 +00:00
|
|
|
const (
	// defaultTimeout bounds ordinary RC start-up operations (framework.RCConfig.Timeout).
	defaultTimeout = 3 * time.Minute
	// resizeTimeout bounds waiting for the cluster to settle after a manual
	// node-group resize back to the original node count.
	resizeTimeout = 5 * time.Minute
	// scaleUpTimeout bounds waiting for autoscaler-driven cluster growth.
	scaleUpTimeout = 5 * time.Minute
	// scaleDownTimeout bounds waiting for autoscaler-driven cluster shrinking,
	// which is expected to be much slower than scale up.
	scaleDownTimeout = 15 * time.Minute
)
|
|
|
|
|
2016-05-30 14:45:47 +00:00
|
|
|
// E2e coverage for the cluster-autoscaler scale-up path. Gated behind the
// [Feature:ClusterSizeAutoscaling] tag and marked [Slow] because each
// scenario waits several minutes for real node-group changes.
var _ = framework.KubeDescribe("Cluster size autoscaling scale up [Feature:ClusterSizeAutoscaling] [Slow]", func() {
	f := framework.NewDefaultFramework("autoscaling")
	var nodeCount int     // number of ready schedulable nodes at test start
	var coresPerNode int  // CPU cores of the first node; assumes homogeneous nodes — TODO confirm
	var memCapacityMb int // memory capacity (MB) of the first node; same homogeneity assumption

	BeforeEach(func() {
		// Autoscaling e2e is only wired up for GCE in this suite.
		framework.SkipUnlessProviderIs("gce")

		nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
		nodeCount = len(nodes.Items)
		Expect(nodeCount).NotTo(BeZero())
		// Capacity is sampled from the first node only; the scenarios below
		// rely on all nodes being identically sized.
		cpu := nodes.Items[0].Status.Capacity[api.ResourceCPU]
		mem := nodes.Items[0].Status.Capacity[api.ResourceMemory]
		coresPerNode = int((&cpu).MilliValue() / 1000)
		memCapacityMb = int((&mem).Value() / 1024 / 1024)
	})

	It("Should correctly handle pending pods", func() {
		By("Too large pending pod does not increase cluster size")
		// A single replica requesting a full node's memory can never be
		// scheduled, so the autoscaler must NOT add nodes for it.
		ReserveMemory(f, "memory-reservation", 1, memCapacityMb, false)
		// Verify, that cluster size is not changed.
		// TODO: find a better way of verification that the cluster size will remain unchanged.
		time.Sleep(scaleUpTimeout)
		framework.ExpectNoError(framework.WaitForClusterSize(f.Client, nodeCount, scaleUpTimeout))
		framework.ExpectNoError(framework.DeleteRC(f.Client, f.Namespace.Name, "memory-reservation"))
		// Reset the node group to the original size before the next scenario.
		framework.ExpectNoError(ResizeGroup(int32(nodeCount)))
		framework.ExpectNoError(framework.WaitForClusterSize(f.Client, nodeCount, resizeTimeout))

		By("Small pending pods increase cluster size")
		// 100 small replicas whose combined request exceeds current capacity
		// should make the autoscaler add at least one node.
		ReserveMemory(f, "memory-reservation", 100, nodeCount*memCapacityMb, false)
		// Verify, that cluster size is increased
		framework.ExpectNoError(WaitForClusterSizeFunc(f.Client,
			func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout))
		framework.ExpectNoError(framework.DeleteRC(f.Client, f.Namespace.Name, "memory-reservation"))
		framework.ExpectNoError(ResizeGroup(int32(nodeCount)))
		framework.ExpectNoError(framework.WaitForClusterSize(f.Client, nodeCount, resizeTimeout))

		By("Handling node port pods")
		// Host-port pods cannot share a node (they all claim the same port),
		// so nodeCount+2 replicas force growth to at least nodeCount+2 nodes.
		CreateHostPortPods(f, "host-port", nodeCount+2, false)
		framework.ExpectNoError(WaitForClusterSizeFunc(f.Client,
			func(size int) bool { return size >= nodeCount+2 }, scaleUpTimeout))

		// NOTE(review): the "host-port" RC is not deleted before resizing,
		// unlike the earlier scenarios — confirm this is intentional.
		framework.ExpectNoError(ResizeGroup(int32(nodeCount)))
		framework.ExpectNoError(framework.WaitForClusterSize(f.Client, nodeCount, resizeTimeout))
	})
})
|
|
|
|
|
|
|
|
// E2e coverage for the cluster-autoscaler scale-down path: grow the cluster
// via pending pods, then verify it shrinks again after the load is deleted.
// Gated behind [Feature:ClusterSizeAutoscalingScaleDown] and marked [Slow].
var _ = framework.KubeDescribe("Cluster size autoscaling scale down[Feature:ClusterSizeAutoscalingScaleDown] [Slow]", func() {
	f := framework.NewDefaultFramework("autoscaling")
	var nodeCount int     // number of ready schedulable nodes at test start
	var coresPerNode int  // assigned below but never read in this suite — kept for parity with the scale-up suite
	var memCapacityMb int // memory capacity (MB) of the first node; assumes homogeneous nodes — TODO confirm

	BeforeEach(func() {
		// Autoscaling e2e is only wired up for GCE in this suite.
		framework.SkipUnlessProviderIs("gce")

		nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
		nodeCount = len(nodes.Items)
		Expect(nodeCount).NotTo(BeZero())
		// Capacity is sampled from the first node only.
		cpu := nodes.Items[0].Status.Capacity[api.ResourceCPU]
		mem := nodes.Items[0].Status.Capacity[api.ResourceMemory]
		coresPerNode = int((&cpu).MilliValue() / 1000)
		memCapacityMb = int((&mem).Value() / 1024 / 1024)
	})

	It("Should correctly handle pending and scale down after deletion", func() {
		By("Small pending pods increase cluster size")
		// Combined request exceeds current capacity, forcing a scale up.
		ReserveMemory(f, "memory-reservation", 100, nodeCount*memCapacityMb, false)
		// Verify, that cluster size is increased
		framework.ExpectNoError(WaitForClusterSizeFunc(f.Client,
			func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout))
		framework.ExpectNoError(framework.DeleteRC(f.Client, f.Namespace.Name, "memory-reservation"))
		// Once the reservation RC is gone, the autoscaler should remove the
		// extra node(s) on its own within scaleDownTimeout.
		framework.ExpectNoError(WaitForClusterSizeFunc(f.Client,
			func(size int) bool { return size < nodeCount+1 }, scaleDownTimeout))
	})
})
|
|
|
|
|
2016-05-17 13:07:30 +00:00
|
|
|
func CreateHostPortPods(f *framework.Framework, id string, replicas int, expectRunning bool) {
|
|
|
|
By(fmt.Sprintf("Running RC which reserves host port"))
|
|
|
|
config := &framework.RCConfig{
|
|
|
|
Client: f.Client,
|
|
|
|
Name: id,
|
|
|
|
Namespace: f.Namespace.Name,
|
2016-05-30 14:45:47 +00:00
|
|
|
Timeout: defaultTimeout,
|
2016-05-26 16:16:43 +00:00
|
|
|
Image: framework.GetPauseImageName(f.Client),
|
2016-05-17 13:07:30 +00:00
|
|
|
Replicas: replicas,
|
|
|
|
HostPorts: map[string]int{"port1": 4321},
|
|
|
|
}
|
|
|
|
err := framework.RunRC(*config)
|
|
|
|
if expectRunning {
|
|
|
|
framework.ExpectNoError(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
}
|
|
|
|
|
2016-05-16 11:27:18 +00:00
|
|
|
func ReserveCpu(f *framework.Framework, id string, replicas, millicores int) {
|
2015-07-21 14:15:55 +00:00
|
|
|
By(fmt.Sprintf("Running RC which reserves %v millicores", millicores))
|
2016-05-16 11:27:18 +00:00
|
|
|
request := int64(millicores / replicas)
|
2016-04-07 17:21:31 +00:00
|
|
|
config := &framework.RCConfig{
|
2015-09-25 09:41:26 +00:00
|
|
|
Client: f.Client,
|
|
|
|
Name: id,
|
|
|
|
Namespace: f.Namespace.Name,
|
2016-05-30 14:45:47 +00:00
|
|
|
Timeout: defaultTimeout,
|
2016-05-26 16:16:43 +00:00
|
|
|
Image: framework.GetPauseImageName(f.Client),
|
2016-05-16 11:27:18 +00:00
|
|
|
Replicas: replicas,
|
|
|
|
CpuRequest: request,
|
2015-07-21 14:15:55 +00:00
|
|
|
}
|
2016-04-07 17:21:31 +00:00
|
|
|
framework.ExpectNoError(framework.RunRC(*config))
|
2015-07-21 14:15:55 +00:00
|
|
|
}
|
|
|
|
|
2016-05-16 11:27:18 +00:00
|
|
|
func ReserveMemory(f *framework.Framework, id string, replicas, megabytes int, expectRunning bool) {
|
2015-09-25 09:41:26 +00:00
|
|
|
By(fmt.Sprintf("Running RC which reserves %v MB of memory", megabytes))
|
2016-05-16 11:27:18 +00:00
|
|
|
request := int64(1024 * 1024 * megabytes / replicas)
|
2016-04-07 17:21:31 +00:00
|
|
|
config := &framework.RCConfig{
|
2015-09-25 09:41:26 +00:00
|
|
|
Client: f.Client,
|
|
|
|
Name: id,
|
|
|
|
Namespace: f.Namespace.Name,
|
2016-05-30 14:45:47 +00:00
|
|
|
Timeout: defaultTimeout,
|
2016-05-26 16:16:43 +00:00
|
|
|
Image: framework.GetPauseImageName(f.Client),
|
2016-05-16 11:27:18 +00:00
|
|
|
Replicas: replicas,
|
|
|
|
MemRequest: request,
|
|
|
|
}
|
|
|
|
err := framework.RunRC(*config)
|
|
|
|
if expectRunning {
|
|
|
|
framework.ExpectNoError(err)
|
2015-07-21 14:15:55 +00:00
|
|
|
}
|
|
|
|
}
|
2016-05-30 14:45:47 +00:00
|
|
|
|
|
|
|
// WaitForClusterSizeFunc polls the node list every 20 seconds until every
// schedulable node is Ready AND sizeFunc returns true for the node count,
// or until timeout elapses. Unschedulable nodes are excluded from the count
// entirely; not-Ready nodes count toward the size but prevent success.
// (Previous comment named the wrong function, "WaitForClusterSize".)
func WaitForClusterSizeFunc(c *client.Client, sizeFunc func(int) bool, timeout time.Duration) error {
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
		// Only schedulable nodes are considered part of the cluster.
		nodes, err := c.Nodes().List(api.ListOptions{FieldSelector: fields.Set{
			"spec.unschedulable": "false",
		}.AsSelector()})
		if err != nil {
			// Transient list failures are tolerated; retry on the next poll.
			glog.Warningf("Failed to list nodes: %v", err)
			continue
		}
		numNodes := len(nodes.Items)

		// Filter out not-ready nodes.
		framework.FilterNodes(nodes, func(node api.Node) bool {
			return framework.IsNodeConditionSetAsExpected(&node, api.NodeReady, true)
		})
		numReady := len(nodes.Items)

		// Success requires both: no not-ready nodes, and the caller's size
		// predicate satisfied by the ready-node count.
		if numNodes == numReady && sizeFunc(numReady) {
			glog.Infof("Cluster has reached the desired size")
			return nil
		}
		glog.Infof("Waiting for cluster, current size %d, not ready nodes %d", numNodes, numNodes-numReady)
	}
	return fmt.Errorf("timeout waiting %v for appropriate cluster size", timeout)
}
|