// k3s/test/e2e/cluster_size_autoscaling.go


/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/api"
	client "k8s.io/kubernetes/pkg/client/unversioned"
	"k8s.io/kubernetes/pkg/fields"
	"k8s.io/kubernetes/test/e2e/framework"

	"github.com/golang/glog"
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)
const (
	defaultTimeout   = 3 * time.Minute
	resizeTimeout    = 5 * time.Minute
	scaleUpTimeout   = 5 * time.Minute
	scaleDownTimeout = 15 * time.Minute
)
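
// The scale-up suite checks three autoscaler behaviors on GCE: a pod that can
// never fit does not trigger a resize, many small pending pods do, and pods
// that conflict on a host port need one node each, forcing new nodes.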
var _ = framework.KubeDescribe("Cluster size autoscaling scale up [Feature:ClusterSizeAutoscaling] [Slow]", func() {
f := framework.NewDefaultFramework("autoscaling")
var nodeCount int
var coresPerNode int
var memCapacityMb int
BeforeEach(func() {
framework.SkipUnlessProviderIs("gce")
nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
nodeCount = len(nodes.Items)
Expect(nodeCount).NotTo(BeZero())
cpu := nodes.Items[0].Status.Capacity[api.ResourceCPU]
mem := nodes.Items[0].Status.Capacity[api.ResourceMemory]
coresPerNode = int((&cpu).MilliValue() / 1000)
memCapacityMb = int((&mem).Value() / 1024 / 1024)
})
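
	// Each scenario restores the original node count with ResizeGroup before
	// moving on, so the three checks do not interfere with each other.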
It("Should correctly handle pending pods", func() {
By("Too large pending pod does not increase cluster size")
ReserveMemory(f, "memory-reservation", 1, memCapacityMb, false)
// Verify, that cluster size is not changed.
// TODO: find a better way of verification that the cluster size will remain unchanged.
2016-05-30 14:45:47 +00:00
time.Sleep(scaleUpTimeout)
framework.ExpectNoError(framework.WaitForClusterSize(f.Client, nodeCount, scaleUpTimeout))
framework.ExpectNoError(framework.DeleteRC(f.Client, f.Namespace.Name, "memory-reservation"))
2016-05-30 14:45:47 +00:00
framework.ExpectNoError(ResizeGroup(int32(nodeCount)))
framework.ExpectNoError(framework.WaitForClusterSize(f.Client, nodeCount, resizeTimeout))
By("Small pending pods increase cluster size")
ReserveMemory(f, "memory-reservation", 100, nodeCount*memCapacityMb, false)
// Verify, that cluster size is increased
2016-05-30 14:45:47 +00:00
framework.ExpectNoError(WaitForClusterSizeFunc(f.Client,
func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout))
framework.ExpectNoError(framework.DeleteRC(f.Client, f.Namespace.Name, "memory-reservation"))
framework.ExpectNoError(ResizeGroup(int32(nodeCount)))
2016-05-30 14:45:47 +00:00
framework.ExpectNoError(framework.WaitForClusterSize(f.Client, nodeCount, resizeTimeout))
By("Handling node port pods")
CreateHostPortPods(f, "host-port", nodeCount+2, false)
2016-05-30 14:45:47 +00:00
framework.ExpectNoError(WaitForClusterSizeFunc(f.Client,
func(size int) bool { return size >= nodeCount+2 }, scaleUpTimeout))
framework.ExpectNoError(ResizeGroup(int32(nodeCount)))
2016-05-30 14:45:47 +00:00
framework.ExpectNoError(framework.WaitForClusterSize(f.Client, nodeCount, resizeTimeout))
})
})
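
// The scale-down suite verifies the reverse path: after the pods that forced a
// scale-up are deleted, the autoscaler should remove the now-idle node again.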
var _ = framework.KubeDescribe("Cluster size autoscaling scale down[Feature:ClusterSizeAutoscalingScaleDown] [Slow]", func() {
f := framework.NewDefaultFramework("autoscaling")
var nodeCount int
var coresPerNode int
var memCapacityMb int
BeforeEach(func() {
framework.SkipUnlessProviderIs("gce")
nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
nodeCount = len(nodes.Items)
Expect(nodeCount).NotTo(BeZero())
cpu := nodes.Items[0].Status.Capacity[api.ResourceCPU]
mem := nodes.Items[0].Status.Capacity[api.ResourceMemory]
coresPerNode = int((&cpu).MilliValue() / 1000)
memCapacityMb = int((&mem).Value() / 1024 / 1024)
})
It("Should correctly handle pending and scale down after deletion", func() {
By("Small pending pods increase cluster size")
ReserveMemory(f, "memory-reservation", 100, nodeCount*memCapacityMb, false)
// Verify, that cluster size is increased
framework.ExpectNoError(WaitForClusterSizeFunc(f.Client,
func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout))
framework.ExpectNoError(framework.DeleteRC(f.Client, f.Namespace.Name, "memory-reservation"))
framework.ExpectNoError(WaitForClusterSizeFunc(f.Client,
func(size int) bool { return size < nodeCount+1 }, scaleDownTimeout))
})
})
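
// CreateHostPortPods runs an RC whose pods all claim the same host port, so
// the scheduler can place at most one replica per node.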
func CreateHostPortPods(f *framework.Framework, id string, replicas int, expectRunning bool) {
	By("Running RC which reserves host port")
	config := &framework.RCConfig{
		Client:    f.Client,
		Name:      id,
		Namespace: f.Namespace.Name,
		Timeout:   defaultTimeout,
		Image:     framework.GetPauseImageName(f.Client),
		Replicas:  replicas,
		HostPorts: map[string]int{"port1": 4321},
	}
	err := framework.RunRC(*config)
	if expectRunning {
		framework.ExpectNoError(err)
	}
}
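
// ReserveCpu runs an RC that splits the requested millicores evenly across its
// replicas. It is not called by the tests in this file.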
func ReserveCpu(f *framework.Framework, id string, replicas, millicores int) {
	By(fmt.Sprintf("Running RC which reserves %v millicores", millicores))
	request := int64(millicores / replicas) // integer division rounds the per-replica request down
	config := &framework.RCConfig{
		Client:     f.Client,
		Name:       id,
		Namespace:  f.Namespace.Name,
		Timeout:    defaultTimeout,
		Image:      framework.GetPauseImageName(f.Client),
		Replicas:   replicas,
		CpuRequest: request,
	}
	framework.ExpectNoError(framework.RunRC(*config))
}
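
// ReserveMemory runs an RC that spreads the requested megabytes evenly across
// its replicas. With expectRunning=false the caller tolerates pods that never
// schedule, which is what the pending-pod scenarios above rely on.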
func ReserveMemory(f *framework.Framework, id string, replicas, megabytes int, expectRunning bool) {
	By(fmt.Sprintf("Running RC which reserves %v MB of memory", megabytes))
	request := int64(1024 * 1024 * megabytes / replicas) // bytes per replica, rounded down
	config := &framework.RCConfig{
		Client:     f.Client,
		Name:       id,
		Namespace:  f.Namespace.Name,
		Timeout:    defaultTimeout,
		Image:      framework.GetPauseImageName(f.Client),
		Replicas:   replicas,
		MemRequest: request,
	}
	err := framework.RunRC(*config)
	if expectRunning {
		framework.ExpectNoError(err)
	}
}

// WaitForClusterSizeFunc polls the node list every 20 seconds until every
// schedulable node is Ready and the node count satisfies sizeFunc, or the
// timeout expires.
func WaitForClusterSizeFunc(c *client.Client, sizeFunc func(int) bool, timeout time.Duration) error {
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
		nodes, err := c.Nodes().List(api.ListOptions{FieldSelector: fields.Set{
			"spec.unschedulable": "false",
		}.AsSelector()})
		if err != nil {
			glog.Warningf("Failed to list nodes: %v", err)
			continue
		}
		numNodes := len(nodes.Items)

		// Filter out not-ready nodes.
		framework.FilterNodes(nodes, func(node api.Node) bool {
			return framework.IsNodeConditionSetAsExpected(&node, api.NodeReady, true)
		})
		numReady := len(nodes.Items)

		if numNodes == numReady && sizeFunc(numReady) {
			glog.Infof("Cluster has reached the desired size")
			return nil
		}
		glog.Infof("Waiting for cluster, current size %d, not ready nodes %d", numNodes, numNodes-numReady)
	}
	return fmt.Errorf("timeout waiting %v for appropriate cluster size", timeout)
}
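
// waitForScaleDown is an illustrative sketch, not part of the original suite:
// it shows how a caller might wrap WaitForClusterSizeFunc with the same
// predicate the scale-down test uses. The function name and parameter names
// are hypothetical.
func waitForScaleDown(c *client.Client, originalNodeCount int) error {
	return WaitForClusterSizeFunc(c,
		func(size int) bool { return size < originalNodeCount+1 }, scaleDownTimeout)
}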