mirror of https://github.com/k3s-io/k3s
Merge pull request #21474 from quinton-hoole/2016-01-12-ubelite-e2e-spreading
Add Ubernetes Lite e2e tests for spreading RC and Service pods evenly…
commit
307ec46bce
@@ -33,7 +33,6 @@ import (
)

const (
	podListTimeout     = time.Minute
	serverStartTimeout = podStartTimeout + 3*time.Minute
)

@@ -22,7 +22,6 @@ import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/labels"
	"k8s.io/kubernetes/pkg/types"
	"k8s.io/kubernetes/pkg/util"
	"k8s.io/kubernetes/pkg/util/wait"

@@ -117,12 +116,3 @@ func ServeImageOrFail(f *Framework, test string, image string) {
		Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds())
	}
}

func isElementOf(podUID types.UID, pods *api.PodList) bool {
	for _, pod := range pods.Items {
		if pod.UID == podUID {
			return true
		}
	}
	return false
}

@@ -46,49 +46,6 @@ func getPodsScheduled(pods *api.PodList) (scheduledPods, notScheduledPods []api.
	return
}

// Simplified version of RunRC, that does not create RC, but creates plain Pods and
// requires passing whole Pod definition, which is needed to test various Scheduler predicates.
func startPods(c *client.Client, replicas int, ns string, podNamePrefix string, pod api.Pod) {
	allPods, err := c.Pods(api.NamespaceAll).List(api.ListOptions{})
	expectNoError(err)
	podsScheduledBefore, _ := getPodsScheduled(allPods)

	for i := 0; i < replicas; i++ {
		podName := fmt.Sprintf("%v-%v", podNamePrefix, i)
		pod.ObjectMeta.Name = podName
		pod.ObjectMeta.Labels["name"] = podName
		pod.Spec.Containers[0].Name = podName
		_, err = c.Pods(ns).Create(&pod)
		expectNoError(err)
	}

	// Wait for pods to start running. Note: this is a functional
	// test, not a performance test, so the timeout needs to be
	// sufficiently long that it's only triggered if things are
	// completely broken vs. running slowly.
	timeout := 10 * time.Minute
	startTime := time.Now()
	currentlyScheduledPods := 0
	for len(podsScheduledBefore)+replicas != currentlyScheduledPods {
		allPods, err := c.Pods(api.NamespaceAll).List(api.ListOptions{})
		expectNoError(err)
		scheduledPods := 0
		for _, pod := range allPods.Items {
			if pod.Spec.NodeName != "" {
				scheduledPods += 1
			}
		}
		currentlyScheduledPods = scheduledPods
		Logf("%v pods running", currentlyScheduledPods)
		if startTime.Add(timeout).Before(time.Now()) {
			Logf("Timed out after %v waiting for pods to start running.", timeout)
			break
		}
		time.Sleep(5 * time.Second)
	}
	Expect(currentlyScheduledPods).To(Equal(len(podsScheduledBefore) + replicas))
}

func getRequestedCPU(pod api.Pod) int64 {
	var result int64
	for _, container := range pod.Spec.Containers {
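
Because this helper overwrites pod.ObjectMeta.Name, pod.ObjectMeta.Labels["name"], and the first container's name for every replica, the Pod definition passed in needs a non-nil Labels map and at least one container. A hypothetical call shape (the replica count, name prefix, and use of the pause image here are chosen purely for illustration; only the signature comes from the diff):

	startPods(c, 3, ns, "predicate-filler", api.Pod{
		ObjectMeta: api.ObjectMeta{
			Labels: map[string]string{}, // filled in per replica by startPods
		},
		Spec: api.PodSpec{
			Containers: []api.Container{{
				Image: "gcr.io/google_containers/pause:2.0",
			}},
		},
	})
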
@@ -247,7 +204,7 @@ var _ = Describe("SchedulerPredicates [Serial]", func() {
					},
				},
			},
		})
		}, true)

		podName := "additional-pod"
		_, err := c.Pods(ns).Create(&api.Pod{
@@ -333,7 +290,7 @@ var _ = Describe("SchedulerPredicates [Serial]", func() {
					},
				},
			},
		})
		}, true)

		podName := "additional-pod"
		_, err = c.Pods(ns).Create(&api.Pod{

@@ -0,0 +1,234 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e

import (
	"fmt"
	"math"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/unversioned"
	client "k8s.io/kubernetes/pkg/client/unversioned"
	"k8s.io/kubernetes/pkg/labels"
	"k8s.io/kubernetes/pkg/util"
	"k8s.io/kubernetes/pkg/util/intstr"
	"k8s.io/kubernetes/pkg/util/sets"
)

var _ = Describe("Ubernetes Lite", func() {
	framework := NewFramework("ubernetes-lite")
	var zoneCount int
	var err error
	image := "gcr.io/google_containers/serve_hostname:1.1"
	BeforeEach(func() {
		if zoneCount <= 0 {
			zoneCount, err = getZoneCount(framework.Client)
			Expect(err).NotTo(HaveOccurred())
		}
		By(fmt.Sprintf("Checking for multi-zone cluster. Zone count = %d", zoneCount))
		SkipUnlessAtLeast(zoneCount, 2, "Zone count is %d, only run for multi-zone clusters, skipping test")
		SkipUnlessProviderIs("gce", "gke", "aws")
		// TODO: SkipUnlessDefaultScheduler() // Non-default schedulers might not spread
	})
	It("should spread the pods of a service across zones", func() {
		SpreadServiceOrFail(framework, (2*zoneCount)+1, image)
	})

	It("should spread the pods of a replication controller across zones", func() {
		SpreadRCOrFail(framework, (2*zoneCount)+1, image)
	})
})

// Check that the pods comprising a service get spread evenly across available zones
func SpreadServiceOrFail(f *Framework, replicaCount int, image string) {
	// First create the service
	serviceName := "test-service"
	serviceSpec := &api.Service{
		ObjectMeta: api.ObjectMeta{
			Name:      serviceName,
			Namespace: f.Namespace.Name,
		},
		Spec: api.ServiceSpec{
			Selector: map[string]string{
				"service": serviceName,
			},
			Ports: []api.ServicePort{{
				Port:       80,
				TargetPort: intstr.FromInt(80),
			}},
		},
	}
	_, err := f.Client.Services(f.Namespace.Name).Create(serviceSpec)
	Expect(err).NotTo(HaveOccurred())

	// Now create some pods behind the service
	podSpec := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name:   serviceName,
			Labels: map[string]string{"service": serviceName},
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:  "test",
					Image: "gcr.io/google_containers/pause:2.0",
				},
			},
		},
	}
	startPods(f.Client, replicaCount, f.Namespace.Name, serviceName, *podSpec, false)

	// Wait for all of them to be scheduled
	selector := labels.SelectorFromSet(labels.Set(map[string]string{"service": serviceName}))
	pods, err := waitForPodsWithLabelScheduled(f.Client, f.Namespace.Name, selector)
	Expect(err).NotTo(HaveOccurred())

	// Now make sure they're spread across zones
	zoneNames, err := getZoneNames(f.Client)
	Expect(err).NotTo(HaveOccurred())
	Expect(checkZoneSpreading(f.Client, pods, zoneNames)).To(Equal(true))
}

// Find the name of the zone in which a Node is running
func getZoneNameForNode(node api.Node) (string, error) {
	for key, value := range node.Labels {
		if key == unversioned.LabelZoneFailureDomain {
			return value, nil
		}
	}
	return "", fmt.Errorf("Zone name for node %s not found. No label with key %s",
		node.Name, unversioned.LabelZoneFailureDomain)
}

// Find the names of all zones in which we have nodes in this cluster.
func getZoneNames(c *client.Client) ([]string, error) {
	zoneNames := sets.NewString()
	nodes, err := c.Nodes().List(api.ListOptions{})
	if err != nil {
		return nil, err
	}
	for _, node := range nodes.Items {
		zoneName, err := getZoneNameForNode(node)
		Expect(err).NotTo(HaveOccurred())
		zoneNames.Insert(zoneName)
	}
	return zoneNames.List(), nil
}

// Return the number of zones in which we have nodes in this cluster.
func getZoneCount(c *client.Client) (int, error) {
	zoneNames, err := getZoneNames(c)
	if err != nil {
		return -1, err
	}
	return len(zoneNames), nil
}

// Find the name of the zone in which the pod is scheduled
func getZoneNameForPod(c *client.Client, pod api.Pod) (string, error) {
	By(fmt.Sprintf("Getting zone name for pod %s, on node %s", pod.Name, pod.Spec.NodeName))
	node, err := c.Nodes().Get(pod.Spec.NodeName)
	Expect(err).NotTo(HaveOccurred())
	return getZoneNameForNode(*node)
}

// Determine whether a set of pods are approximately evenly spread
// across a given set of zones
func checkZoneSpreading(c *client.Client, pods *api.PodList, zoneNames []string) (bool, error) {
	podsPerZone := make(map[string]int)
	for _, zoneName := range zoneNames {
		podsPerZone[zoneName] = 0
	}
	for _, pod := range pods.Items {
		if pod.DeletionTimestamp != nil {
			continue
		}
		zoneName, err := getZoneNameForPod(c, pod)
		Expect(err).NotTo(HaveOccurred())
		podsPerZone[zoneName] = podsPerZone[zoneName] + 1
	}
	minPodsPerZone := math.MaxInt32
	maxPodsPerZone := 0
	for _, podCount := range podsPerZone {
		if podCount < minPodsPerZone {
			minPodsPerZone = podCount
		}
		if podCount > maxPodsPerZone {
			maxPodsPerZone = podCount
		}
	}
	Expect(minPodsPerZone).To(BeNumerically("~", maxPodsPerZone, 1),
		"Pods were not evenly spread across zones. %d in one zone and %d in another zone",
		minPodsPerZone, maxPodsPerZone)
	return true, nil
}

// Check that the pods comprising a replication controller get spread evenly across available zones
func SpreadRCOrFail(f *Framework, replicaCount int, image string) {
	name := "ubelite-spread-rc-" + string(util.NewUUID())
	By(fmt.Sprintf("Creating replication controller %s", name))
	controller, err := f.Client.ReplicationControllers(f.Namespace.Name).Create(&api.ReplicationController{
		ObjectMeta: api.ObjectMeta{
			Namespace: f.Namespace.Name,
			Name:      name,
		},
		Spec: api.ReplicationControllerSpec{
			Replicas: replicaCount,
			Selector: map[string]string{
				"name": name,
			},
			Template: &api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{
					Labels: map[string]string{"name": name},
				},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{
							Name:  name,
							Image: image,
							Ports: []api.ContainerPort{{ContainerPort: 9376}},
						},
					},
				},
			},
		},
	})
	Expect(err).NotTo(HaveOccurred())
	// Cleanup the replication controller when we are done.
	defer func() {
		// Resize the replication controller to zero to get rid of pods.
		if err := DeleteRC(f.Client, f.Namespace.Name, controller.Name); err != nil {
			Logf("Failed to cleanup replication controller %v: %v.", controller.Name, err)
		}
	}()
	// List the pods, making sure we observe all the replicas.
	selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
	pods, err := podsCreated(f.Client, f.Namespace.Name, name, replicaCount)
	Expect(err).NotTo(HaveOccurred())

	// Wait for all of them to be scheduled
	By(fmt.Sprintf("Waiting for %d replicas of %s to be scheduled. Selector: %v", replicaCount, name, selector))
	pods, err = waitForPodsWithLabelScheduled(f.Client, f.Namespace.Name, selector)
	Expect(err).NotTo(HaveOccurred())

	// Now make sure they're spread across zones
	zoneNames, err := getZoneNames(f.Client)
	Expect(err).NotTo(HaveOccurred())
	Expect(checkZoneSpreading(f.Client, pods, zoneNames)).To(Equal(true))
}

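At its core, checkZoneSpreading counts non-terminating pods per zone and then requires the busiest and quietest zones to differ by at most one pod. A standalone sketch of just that comparison, outside the e2e framework (evenlySpread and the sample zone counts are invented for illustration):

	package main

	import "fmt"

	// evenlySpread reports whether the per-zone pod counts differ by at most one,
	// mirroring the min/max comparison that checkZoneSpreading performs above.
	func evenlySpread(podsPerZone map[string]int) bool {
		first := true
		min, max := 0, 0
		for _, n := range podsPerZone {
			if first {
				min, max = n, n
				first = false
				continue
			}
			if n < min {
				min = n
			}
			if n > max {
				max = n
			}
		}
		return max-min <= 1
	}

	func main() {
		// Hypothetical counts for 7 replicas spread over 3 zones.
		fmt.Println(evenlySpread(map[string]int{"zone-a": 3, "zone-b": 2, "zone-c": 2})) // true
		fmt.Println(evenlySpread(map[string]int{"zone-a": 5, "zone-b": 1, "zone-c": 1})) // false
	}

The tests create (2*zoneCount)+1 replicas precisely so that an even spread is never an exact division: at least one zone must hold one more pod than another, which exercises the "within one" tolerance above.
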
@@ -53,6 +53,7 @@ import (
	"k8s.io/kubernetes/pkg/labels"
	"k8s.io/kubernetes/pkg/runtime"
	sshutil "k8s.io/kubernetes/pkg/ssh"
	"k8s.io/kubernetes/pkg/types"
	"k8s.io/kubernetes/pkg/util"
	deploymentutil "k8s.io/kubernetes/pkg/util/deployment"
	"k8s.io/kubernetes/pkg/util/sets"

@@ -72,6 +73,8 @@ import (
)

const (
	// How long to wait for the pod to be listable
	podListTimeout = time.Minute
	// Initial pod start can be delayed O(minutes) by slow docker pulls
	// TODO: Make this 30 seconds once #4566 is resolved.
	podStartTimeout = 5 * time.Minute

@@ -111,6 +114,9 @@ const (
	// How long pods have to be "ready" when a test begins.
	podReadyBeforeTimeout = 2 * time.Minute

	// How long pods have to become scheduled onto nodes
	podScheduledBeforeTimeout = podListTimeout + (20 * time.Second)

	podRespondingTimeout     = 2 * time.Minute
	serviceRespondingTimeout = 2 * time.Minute
	endpointRegisterTimeout  = time.Minute
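
For reference, with podListTimeout = time.Minute defined earlier in this const block, the new podScheduledBeforeTimeout works out to 1 minute 20 seconds.
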
@@ -307,6 +313,12 @@ func SkipUnlessNodeCountIsAtLeast(minNodeCount int) {
	}
}

func SkipUnlessAtLeast(value int, minValue int, message string) {
	if value < minValue {
		Skipf(message)
	}
}

func SkipIfProviderIs(unsupportedProviders ...string) {
	if providerIs(unsupportedProviders...) {
		Skipf("Not supported for providers %v (found %s)", unsupportedProviders, testContext.Provider)
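
The new Ubernetes Lite suite added above calls this helper from its BeforeEach to skip single-zone clusters:

	SkipUnlessAtLeast(zoneCount, 2, "Zone count is %d, only run for multi-zone clusters, skipping test")
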
@@ -1836,6 +1848,26 @@ func (config *RCConfig) start() error {
	return nil
}

// Simplified version of RunRC, that does not create RC, but creates plain Pods.
// optionally waits for pods to start running (if waitForRunning == true)
func startPods(c *client.Client, replicas int, namespace string, podNamePrefix string, pod api.Pod, waitForRunning bool) {
	startPodsID := string(util.NewUUID()) // So that we can label and find them
	for i := 0; i < replicas; i++ {
		podName := fmt.Sprintf("%v-%v", podNamePrefix, i)
		pod.ObjectMeta.Name = podName
		pod.ObjectMeta.Labels["name"] = podName
		pod.ObjectMeta.Labels["startPodsID"] = startPodsID
		pod.Spec.Containers[0].Name = podName
		_, err := c.Pods(namespace).Create(&pod)
		expectNoError(err)
	}
	if waitForRunning {
		label := labels.SelectorFromSet(labels.Set(map[string]string{"startPodsID": startPodsID}))
		err := waitForPodsWithLabelRunning(c, namespace, label)
		expectNoError(err, "Error waiting for %d pods to be running - probably a timeout", replicas)
	}
}

func dumpPodDebugInfo(c *client.Client, pods []*api.Pod) {
	badNodes := sets.NewString()
	for _, p := range pods {
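
Both modes of the new waitForRunning flag appear in this diff: the scheduler-predicates call sites now end in }, true) so the helper blocks until the labeled pods are running, while SpreadServiceOrFail passes false and instead waits for scheduling via waitForPodsWithLabelScheduled. A hypothetical pair of calls (the client c, namespace ns, replica count, name prefixes, and pod template are assumed for illustration):

	startPods(c, 3, ns, "wait-for-running", pod, true)  // returns only after all 3 pods report Running
	startPods(c, 3, ns, "create-only", pod, false)      // returns right after the Create calls; caller waits separately
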
@@ -2028,7 +2060,42 @@ waitLoop:
	return nil
}

// Wait up to 10 minutes for getting pods with certain label
// Returns true if all the specified pods are scheduled, else returns false.
func podsWithLabelScheduled(c *client.Client, ns string, label labels.Selector) (bool, error) {
	podStore := newPodStore(c, ns, label, fields.Everything())
	defer podStore.Stop()
	pods := podStore.List()
	if len(pods) == 0 {
		return false, nil
	}
	for _, pod := range pods {
		if pod.Spec.NodeName == "" {
			return false, nil
		}
	}
	return true, nil
}

// Wait for all matching pods to become scheduled and at least one
// matching pod exists. Return the list of matching pods.
func waitForPodsWithLabelScheduled(c *client.Client, ns string, label labels.Selector) (pods *api.PodList, err error) {
	err = wait.PollImmediate(poll, podScheduledBeforeTimeout,
		func() (bool, error) {
			pods, err = waitForPodsWithLabel(c, ns, label)
			if err != nil {
				return false, err
			}
			for _, pod := range pods.Items {
				if pod.Spec.NodeName == "" {
					return false, nil
				}
			}
			return true, nil
		})
	return pods, err
}

// Wait up to podListTimeout for getting pods with certain label
func waitForPodsWithLabel(c *client.Client, ns string, label labels.Selector) (pods *api.PodList, err error) {
	for t := time.Now(); time.Since(t) < podListTimeout; time.Sleep(poll) {
		options := api.ListOptions{LabelSelector: label}
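
waitForPodsWithLabelScheduled re-runs the label query until every matching pod has a NodeName or podScheduledBeforeTimeout elapses, delegating the retry loop to wait.PollImmediate. Stripped of the Kubernetes client types, the underlying pattern is a plain poll-until-done loop; a minimal sketch with standard-library Go only (pollUntilScheduled and the simulated check are invented for illustration):

	package main

	import (
		"errors"
		"fmt"
		"time"
	)

	// pollUntilScheduled re-runs check every interval until it reports done or the
	// timeout elapses, mirroring the wait.PollImmediate usage above.
	func pollUntilScheduled(interval, timeout time.Duration, check func() (bool, error)) error {
		deadline := time.Now().Add(timeout)
		for {
			done, err := check()
			if err != nil {
				return err
			}
			if done {
				return nil
			}
			if time.Now().After(deadline) {
				return errors.New("timed out waiting for pods to be scheduled")
			}
			time.Sleep(interval)
		}
	}

	func main() {
		attempts := 0
		err := pollUntilScheduled(10*time.Millisecond, time.Second, func() (bool, error) {
			attempts++
			// Pretend the last pod only gets a NodeName on the third poll.
			return attempts >= 3, nil
		})
		fmt.Println("scheduled after", attempts, "polls, err =", err)
	}
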
@@ -3149,3 +3216,12 @@ func unblockNetwork(from string, to string) {
		"required on host %s: remove rule %s, if exists", from, iptablesRule)
	}
}

func isElementOf(podUID types.UID, pods *api.PodList) bool {
	for _, pod := range pods.Items {
		if pod.UID == podUID {
			return true
		}
	}
	return false
}