Merge pull request #24445 from vishh/framework-node-e2e

Automatic merge from submit-queue

Framework support for node e2e.

This should let us port existing e2e tests to the node e2e suite when the tests are node-specific.
k8s-merge-robot 2016-05-02 02:17:48 -07:00
commit 66730c53c5
6 changed files with 439 additions and 275 deletions
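At a glance, the API change is that framework.NewFramework now takes an optional pre-built client in addition to the base name and options. Below is a minimal sketch of the two call forms, using only identifiers that appear in this diff; the helper function and file are illustrative, not part of the PR.

package e2e_node

import (
	"flag"

	"k8s.io/kubernetes/pkg/client/restclient"
	client "k8s.io/kubernetes/pkg/client/unversioned"
	"k8s.io/kubernetes/test/e2e/framework"
)

// apiServerAddress mirrors the flag moved into test/e2e_node/util.go below.
var apiServerAddress = flag.String("api-server-address", "http://127.0.0.1:8080", "Host and port of the api server")

// newFrameworks is an illustrative helper showing both call forms (sketch only).
func newFrameworks() (*framework.Framework, *framework.Framework) {
	options := framework.FrameworkOptions{ClientQPS: 20, ClientBurst: 50}

	// Cluster e2e: pass nil and let Framework.BeforeEach build a client via LoadConfig().
	clusterF := framework.NewFramework("load", options, nil)

	// Node e2e: inject an unversioned client built against the local api server,
	// so BeforeEach reuses it instead of loading a kubeconfig.
	c := client.NewOrDie(&restclient.Config{Host: *apiServerAddress})
	nodeF := framework.NewFramework("pod-scheduling", options, c)

	return clusterF, nodeF
}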


@@ -18,7 +18,7 @@ KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
source "${KUBE_ROOT}/hack/lib/init.sh"
kube::golang::setup_env
focus=${FOCUS}
focus=${FOCUS:-""}
skip=${SKIP:-""}
ginkgo=$(kube::util::find-binary "ginkgo")


@@ -94,14 +94,15 @@ func NewDefaultFramework(baseName string) *Framework {
ClientQPS: 20,
ClientBurst: 50,
}
return NewFramework(baseName, options)
return NewFramework(baseName, options, nil)
}
func NewFramework(baseName string, options FrameworkOptions) *Framework {
func NewFramework(baseName string, options FrameworkOptions, client *client.Client) *Framework {
f := &Framework{
BaseName: baseName,
AddonResourceConstraints: make(map[string]ResourceConstraint),
options: options,
Client: client,
}
BeforeEach(f.BeforeEach)
@@ -116,17 +117,18 @@ func (f *Framework) BeforeEach() {
// https://github.com/onsi/ginkgo/issues/222
f.cleanupHandle = AddCleanupAction(f.AfterEach)
By("Creating a kubernetes client")
config, err := LoadConfig()
Expect(err).NotTo(HaveOccurred())
config.QPS = f.options.ClientQPS
config.Burst = f.options.ClientBurst
c, err := loadClientFromConfig(config)
Expect(err).NotTo(HaveOccurred())
f.Client = c
f.Clientset_1_2 = adapter_1_2.FromUnversionedClient(c)
f.Clientset_1_3 = adapter_1_3.FromUnversionedClient(c)
if f.Client == nil {
By("Creating a kubernetes client")
config, err := LoadConfig()
Expect(err).NotTo(HaveOccurred())
config.QPS = f.options.ClientQPS
config.Burst = f.options.ClientBurst
c, err := loadClientFromConfig(config)
Expect(err).NotTo(HaveOccurred())
f.Client = c
}
f.Clientset_1_2 = adapter_1_2.FromUnversionedClient(f.Client)
f.Clientset_1_3 = adapter_1_3.FromUnversionedClient(f.Client)
By("Building a namespace api object")
namespace, err := f.CreateNamespace(f.BaseName, map[string]string{
@@ -138,14 +140,14 @@ func (f *Framework) BeforeEach() {
if TestContext.VerifyServiceAccount {
By("Waiting for a default service account to be provisioned in namespace")
err = WaitForDefaultServiceAccountInNamespace(c, namespace.Name)
err = WaitForDefaultServiceAccountInNamespace(f.Client, namespace.Name)
Expect(err).NotTo(HaveOccurred())
} else {
Logf("Skipping waiting for service account")
}
if TestContext.GatherKubeSystemResourceUsageData {
f.gatherer, err = NewResourceUsageGatherer(c, ResourceGathererOptions{inKubemark: ProviderIs("kubemark")})
f.gatherer, err = NewResourceUsageGatherer(f.Client, ResourceGathererOptions{inKubemark: ProviderIs("kubemark")})
if err != nil {
Logf("Error while creating NewResourceUsageGatherer: %v", err)
} else {
@@ -157,7 +159,7 @@ func (f *Framework) BeforeEach() {
f.logsSizeWaitGroup = sync.WaitGroup{}
f.logsSizeWaitGroup.Add(1)
f.logsSizeCloseChannel = make(chan bool)
f.logsSizeVerifier = NewLogsVerifier(c, f.logsSizeCloseChannel)
f.logsSizeVerifier = NewLogsVerifier(f.Client, f.logsSizeCloseChannel)
go func() {
f.logsSizeVerifier.Run()
f.logsSizeWaitGroup.Done()


@@ -71,7 +71,7 @@ var _ = framework.KubeDescribe("Load capacity", func() {
ClientQPS: 50,
ClientBurst: 100,
}
f := framework.NewFramework("load", options)
f := framework.NewFramework("load", options, nil)
f.NamespaceDeletionTimeout = time.Hour
BeforeEach(func() {


@@ -35,13 +35,6 @@ import (
. "github.com/onsi/gomega"
)
var kubeletAddress = flag.String("kubelet-address", "http://127.0.0.1:10255", "Host and port of the kubelet")
var apiServerAddress = flag.String("api-server-address", "http://127.0.0.1:8080", "Host and port of the api server")
var nodeName = flag.String("node-name", "", "Name of the node")
var buildServices = flag.Bool("build-services", true, "If true, build local executables")
var startServices = flag.Bool("start-services", true, "If true, start local node services")
var stopServices = flag.Bool("stop-services", true, "If true, stop local node services after running tests")
var e2es *e2eService
func TestE2eNode(t *testing.T) {


@@ -26,284 +26,181 @@ import (
"time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/client/restclient"
client "k8s.io/kubernetes/pkg/client/unversioned"
apiUnversioned "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats"
"k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/test/e2e/framework"
"github.com/davecgh/go-spew/spew"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = Describe("Kubelet", func() {
var cl *client.Client
BeforeEach(func() {
// Setup the apiserver client
cl = client.NewOrDie(&restclient.Config{Host: *apiServerAddress})
})
Describe("pod scheduling", func() {
namespace := "pod-scheduling"
Context("when scheduling a busybox command in a pod", func() {
podName := "busybox-scheduling"
It("it should return succes", func() {
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
Name: podName,
Namespace: namespace,
},
Spec: api.PodSpec{
// Force the Pod to schedule to the node without a scheduler running
NodeName: *nodeName,
// Don't restart the Pod since it is expected to exit
RestartPolicy: api.RestartPolicyNever,
Containers: []api.Container{
{
Image: "gcr.io/google_containers/busybox",
Name: podName,
Command: []string{"sh", "-c", "echo 'Hello World' ; sleep 240"},
},
var _ = framework.KubeDescribe("Kubelet", func() {
Context("when scheduling a busybox command in a pod", func() {
// Setup the framework
f := NewDefaultFramework("pod-scheduling")
podName := "busybox-scheduling-" + string(util.NewUUID())
It("it should print the output to logs", func() {
podClient := f.Client.Pods(f.Namespace.Name)
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
Name: podName,
},
Spec: api.PodSpec{
// Force the Pod to schedule to the node without a scheduler running
NodeName: *nodeName,
// Don't restart the Pod since it is expected to exit
RestartPolicy: api.RestartPolicyNever,
Containers: []api.Container{
{
Image: "gcr.io/google_containers/busybox",
Name: podName,
Command: []string{"sh", "-c", "echo 'Hello World' ; sleep 240"},
},
},
}
_, err := cl.Pods(namespace).Create(pod)
Expect(err).To(BeNil(), fmt.Sprintf("Error creating Pod %v", err))
})
It("it should print the output to logs", func() {
Eventually(func() string {
sinceTime := unversioned.NewTime(time.Now().Add(time.Duration(-1 * time.Hour)))
rc, err := cl.Pods(namespace).GetLogs(podName, &api.PodLogOptions{SinceTime: &sinceTime}).Stream()
if err != nil {
return ""
}
defer rc.Close()
buf := new(bytes.Buffer)
buf.ReadFrom(rc)
return buf.String()
}, time.Minute, time.Second*4).Should(Equal("Hello World\n"))
})
It("it should be possible to delete", func() {
err := cl.Pods(namespace).Delete(podName, &api.DeleteOptions{})
Expect(err).To(BeNil(), fmt.Sprintf("Error deleting Pod %v", err))
})
})
Context("when scheduling a read only busybox container", func() {
podName := "busybox-readonly-fs"
It("it should return success", func() {
isReadOnly := true
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
Name: podName,
Namespace: namespace,
},
Spec: api.PodSpec{
// Force the Pod to schedule to the node without a scheduler running
NodeName: *nodeName,
// Don't restart the Pod since it is expected to exit
RestartPolicy: api.RestartPolicyNever,
Containers: []api.Container{
{
Image: "gcr.io/google_containers/busybox",
Name: podName,
Command: []string{"sh", "-c", "echo test > /file; sleep 240"},
SecurityContext: &api.SecurityContext{
ReadOnlyRootFilesystem: &isReadOnly,
},
},
},
},
}
_, err := cl.Pods(namespace).Create(pod)
Expect(err).To(BeNil(), fmt.Sprintf("Error creating Pod %v", err))
})
It("it should not write to the root filesystem", func() {
Eventually(func() string {
rc, err := cl.Pods(namespace).GetLogs(podName, &api.PodLogOptions{}).Stream()
if err != nil {
return ""
}
defer rc.Close()
buf := new(bytes.Buffer)
buf.ReadFrom(rc)
return buf.String()
}, time.Minute, time.Second*4).Should(Equal("sh: can't create /file: Read-only file system\n"))
})
It("it should be possible to delete", func() {
err := cl.Pods(namespace).Delete(podName, &api.DeleteOptions{})
Expect(err).To(BeNil(), fmt.Sprintf("Error creating Pod %v", err))
})
})
})
Describe("metrics api", func() {
namespace := "kubelet-metrics-api"
statsPrefix := "stats-busybox-"
podNames := []string{}
podCount := 2
for i := 0; i < podCount; i++ {
podNames = append(podNames, fmt.Sprintf("%s%v", statsPrefix, i))
}
BeforeEach(func() {
for _, podName := range podNames {
createPod(cl, podName, namespace, []api.Container{
{
Image: "gcr.io/google_containers/busybox",
Command: []string{"sh", "-c", "while true; do echo 'hello world' | tee ~/file | tee /test-empty-dir-mnt ; sleep 1; done"},
Name: podName + containerSuffix,
VolumeMounts: []api.VolumeMount{
{MountPath: "/test-empty-dir-mnt", Name: "test-empty-dir"},
},
},
}, []api.Volume{
// TODO: Test secret volumes
// TODO: Test hostpath volumes
{Name: "test-empty-dir", VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}},
})
},
}
// Sleep long enough for cadvisor to see the pod and calculate all of its metrics
// TODO: Get this to work with polling / eventually
time.Sleep(time.Minute * 2)
defer podClient.Delete(pod.Name, nil)
_, err := podClient.Create(pod)
Expect(err).To(BeNil(), fmt.Sprintf("Error creating Pod %v", err))
framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
Eventually(func() string {
sinceTime := apiUnversioned.NewTime(time.Now().Add(time.Duration(-1 * time.Hour)))
rc, err := podClient.GetLogs(podName, &api.PodLogOptions{SinceTime: &sinceTime}).Stream()
if err != nil {
return ""
}
defer rc.Close()
buf := new(bytes.Buffer)
buf.ReadFrom(rc)
return buf.String()
}, time.Minute, time.Second*4).Should(Equal("Hello World\n"))
})
})
Context("when scheduling a read only busybox container", func() {
f := NewDefaultFramework("pod-scheduling")
podName := "busybox-readonly-fs" + string(util.NewUUID())
It("it should not write to root filesystem", func() {
podClient := f.Client.Pods(f.Namespace.Name)
isReadOnly := true
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
Name: podName,
},
Spec: api.PodSpec{
// Force the Pod to schedule to the node without a scheduler running
NodeName: *nodeName,
// Don't restart the Pod since it is expected to exit
RestartPolicy: api.RestartPolicyNever,
Containers: []api.Container{
{
Image: "gcr.io/google_containers/busybox",
Name: podName,
Command: []string{"sh", "-c", "echo test > /file; sleep 240"},
SecurityContext: &api.SecurityContext{
ReadOnlyRootFilesystem: &isReadOnly,
},
},
},
},
}
defer podClient.Delete(pod.Name, nil)
_, err := podClient.Create(pod)
Expect(err).To(BeNil(), fmt.Sprintf("Error creating Pod %v", err))
Eventually(func() string {
rc, err := podClient.GetLogs(podName, &api.PodLogOptions{}).Stream()
if err != nil {
return ""
}
defer rc.Close()
buf := new(bytes.Buffer)
buf.ReadFrom(rc)
return buf.String()
}, time.Minute, time.Second*4).Should(Equal("sh: can't create /file: Read-only file system\n"))
})
})
Describe("metrics api", func() {
// Setup the framework
f := NewDefaultFramework("kubelet-metrics-api")
Context("when querying /stats/summary", func() {
It("it should report resource usage through the stats api", func() {
podNamePrefix := "stats-busybox-" + string(util.NewUUID())
volumeNamePrefix := "test-empty-dir"
podNames, volumes := createSummaryTestPods(f, podNamePrefix, 2, volumeNamePrefix)
By("Returning stats summary")
resp, err := http.Get(*kubeletAddress + "/stats/summary")
Expect(err).To(BeNil(), fmt.Sprintf("Failed to get /stats/summary"))
summary := stats.Summary{}
contentsBytes, err := ioutil.ReadAll(resp.Body)
Expect(err).To(BeNil(), fmt.Sprintf("Failed to read /stats/summary: %+v", resp))
contents := string(contentsBytes)
decoder := json.NewDecoder(strings.NewReader(contents))
err = decoder.Decode(&summary)
Expect(err).To(BeNil(), fmt.Sprintf("Failed to parse /stats/summary to go struct: %+v", resp))
By("Having resources for node")
Expect(summary.Node.NodeName).To(Equal(*nodeName))
Expect(summary.Node.CPU.UsageCoreNanoSeconds).NotTo(BeNil())
Expect(*summary.Node.CPU.UsageCoreNanoSeconds).NotTo(BeZero())
Expect(summary.Node.Memory.UsageBytes).NotTo(BeNil())
Expect(*summary.Node.Memory.UsageBytes).NotTo(BeZero())
Expect(summary.Node.Memory.WorkingSetBytes).NotTo(BeNil())
Expect(*summary.Node.Memory.WorkingSetBytes).NotTo(BeZero())
Expect(summary.Node.Fs.AvailableBytes).NotTo(BeNil())
Expect(*summary.Node.Fs.AvailableBytes).NotTo(BeZero())
Expect(summary.Node.Fs.CapacityBytes).NotTo(BeNil())
Expect(*summary.Node.Fs.CapacityBytes).NotTo(BeZero())
Expect(summary.Node.Fs.UsedBytes).NotTo(BeNil())
Expect(*summary.Node.Fs.UsedBytes).NotTo(BeZero())
By("Having container runtime's image storage information")
Expect(summary.Node.Runtime).NotTo(BeNil())
Expect(summary.Node.Runtime.ImageFs).NotTo(BeNil())
Expect(summary.Node.Runtime.ImageFs.AvailableBytes).NotTo(BeNil())
Expect(*summary.Node.Runtime.ImageFs.AvailableBytes).NotTo(BeZero())
Expect(summary.Node.Runtime.ImageFs.CapacityBytes).NotTo(BeNil())
Expect(*summary.Node.Runtime.ImageFs.CapacityBytes).NotTo(BeZero())
Expect(summary.Node.Runtime.ImageFs.UsedBytes).NotTo(BeNil())
Expect(*summary.Node.Runtime.ImageFs.UsedBytes).NotTo(BeZero())
By("Having resources for kubelet and runtime system containers")
sysContainers := map[string]stats.ContainerStats{}
sysContainersList := []string{}
for _, container := range summary.Node.SystemContainers {
sysContainers[container.Name] = container
sysContainersList = append(sysContainersList, container.Name)
ExpectContainerStatsNotEmpty(&container)
}
Expect(sysContainersList).To(ContainElement("kubelet"))
Expect(sysContainersList).To(ContainElement("runtime"))
// Verify Pods Stats are present
podsList := []string{}
By("Having resources for pods")
for _, pod := range summary.Pods {
if !strings.HasPrefix(pod.PodRef.Name, statsPrefix) {
// Ignore pods created outside this test
continue
Eventually(func() error {
resp, err := http.Get(*kubeletAddress + "/stats/summary")
if err != nil {
return fmt.Errorf("Failed to get /stats/summary - %v", err)
}
podsList = append(podsList, pod.PodRef.Name)
Expect(pod.Containers).To(HaveLen(1))
container := pod.Containers[0]
Expect(container.Name).To(Equal(pod.PodRef.Name + containerSuffix))
ExpectContainerStatsNotEmpty(&container)
// emptydir volume
volumeNames := []string{}
for _, vs := range pod.VolumeStats {
Expect(vs.CapacityBytes).NotTo(BeZero())
Expect(vs.AvailableBytes).NotTo(BeZero())
Expect(vs.UsedBytes).NotTo(BeZero())
volumeNames = append(volumeNames, vs.Name)
contentsBytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
return fmt.Errorf("Failed to read /stats/summary - %+v", resp)
}
Expect(volumeNames).To(ConsistOf("test-empty-dir"))
// fs usage (not for system containers)
Expect(container.Rootfs).NotTo(BeNil(), spew.Sdump(container))
Expect(container.Rootfs.AvailableBytes).NotTo(BeNil(), spew.Sdump(container))
Expect(*container.Rootfs.AvailableBytes).NotTo(BeZero(), spew.Sdump(container))
Expect(container.Rootfs.CapacityBytes).NotTo(BeNil(), spew.Sdump(container))
Expect(*container.Rootfs.CapacityBytes).NotTo(BeZero(), spew.Sdump(container))
Expect(container.Rootfs.UsedBytes).NotTo(BeNil(), spew.Sdump(container))
Expect(*container.Rootfs.UsedBytes).NotTo(BeZero(), spew.Sdump(container))
Expect(container.Logs).NotTo(BeNil(), spew.Sdump(container))
Expect(container.Logs.AvailableBytes).NotTo(BeNil(), spew.Sdump(container))
Expect(*container.Logs.AvailableBytes).NotTo(BeZero(), spew.Sdump(container))
Expect(container.Logs.CapacityBytes).NotTo(BeNil(), spew.Sdump(container))
Expect(*container.Logs.CapacityBytes).NotTo(BeZero(), spew.Sdump(container))
Expect(container.Logs.UsedBytes).NotTo(BeNil(), spew.Sdump(container))
Expect(*container.Logs.UsedBytes).NotTo(BeZero(), spew.Sdump(container))
}
Expect(podsList).To(ConsistOf(podNames), spew.Sdump(summary))
contents := string(contentsBytes)
decoder := json.NewDecoder(strings.NewReader(contents))
err = decoder.Decode(&summary)
if err != nil {
return fmt.Errorf("Failed to parse /stats/summary to go struct: %+v", resp)
}
missingPods := podsMissingFromSummary(summary, podNames)
if missingPods.Len() != 0 {
return fmt.Errorf("expected pods not found. Following pods are missing - %v", missingPods)
}
missingVolumes := volumesMissingFromSummary(summary, volumes)
if missingVolumes.Len() != 0 {
return fmt.Errorf("expected volumes not found. Following volumes are missing - %v", missingVolumes)
}
if err := testSummaryMetrics(summary, podNamePrefix); err != nil {
return err
}
return nil
}, 5*time.Minute, time.Second*4).Should(BeNil())
})
})
AfterEach(func() {
for _, podName := range podNames {
err := cl.Pods(namespace).Delete(podName, &api.DeleteOptions{})
Expect(err).To(BeNil(), fmt.Sprintf("Error deleting Pod %v", podName))
}
})
})
})
func ExpectContainerStatsNotEmpty(container *stats.ContainerStats) {
// TODO: Test Network
Expect(container.CPU).NotTo(BeNil(), spew.Sdump(container))
Expect(container.CPU.UsageCoreNanoSeconds).NotTo(BeNil(), spew.Sdump(container))
Expect(*container.CPU.UsageCoreNanoSeconds).NotTo(BeZero(), spew.Sdump(container))
Expect(container.Memory).NotTo(BeNil(), spew.Sdump(container))
Expect(container.Memory.UsageBytes).NotTo(BeNil(), spew.Sdump(container))
Expect(*container.Memory.UsageBytes).NotTo(BeZero(), spew.Sdump(container))
Expect(container.Memory.WorkingSetBytes).NotTo(BeNil(), spew.Sdump(container))
Expect(*container.Memory.WorkingSetBytes).NotTo(BeZero(), spew.Sdump(container))
}
const (
containerSuffix = "-c"
)
func createPod(cl *client.Client, podName string, namespace string, containers []api.Container, volumes []api.Volume) {
func createSummaryTestPods(f *framework.Framework, podNamePrefix string, count int, volumeNamePrefix string) (sets.String, sets.String) {
podNames := sets.NewString()
volumes := sets.NewString(volumeNamePrefix)
for i := 0; i < count; i++ {
podNames.Insert(fmt.Sprintf("%s%v", podNamePrefix, i))
}
for _, podName := range podNames.List() {
createPod(f, podName, []api.Container{
{
Image: "gcr.io/google_containers/busybox",
Command: []string{"sh", "-c", "while true; do echo 'hello world' | tee ~/file | tee /test-empty-dir-mnt ; sleep 1; done"},
Name: podName + containerSuffix,
VolumeMounts: []api.VolumeMount{
{MountPath: "/test-empty-dir-mnt", Name: volumeNamePrefix},
},
},
}, []api.Volume{
// TODO: Test secret volumes
// TODO: Test hostpath volumes
{Name: volumeNamePrefix, VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}},
})
}
return podNames, volumes
}
func createPod(f *framework.Framework, podName string, containers []api.Container, volumes []api.Volume) {
podClient := f.Client.Pods(f.Namespace.Name)
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
Name: podName,
Namespace: namespace,
Name: podName,
},
Spec: api.PodSpec{
// Force the Pod to schedule to the node without a scheduler running
@@ -314,6 +211,238 @@ func createPod(cl *client.Client, podName string, namespace string, containers [
Volumes: volumes,
},
}
_, err := cl.Pods(namespace).Create(pod)
_, err := podClient.Create(pod)
Expect(err).To(BeNil(), fmt.Sprintf("Error creating Pod %v", err))
framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
}
// Returns pods missing from summary.
func podsMissingFromSummary(s stats.Summary, expectedPods sets.String) sets.String {
expectedPods = sets.StringKeySet(expectedPods)
for _, pod := range s.Pods {
if expectedPods.Has(pod.PodRef.Name) {
expectedPods.Delete(pod.PodRef.Name)
}
}
return expectedPods
}
// Returns volumes missing from summary.
func volumesMissingFromSummary(s stats.Summary, expectedVolumes sets.String) sets.String {
for _, pod := range s.Pods {
expectedPodVolumes := sets.StringKeySet(expectedVolumes)
for _, vs := range pod.VolumeStats {
if expectedPodVolumes.Has(vs.Name) {
expectedPodVolumes.Delete(vs.Name)
}
}
if expectedPodVolumes.Len() != 0 {
return expectedPodVolumes
}
}
return sets.NewString()
}
func testSummaryMetrics(s stats.Summary, podNamePrefix string) error {
const (
nonNilValue = "expected %q to not be nil"
nonZeroValue = "expected %q to not be zero"
)
if s.Node.NodeName != *nodeName {
return fmt.Errorf("unexpected node name - %q", s.Node.NodeName)
}
if s.Node.CPU.UsageCoreNanoSeconds == nil {
return fmt.Errorf(nonNilValue, "cpu instantaneous")
}
if *s.Node.CPU.UsageCoreNanoSeconds == 0 {
return fmt.Errorf(nonZeroValue, "cpu instantaneous")
}
if s.Node.Memory.UsageBytes == nil {
return fmt.Errorf(nonNilValue, "memory")
}
if *s.Node.Memory.UsageBytes == 0 {
return fmt.Errorf(nonZeroValue, "memory")
}
if s.Node.Memory.WorkingSetBytes == nil {
return fmt.Errorf(nonNilValue, "memory working set")
}
if *s.Node.Memory.WorkingSetBytes == 0 {
return fmt.Errorf(nonZeroValue, "memory working set")
}
if s.Node.Fs.AvailableBytes == nil {
return fmt.Errorf(nonNilValue, "memory working set")
}
if *s.Node.Fs.AvailableBytes == 0 {
return fmt.Errorf(nonZeroValue, "node Fs available")
}
if s.Node.Fs.CapacityBytes == nil {
return fmt.Errorf(nonNilValue, "node fs capacity")
}
if *s.Node.Fs.CapacityBytes == 0 {
return fmt.Errorf(nonZeroValue, "node fs capacity")
}
if s.Node.Fs.UsedBytes == nil {
return fmt.Errorf(nonNilValue, "node fs used")
}
if *s.Node.Fs.UsedBytes == 0 {
return fmt.Errorf(nonZeroValue, "node fs used")
}
if s.Node.Runtime == nil {
return fmt.Errorf(nonNilValue, "node runtime")
}
if s.Node.Runtime.ImageFs == nil {
return fmt.Errorf(nonNilValue, "runtime image Fs")
}
if s.Node.Runtime.ImageFs.AvailableBytes == nil {
return fmt.Errorf(nonNilValue, "runtime image Fs available")
}
if *s.Node.Runtime.ImageFs.AvailableBytes == 0 {
return fmt.Errorf(nonZeroValue, "runtime image Fs available")
}
if s.Node.Runtime.ImageFs.CapacityBytes == nil {
return fmt.Errorf(nonNilValue, "runtime image Fs capacity")
}
if *s.Node.Runtime.ImageFs.CapacityBytes == 0 {
return fmt.Errorf(nonZeroValue, "runtime image Fs capacity")
}
if s.Node.Runtime.ImageFs.UsedBytes == nil {
return fmt.Errorf(nonNilValue, "runtime image Fs usage")
}
if *s.Node.Runtime.ImageFs.UsedBytes == 0 {
return fmt.Errorf(nonZeroValue, "runtime image Fs usage")
}
sysContainers := map[string]stats.ContainerStats{}
for _, container := range s.Node.SystemContainers {
sysContainers[container.Name] = container
if err := expectContainerStatsNotEmpty(&container); err != nil {
return err
}
}
if _, exists := sysContainers["kubelet"]; !exists {
return fmt.Errorf("expected metrics for kubelet")
}
if _, exists := sysContainers["runtime"]; !exists {
return fmt.Errorf("expected metrics for runtime")
}
// Verify Pods Stats are present
podsList := []string{}
By("Having resources for pods")
for _, pod := range s.Pods {
if !strings.HasPrefix(pod.PodRef.Name, podNamePrefix) {
// Ignore pods created outside this test
continue
}
podsList = append(podsList, pod.PodRef.Name)
if len(pod.Containers) != 1 {
return fmt.Errorf("expected only one container")
}
container := pod.Containers[0]
if container.Name != (pod.PodRef.Name + containerSuffix) {
return fmt.Errorf("unexpected container name - %q", container.Name)
}
if err := expectContainerStatsNotEmpty(&container); err != nil {
return err
}
// emptydir volume
foundExpectedVolume := false
for _, vs := range pod.VolumeStats {
if *vs.CapacityBytes == 0 {
return fmt.Errorf(nonZeroValue, "volume capacity")
}
if *vs.AvailableBytes == 0 {
return fmt.Errorf(nonZeroValue, "volume available")
}
if *vs.UsedBytes == 0 {
return fmt.Errorf(nonZeroValue, "volume used")
}
if vs.Name == "test-empty-dir" {
foundExpectedVolume = true
}
}
if !foundExpectedVolume {
return fmt.Errorf("expected 'test-empty-dir' volume")
}
// fs usage (not for system containers)
if container.Rootfs == nil {
return fmt.Errorf(nonNilValue+" - "+spew.Sdump(container), "container root fs")
}
if container.Rootfs.AvailableBytes == nil {
return fmt.Errorf(nonNilValue+" - "+spew.Sdump(container), "container root fs available")
}
if *container.Rootfs.AvailableBytes == 0 {
return fmt.Errorf(nonZeroValue+" - "+spew.Sdump(container), "container root fs available")
}
if container.Rootfs.CapacityBytes == nil {
return fmt.Errorf(nonNilValue+" - "+spew.Sdump(container), "container root fs capacity")
}
if *container.Rootfs.CapacityBytes == 0 {
return fmt.Errorf(nonZeroValue+" - "+spew.Sdump(container), "container root fs capacity")
}
if container.Rootfs.UsedBytes == nil {
return fmt.Errorf(nonNilValue+" - "+spew.Sdump(container), "container root fs usage")
}
if *container.Rootfs.UsedBytes == 0 {
return fmt.Errorf(nonZeroValue+" - "+spew.Sdump(container), "container root fs usage")
}
if container.Logs == nil {
return fmt.Errorf(nonNilValue+" - "+spew.Sdump(container), "container logs")
}
if container.Logs.AvailableBytes == nil {
return fmt.Errorf(nonNilValue+" - "+spew.Sdump(container), "container logs available")
}
if *container.Logs.AvailableBytes == 0 {
return fmt.Errorf(nonZeroValue+" - "+spew.Sdump(container), "container logs available")
}
if container.Logs.CapacityBytes == nil {
return fmt.Errorf(nonNilValue+" - "+spew.Sdump(container), "container logs capacity")
}
if *container.Logs.CapacityBytes == 0 {
return fmt.Errorf(nonZeroValue+" - "+spew.Sdump(container), "container logs capacity")
}
if container.Logs.UsedBytes == nil {
return fmt.Errorf(nonNilValue+" - "+spew.Sdump(container), "container logs usage")
}
if *container.Logs.UsedBytes == 0 {
return fmt.Errorf(nonZeroValue+" - "+spew.Sdump(container), "container logs usage")
}
}
return nil
}
func expectContainerStatsNotEmpty(container *stats.ContainerStats) error {
// TODO: Test Network
if container.CPU == nil {
return fmt.Errorf("expected container cpu to be not nil - %q", spew.Sdump(container))
}
if container.CPU.UsageCoreNanoSeconds == nil {
return fmt.Errorf("expected container cpu instantaneous usage to be not nil - %q", spew.Sdump(container))
}
if *container.CPU.UsageCoreNanoSeconds == 0 {
return fmt.Errorf("expected container cpu instantaneous usage to be non zero - %q", spew.Sdump(container))
}
if container.Memory == nil {
return fmt.Errorf("expected container memory to be not nil - %q", spew.Sdump(container))
}
if container.Memory.UsageBytes == nil {
return fmt.Errorf("expected container memory usage to be not nil - %q", spew.Sdump(container))
}
if *container.Memory.UsageBytes == 0 {
return fmt.Errorf("expected container memory usage to be non zero - %q", spew.Sdump(container))
}
if container.Memory.WorkingSetBytes == nil {
return fmt.Errorf("expected container memory working set to be not nil - %q", spew.Sdump(container))
}
if *container.Memory.WorkingSetBytes == 0 {
return fmt.Errorf("expected container memory working set to be non zero - %q", spew.Sdump(container))
}
return nil
}

test/e2e_node/util.go (new file)

@@ -0,0 +1,40 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e_node
import (
"flag"
"k8s.io/kubernetes/pkg/client/restclient"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/test/e2e/framework"
)
var kubeletAddress = flag.String("kubelet-address", "http://127.0.0.1:10255", "Host and port of the kubelet")
var apiServerAddress = flag.String("api-server-address", "http://127.0.0.1:8080", "Host and port of the api server")
var nodeName = flag.String("node-name", "", "Name of the node")
var buildServices = flag.Bool("build-services", true, "If true, build local executables")
var startServices = flag.Bool("start-services", true, "If true, start local node services")
var stopServices = flag.Bool("stop-services", true, "If true, stop local node services after running tests")
func NewDefaultFramework(baseName string) *framework.Framework {
client := client.NewOrDie(&restclient.Config{Host: *apiServerAddress})
return framework.NewFramework(baseName, framework.FrameworkOptions{
ClientQPS: 100,
ClientBurst: 100,
}, client)
}