Add conformance annotations for proxy and scheduler predicate tests

Signed-off-by: Brad Topol <btopol@us.ibm.com>
pull/6/head
Brad Topol 2017-10-23 11:30:05 -07:00
parent 060b4b8b84
commit 095913e9d7
2 changed files with 43 additions and 0 deletions

View File

@@ -62,16 +62,44 @@ var _ = SIGDescribe("Proxy", func() {
prefix := "/api/" + version
// Port here has to be kept in sync with default kubelet port.
/*
Testname: proxy-prefix-node-logs-port
Description: Ensure that proxy on node logs works with generic top
level prefix proxy and explicit kubelet port.
*/
It("should proxy logs on node with explicit kubelet port [Conformance]", func() { nodeProxyTest(f, prefix+"/proxy/nodes/", ":10250/logs/") })
/*
Testname: proxy-prefix-node-logs
Description: Ensure that proxy on node logs works with generic top
level prefix proxy.
*/
It("should proxy logs on node [Conformance]", func() { nodeProxyTest(f, prefix+"/proxy/nodes/", "/logs/") })
It("should proxy to cadvisor", func() { nodeProxyTest(f, prefix+"/proxy/nodes/", ":4194/containers/") })
/*
Testname: proxy-subresource-node-logs-port
Description: Ensure that proxy on node logs works with node proxy
subresource and explicit kubelet port.
*/
It("should proxy logs on node with explicit kubelet port using proxy subresource [Conformance]", func() { nodeProxyTest(f, prefix+"/nodes/", ":10250/proxy/logs/") })
/*
Testname: proxy-subresource-node-logs
Description: Ensure that proxy on node logs works with node proxy
subresource.
*/
It("should proxy logs on node using proxy subresource [Conformance]", func() { nodeProxyTest(f, prefix+"/nodes/", "/proxy/logs/") })
It("should proxy to cadvisor using proxy subresource", func() { nodeProxyTest(f, prefix+"/nodes/", ":4194/proxy/containers/") })
// Uses the porter image to serve content, then accesses that content by
// proxying through a service and through a pod (TODO: confirm whether
// multiple pods/endpoints are exercised here).
/*
Testname: proxy-service-pod
Description: Ensure that proxy through a service and a pod works with
both generic top level prefix proxy and proxy subresource.
*/
It("should proxy through a service and a pod [Conformance]", func() {
start := time.Now()
labels := map[string]string{"proxy-service-target": "true"}

View File

@@ -237,6 +237,11 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
// 3. Wait for the pods to be scheduled.
// 4. Create another pod with no affinity to any node that need 50% of the largest node CPU.
// 5. Make sure this additional pod is not scheduled.
/*
Testname: scheduler-resource-limits
Description: Ensure that the scheduler accounts for node resources correctly
and respects the pods' resource requirements during scheduling.
*/
It("validates resource limits of pods that are allowed to run [Conformance]", func() {
framework.WaitForStableCluster(cs, masterNodes)
nodeMaxAllocatable := int64(0)
@@ -338,6 +343,11 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
// Test Nodes does not have any label, hence it should be impossible to schedule Pod with
// nonempty Selector set.
/*
Testname: scheduler-node-selector-not-matching
Description: Ensure that scheduler respects the NodeSelector field of
PodSpec during scheduling (when it does not match any node).
*/
It("validates that NodeSelector is respected if not matching [Conformance]", func() {
By("Trying to schedule Pod with nonempty NodeSelector.")
podName := "restricted-pod"
@@ -379,6 +389,11 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
}
})
/*
Testname: scheduler-node-selector-matching
Description: Ensure that scheduler respects the NodeSelector field
of PodSpec during scheduling (when it matches).
*/
It("validates that NodeSelector is respected if matching [Conformance]", func() {
nodeName := GetNodeThatCanRunPod(f)