mirror of https://github.com/k3s-io/k3s
Merge pull request #54431 from bradtopol/addpredicateproxyconform
Automatic merge from submit-queue (batch tested with PRs 54455, 54431). If you want to cherry-pick this change to another branch, please follow the instructions here: https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md

Add conformance annotations for proxy and scheduler predicate tests

Signed-off-by: Brad Topol <btopol@us.ibm.com>

/sig testing
/area conformance
@sig-testing-pr-reviews

This PR adds proxy and scheduler predicate related conformance annotations to the e2e test suite. It fixes a portion of #53822, focusing on adding conformance annotations, as defined by the Kubernetes Conformance Workgroup, for a subset of the pod-based e2e conformance tests.

Special notes for your reviewer: Please see https://docs.google.com/spreadsheets/d/1WWSOqFaG35VmmPOYbwetapj1VPOVMqjZfR9ih5To5gk/edit#gid=62929400 for the list of SIG Arch approved test names and descriptions that I am using.

**Release note**:
```release-note
NONE
```
commit
7914aed80a
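For context, every annotation this PR adds follows the same comment-block pattern placed directly above a Ginkgo `It`. Below is a minimal, self-contained sketch of that pattern; the package name, Testname, and test body are invented for illustration and are not part of the PR:

```go
package example

import (
    . "github.com/onsi/ginkgo"
)

var _ = Describe("Example suite", func() {
    /*
        Testname: example-conformance-test
        Description: Ensure that the behavior required for conformance holds.
    */
    It("should exhibit the documented behavior [Conformance]", func() {
        // assertions exercising the behavior would go here
    })
})
```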
@@ -62,16 +62,44 @@ var _ = SIGDescribe("Proxy", func() {
```go
        prefix := "/api/" + version

        // Port here has to be kept in sync with default kubelet port.
        /*
            Testname: proxy-prefix-node-logs-port
            Description: Ensure that proxy on node logs works with generic top
            level prefix proxy and explicit kubelet port.
        */
        It("should proxy logs on node with explicit kubelet port [Conformance]", func() { nodeProxyTest(f, prefix+"/proxy/nodes/", ":10250/logs/") })

        /*
            Testname: proxy-prefix-node-logs
            Description: Ensure that proxy on node logs works with generic top
            level prefix proxy.
        */
        It("should proxy logs on node [Conformance]", func() { nodeProxyTest(f, prefix+"/proxy/nodes/", "/logs/") })

        It("should proxy to cadvisor", func() { nodeProxyTest(f, prefix+"/proxy/nodes/", ":4194/containers/") })

        /*
            Testname: proxy-subresource-node-logs-port
            Description: Ensure that proxy on node logs works with node proxy
            subresource and explicit kubelet port.
        */
        It("should proxy logs on node with explicit kubelet port using proxy subresource [Conformance]", func() { nodeProxyTest(f, prefix+"/nodes/", ":10250/proxy/logs/") })

        /*
            Testname: proxy-subresource-node-logs
            Description: Ensure that proxy on node logs works with node proxy
            subresource.
        */
        It("should proxy logs on node using proxy subresource [Conformance]", func() { nodeProxyTest(f, prefix+"/nodes/", "/proxy/logs/") })

        It("should proxy to cadvisor using proxy subresource", func() { nodeProxyTest(f, prefix+"/nodes/", ":4194/proxy/containers/") })

        // using the porter image to serve content, access the content
        // (of multiple pods?) from multiple (endpoints/services?)
        /*
            Testname: proxy-service-pod
            Description: Ensure that proxy through a service and a pod works with
            both generic top level prefix proxy and proxy subresource.
        */
        It("should proxy through a service and a pod [Conformance]", func() {
            start := time.Now()
            labels := map[string]string{"proxy-service-target": "true"}
```
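The proxy tests above all delegate to a `nodeProxyTest` helper defined elsewhere in the suite. As a rough, hypothetical sketch of the idea (not the upstream implementation, which also handles node selection, retries, and latency accounting), such a helper boils down to issuing a GET through the API server against a node proxy path; the sketch assumes a recent client-go where `DoRaw` takes a context:

```go
package example

import (
    "context"

    "k8s.io/client-go/kubernetes"
)

// NodeLogsViaProxy fetches a node's /logs/ endpoint through the API server
// proxy. prefix is e.g. "/api/v1/proxy/nodes/" (generic top-level prefix) or
// "/api/v1/nodes/" (proxy subresource); dest is e.g. ":10250/logs/" or
// "/proxy/logs/", mirroring the combinations the tests exercise.
func NodeLogsViaProxy(ctx context.Context, cs kubernetes.Interface, prefix, node, dest string) ([]byte, error) {
    // AbsPath targets an arbitrary API server path, which is how both the
    // top-level proxy prefix and the proxy subresource are reached.
    return cs.CoreV1().RESTClient().Get().AbsPath(prefix + node + dest).DoRaw(ctx)
}
```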
@@ -237,6 +237,11 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
```go
    // 3. Wait for the pods to be scheduled.
    // 4. Create another pod with no affinity to any node that needs 50% of the largest node's CPU.
    // 5. Make sure this additional pod is not scheduled.
    /*
        Testname: scheduler-resource-limits
        Description: Ensure that scheduler accounts node resources correctly
        and respects pods' resource requirements during scheduling.
    */
    It("validates resource limits of pods that are allowed to run [Conformance]", func() {
        framework.WaitForStableCluster(cs, masterNodes)
        nodeMaxAllocatable := int64(0)
```
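To make the resource-limits scenario concrete, here is a rough sketch (not the upstream test body) of the kind of pod object such a test relies on: a pod whose container requests a specific amount of CPU, so that a final pod asking for more than any node's remaining allocatable CPU must stay Pending. The pod name, image, and package name are illustrative:

```go
package example

import (
    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// PodRequestingCPU builds a pod whose single container requests milliCPU
// millicores. Filling nodes with such pods and then creating one more that
// requests more CPU than any node has left is what forces the scheduler to
// leave the final pod unscheduled.
func PodRequestingCPU(name string, milliCPU int64) *v1.Pod {
    return &v1.Pod{
        ObjectMeta: metav1.ObjectMeta{Name: name},
        Spec: v1.PodSpec{
            Containers: []v1.Container{{
                Name:  "pause",
                Image: "k8s.gcr.io/pause:3.1",
                Resources: v1.ResourceRequirements{
                    Requests: v1.ResourceList{
                        v1.ResourceCPU: *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
                    },
                },
            }},
        },
    }
}
```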
@@ -338,6 +343,11 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
```go
    // Test nodes do not have any label, hence it should be impossible to schedule a Pod with
    // a nonempty Selector set.
    /*
        Testname: scheduler-node-selector-not-matching
        Description: Ensure that scheduler respects the NodeSelector field of
        PodSpec during scheduling (when it does not match any node).
    */
    It("validates that NodeSelector is respected if not matching [Conformance]", func() {
        By("Trying to schedule Pod with nonempty NodeSelector.")
        podName := "restricted-pod"
```
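A minimal sketch of the "not matching" case (not the upstream test body): a pod whose NodeSelector demands a label that no node carries, which the scheduler should leave Pending. The label key and names are invented for illustration:

```go
package example

import (
    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// RestrictedPod returns a pod that cannot be scheduled anywhere because its
// NodeSelector requires a label no node is expected to have.
func RestrictedPod() *v1.Pod {
    return &v1.Pod{
        ObjectMeta: metav1.ObjectMeta{Name: "restricted-pod"},
        Spec: v1.PodSpec{
            // No node carries this label, so the NodeSelector predicate can
            // never be satisfied and the pod stays Pending.
            NodeSelector: map[string]string{"example.com/unmatchable": "true"},
            Containers: []v1.Container{{
                Name:  "pause",
                Image: "k8s.gcr.io/pause:3.1",
            }},
        },
    }
}
```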
@@ -379,6 +389,11 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
```go
        }
    })

    /*
        Testname: scheduler-node-selector-matching
        Description: Ensure that scheduler respects the NodeSelector field
        of PodSpec during scheduling (when it matches).
    */
    It("validates that NodeSelector is respected if matching [Conformance]", func() {
        nodeName := GetNodeThatCanRunPod(f)
```
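And a sketch of the "matching" counterpart (again hypothetical, not the PR's code): give one node a unique label, then create a pod whose NodeSelector requires exactly that label, so the scheduler has exactly one valid placement. Clientset construction is omitted; the label key and names are illustrative, and the modern context-taking client-go Patch/Create signatures are assumed:

```go
package example

import (
    "context"
    "fmt"

    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/client-go/kubernetes"
)

// RunMatchingSelector labels the given node, then creates a pod that can only
// be scheduled onto that freshly labeled node.
func RunMatchingSelector(ctx context.Context, cs kubernetes.Interface, nodeName, ns string) error {
    // Add a label that only this node carries (strategic-merge patch).
    patch := []byte(`{"metadata":{"labels":{"example.com/selector-test":"42"}}}`)
    if _, err := cs.CoreV1().Nodes().Patch(ctx, nodeName, types.StrategicMergePatchType, patch, metav1.PatchOptions{}); err != nil {
        return fmt.Errorf("labeling node: %w", err)
    }

    // Create a pod whose NodeSelector matches only the labeled node.
    pod := &v1.Pod{
        ObjectMeta: metav1.ObjectMeta{Name: "with-labels"},
        Spec: v1.PodSpec{
            NodeSelector: map[string]string{"example.com/selector-test": "42"},
            Containers:   []v1.Container{{Name: "pause", Image: "k8s.gcr.io/pause:3.1"}},
        },
    }
    _, err := cs.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{})
    return err
}
```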