Merge pull request #55053 from xiangpengzhao/version-check-auth

Automatic merge from submit-queue (batch tested with PRs 55063, 54523, 55053). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

No need to check server version for auth e2e tests

**What this PR does / why we need it**:
In the 1.9 cycle, these e2e tests no longer need to run against such old server versions, so their server-version checks can be removed.

**Which issue(s) this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close the issue(s) when PR gets merged)*:
ref: #55050

**Special notes for your reviewer**:
/cc @tallclair @liggitt

**Release note**:

```release-note
NONE
```
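
Every call site in the diffs below collapses to the same shape: one unconditional request through the `proxy` subresource, with the server-version probe and the legacy `Prefix("proxy")` fallback deleted. A minimal sketch of the surviving pattern, assuming a 1.2+ apiserver (the helper and parameter names here are illustrative, not taken from the tree):

```go
package sketch

import (
	"context"
	"time"

	clientset "k8s.io/client-go/kubernetes"
)

// podProxyGet issues a GET through the pod's "proxy" subresource, the only
// code path this PR keeps. Pre-1.2 servers needed the legacy Prefix("proxy")
// URL form instead, which is what the deleted else-branches produced.
func podProxyGet(c clientset.Interface, ns, pod, path string) ([]byte, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	return c.CoreV1().RESTClient().Get().
		Context(ctx).
		Namespace(ns).
		Resource("pods").
		SubResource("proxy").
		Name(pod).
		Suffix(path).
		Do().Raw()
}
```

The deleted branches only mattered for pre-1.2 servers, which the supported version skew for 1.9 e2e runs no longer includes.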
Kubernetes Submit Queue 2017-11-03 10:00:15 -07:00 committed by GitHub
commit 92952cfe77
9 changed files with 87 additions and 257 deletions


@@ -21,7 +21,6 @@ go_library(
         "//pkg/security/apparmor:go_default_library",
         "//pkg/security/podsecuritypolicy/seccomp:go_default_library",
         "//pkg/security/podsecuritypolicy/util:go_default_library",
-        "//pkg/util/version:go_default_library",
         "//plugin/pkg/admission/serviceaccount:go_default_library",
         "//test/e2e/common:go_default_library",
         "//test/e2e/framework:go_default_library",


@@ -25,7 +25,6 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/uuid"
 	"k8s.io/apimachinery/pkg/util/wait"
-	utilversion "k8s.io/kubernetes/pkg/util/version"
 	"k8s.io/kubernetes/plugin/pkg/admission/serviceaccount"
 	"k8s.io/kubernetes/test/e2e/framework"
 	imageutils "k8s.io/kubernetes/test/utils/image"
@@ -36,10 +35,6 @@ import (
 var mountImage = imageutils.GetE2EImage(imageutils.Mounttest)
 
-var serviceAccountTokenNamespaceVersion = utilversion.MustParseSemantic("v1.2.0")
-
-var serviceAccountTokenAutomountVersion = utilversion.MustParseSemantic("v1.6.0-alpha.2")
-
 var _ = SIGDescribe("ServiceAccounts", func() {
 	f := framework.NewDefaultFramework("svcaccounts")
@@ -220,16 +215,13 @@ var _ = SIGDescribe("ServiceAccounts", func() {
 			},
 		}
 
-		supportsTokenNamespace, _ := framework.ServerVersionGTE(serviceAccountTokenNamespaceVersion, f.ClientSet.Discovery())
-		if supportsTokenNamespace {
-			pod.Spec.Containers = append(pod.Spec.Containers, v1.Container{
-				Name:  "namespace-test",
-				Image: mountImage,
-				Args: []string{
-					fmt.Sprintf("--file_content=%s/%s", serviceaccount.DefaultAPITokenMountPath, v1.ServiceAccountNamespaceKey),
-				},
-			})
-		}
+		pod.Spec.Containers = append(pod.Spec.Containers, v1.Container{
+			Name:  "namespace-test",
+			Image: mountImage,
+			Args: []string{
+				fmt.Sprintf("--file_content=%s/%s", serviceaccount.DefaultAPITokenMountPath, v1.ServiceAccountNamespaceKey),
+			},
+		})
 
 		f.TestContainerOutput("consume service account token", pod, 0, []string{
 			fmt.Sprintf(`content of file "%s/%s": %s`, serviceaccount.DefaultAPITokenMountPath, v1.ServiceAccountTokenKey, tokenContent),
@@ -238,16 +230,12 @@ var _ = SIGDescribe("ServiceAccounts", func() {
 			fmt.Sprintf(`content of file "%s/%s": %s`, serviceaccount.DefaultAPITokenMountPath, v1.ServiceAccountRootCAKey, rootCAContent),
 		})
 
-		if supportsTokenNamespace {
-			f.TestContainerOutput("consume service account namespace", pod, 2, []string{
-				fmt.Sprintf(`content of file "%s/%s": %s`, serviceaccount.DefaultAPITokenMountPath, v1.ServiceAccountNamespaceKey, f.Namespace.Name),
-			})
-		}
+		f.TestContainerOutput("consume service account namespace", pod, 2, []string{
+			fmt.Sprintf(`content of file "%s/%s": %s`, serviceaccount.DefaultAPITokenMountPath, v1.ServiceAccountNamespaceKey, f.Namespace.Name),
+		})
 	})
 
 	framework.ConformanceIt("should allow opting out of API token automount ", func() {
-		framework.SkipUnlessServerVersionGTE(serviceAccountTokenAutomountVersion, f.ClientSet.Discovery())
-
 		var err error
 		trueValue := true
 		falseValue := false


@@ -280,33 +280,17 @@ func HighLatencyKubeletOperations(c clientset.Interface, threshold time.Duration
 
 // getStatsSummary contacts kubelet for the container information.
 func getStatsSummary(c clientset.Interface, nodeName string) (*stats.Summary, error) {
-	subResourceProxyAvailable, err := ServerVersionGTE(SubResourceServiceAndNodeProxyVersion, c.Discovery())
-	if err != nil {
-		return nil, err
-	}
-
 	ctx, cancel := context.WithTimeout(context.Background(), SingleCallTimeout)
 	defer cancel()
 
-	var data []byte
-	if subResourceProxyAvailable {
-		data, err = c.CoreV1().RESTClient().Get().
-			Context(ctx).
-			Resource("nodes").
-			SubResource("proxy").
-			Name(fmt.Sprintf("%v:%v", nodeName, ports.KubeletPort)).
-			Suffix("stats/summary").
-			Do().Raw()
-	} else {
-		data, err = c.CoreV1().RESTClient().Get().
-			Context(ctx).
-			Prefix("proxy").
-			Resource("nodes").
-			Name(fmt.Sprintf("%v:%v", nodeName, ports.KubeletPort)).
-			Suffix("stats/summary").
-			Do().Raw()
-	}
+	data, err := c.CoreV1().RESTClient().Get().
+		Context(ctx).
+		Resource("nodes").
+		SubResource("proxy").
+		Name(fmt.Sprintf("%v:%v", nodeName, ports.KubeletPort)).
+		Suffix("stats/summary").
+		Do().Raw()
 
 	if err != nil {
 		return nil, err
 	}
@@ -406,30 +390,14 @@ func getOneTimeResourceUsageOnNode(
 }
 
 func getNodeStatsSummary(c clientset.Interface, nodeName string) (*stats.Summary, error) {
-	subResourceProxyAvailable, err := ServerVersionGTE(SubResourceServiceAndNodeProxyVersion, c.Discovery())
-	if err != nil {
-		return nil, err
-	}
-
-	var data []byte
-	if subResourceProxyAvailable {
-		data, err = c.CoreV1().RESTClient().Get().
-			Resource("nodes").
-			SubResource("proxy").
-			Name(fmt.Sprintf("%v:%v", nodeName, ports.KubeletPort)).
-			Suffix("stats/summary").
-			SetHeader("Content-Type", "application/json").
-			Do().Raw()
-	} else {
-		data, err = c.CoreV1().RESTClient().Get().
-			Prefix("proxy").
-			Resource("nodes").
-			Name(fmt.Sprintf("%v:%v", nodeName, ports.KubeletPort)).
-			Suffix("stats/summary").
-			SetHeader("Content-Type", "application/json").
-			Do().Raw()
-	}
+	data, err := c.CoreV1().RESTClient().Get().
+		Resource("nodes").
+		SubResource("proxy").
+		Name(fmt.Sprintf("%v:%v", nodeName, ports.KubeletPort)).
+		Suffix("stats/summary").
+		SetHeader("Content-Type", "application/json").
+		Do().Raw()
 
 	if err != nil {
 		return nil, err
 	}


@@ -443,11 +443,6 @@ func getSchedulingLatency(c clientset.Interface) (*SchedulingLatency, error) {
 	nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
 	ExpectNoError(err)
 
-	subResourceProxyAvailable, err := ServerVersionGTE(SubResourcePodProxyVersion, c.Discovery())
-	if err != nil {
-		return nil, err
-	}
-
 	var data string
 	var masterRegistered = false
 	for _, node := range nodes.Items {
@@ -460,25 +455,14 @@ func getSchedulingLatency(c clientset.Interface) (*SchedulingLatency, error) {
 		defer cancel()
 
 		var rawData []byte
-		if subResourceProxyAvailable {
-			rawData, err = c.CoreV1().RESTClient().Get().
-				Context(ctx).
-				Namespace(metav1.NamespaceSystem).
-				Resource("pods").
-				Name(fmt.Sprintf("kube-scheduler-%v:%v", TestContext.CloudConfig.MasterName, ports.SchedulerPort)).
-				SubResource("proxy").
-				Suffix("metrics").
-				Do().Raw()
-		} else {
-			rawData, err = c.CoreV1().RESTClient().Get().
-				Context(ctx).
-				Prefix("proxy").
-				Namespace(metav1.NamespaceSystem).
-				SubResource("pods").
-				Name(fmt.Sprintf("kube-scheduler-%v:%v", TestContext.CloudConfig.MasterName, ports.SchedulerPort)).
-				Suffix("metrics").
-				Do().Raw()
-		}
+		rawData, err = c.CoreV1().RESTClient().Get().
+			Context(ctx).
+			Namespace(metav1.NamespaceSystem).
+			Resource("pods").
+			Name(fmt.Sprintf("kube-scheduler-%v:%v", TestContext.CloudConfig.MasterName, ports.SchedulerPort)).
+			SubResource("proxy").
+			Suffix("metrics").
+			Do().Raw()
 
 		ExpectNoError(err)
 		data = string(rawData)


@@ -244,24 +244,8 @@ func GetPauseImageNameForHostArch() string {
 	return currentPodInfraContainerImageName + "-" + goruntime.GOARCH + ":" + currentPodInfraContainerImageVersion
 }
 
-// SubResource proxy should have been functional in v1.0.0, but SubResource
-// proxy via tunneling is known to be broken in v1.0. See
-// https://github.com/kubernetes/kubernetes/pull/15224#issuecomment-146769463
-//
-// TODO(ihmccreery): remove once we don't care about v1.0 anymore, (tentatively
-// in v1.3).
-var SubResourcePodProxyVersion = utilversion.MustParseSemantic("v1.1.0")
-var SubResourceServiceAndNodeProxyVersion = utilversion.MustParseSemantic("v1.2.0")
-
 func GetServicesProxyRequest(c clientset.Interface, request *restclient.Request) (*restclient.Request, error) {
-	subResourceProxyAvailable, err := ServerVersionGTE(SubResourceServiceAndNodeProxyVersion, c.Discovery())
-	if err != nil {
-		return nil, err
-	}
-	if subResourceProxyAvailable {
-		return request.Resource("services").SubResource("proxy"), nil
-	}
-	return request.Prefix("proxy").Resource("services"), nil
+	return request.Resource("services").SubResource("proxy"), nil
 }
 
 // unique identifier of the e2e run
@@ -1672,34 +1656,19 @@ func (r podProxyResponseChecker) CheckAllResponses() (done bool, err error) {
 		if !isElementOf(pod.UID, currentPods) {
 			return false, fmt.Errorf("pod with UID %s is no longer a member of the replica set. Must have been restarted for some reason. Current replica set: %v", pod.UID, currentPods)
 		}
 
-		subResourceProxyAvailable, err := ServerVersionGTE(SubResourcePodProxyVersion, r.c.Discovery())
-		if err != nil {
-			return false, err
-		}
-
 		ctx, cancel := context.WithTimeout(context.Background(), SingleCallTimeout)
 		defer cancel()
 
-		var body []byte
-		if subResourceProxyAvailable {
-			body, err = r.c.CoreV1().RESTClient().Get().
-				Context(ctx).
-				Namespace(r.ns).
-				Resource("pods").
-				SubResource("proxy").
-				Name(string(pod.Name)).
-				Do().
-				Raw()
-		} else {
-			body, err = r.c.CoreV1().RESTClient().Get().
-				Context(ctx).
-				Prefix("proxy").
-				Namespace(r.ns).
-				Resource("pods").
-				Name(string(pod.Name)).
-				Do().
-				Raw()
-		}
+		body, err := r.c.CoreV1().RESTClient().Get().
+			Context(ctx).
+			Namespace(r.ns).
+			Resource("pods").
+			SubResource("proxy").
+			Name(string(pod.Name)).
+			Do().
+			Raw()
+
 		if err != nil {
 			if ctx.Err() != nil {
 				// We may encounter errors here because of a race between the pod readiness and apiserver
@@ -4404,29 +4373,16 @@ const proxyTimeout = 2 * time.Minute
 func NodeProxyRequest(c clientset.Interface, node, endpoint string) (restclient.Result, error) {
 	// proxy tends to hang in some cases when Node is not ready. Add an artificial timeout for this call.
 	// This will leak a goroutine if proxy hangs. #22165
-	subResourceProxyAvailable, err := ServerVersionGTE(SubResourceServiceAndNodeProxyVersion, c.Discovery())
-	if err != nil {
-		return restclient.Result{}, err
-	}
 	var result restclient.Result
 	finished := make(chan struct{})
 	go func() {
-		if subResourceProxyAvailable {
-			result = c.CoreV1().RESTClient().Get().
-				Resource("nodes").
-				SubResource("proxy").
-				Name(fmt.Sprintf("%v:%v", node, ports.KubeletPort)).
-				Suffix(endpoint).
-				Do()
-		} else {
-			result = c.CoreV1().RESTClient().Get().
-				Prefix("proxy").
-				Resource("nodes").
-				Name(fmt.Sprintf("%v:%v", node, ports.KubeletPort)).
-				Suffix(endpoint).
-				Do()
-		}
+		result = c.CoreV1().RESTClient().Get().
+			Resource("nodes").
+			SubResource("proxy").
+			Name(fmt.Sprintf("%v:%v", node, ports.KubeletPort)).
+			Suffix(endpoint).
+			Do()
 		finished <- struct{}{}
 	}()
 	select {
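
The comment deleted above records why these gates existed at all: subresource proxy was broken over tunneling in v1.0, so callers probed the server version before choosing a URL form. For reference, a hedged sketch of what such a probe looks like using the `pkg/util/version` helpers this PR unlinks; `serverVersionGTE` is an illustrative name and may differ from the framework's actual `ServerVersionGTE`:

```go
package sketch

import (
	"k8s.io/client-go/discovery"
	utilversion "k8s.io/kubernetes/pkg/util/version"
)

// serverVersionGTE reports whether the connected server is at least v.
// A sketch of the kind of gate the deleted call sites relied on, not the
// framework's exact implementation.
func serverVersionGTE(v *utilversion.Version, d discovery.DiscoveryInterface) (bool, error) {
	info, err := d.ServerVersion()
	if err != nil {
		return false, err
	}
	sv, err := utilversion.ParseSemantic(info.GitVersion)
	if err != nil {
		return false, err
	}
	return sv.AtLeast(v), nil
}
```

Against any server in the 1.9 support window such a probe always answers true, which is why the branches can be deleted outright rather than rewritten.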


@@ -66,42 +66,21 @@ var (
 
 // Query sends a command to the server and returns the Response
 func Query(c clientset.Interface, query string) (*influxdb.Response, error) {
-	subResourceProxyAvailable, err := framework.ServerVersionGTE(framework.SubResourceServiceAndNodeProxyVersion, c.Discovery())
-	if err != nil {
-		return nil, err
-	}
-
 	ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
 	defer cancel()
 
-	var result []byte
-	if subResourceProxyAvailable {
-		result, err = c.CoreV1().RESTClient().Get().
-			Context(ctx).
-			Namespace("kube-system").
-			Resource("services").
-			Name(influxdbService+":api").
-			SubResource("proxy").
-			Suffix("query").
-			Param("q", query).
-			Param("db", influxdbDatabaseName).
-			Param("epoch", "s").
-			Do().
-			Raw()
-	} else {
-		result, err = c.CoreV1().RESTClient().Get().
-			Context(ctx).
-			Prefix("proxy").
-			Namespace("kube-system").
-			Resource("services").
-			Name(influxdbService+":api").
-			Suffix("query").
-			Param("q", query).
-			Param("db", influxdbDatabaseName).
-			Param("epoch", "s").
-			Do().
-			Raw()
-	}
+	result, err := c.CoreV1().RESTClient().Get().
+		Context(ctx).
+		Namespace("kube-system").
+		Resource("services").
+		Name(influxdbService+":api").
+		SubResource("proxy").
+		Suffix("query").
+		Param("q", query).
+		Param("db", influxdbDatabaseName).
+		Param("epoch", "s").
+		Do().
+		Raw()
 
 	if err != nil {
 		if ctx.Err() != nil {


@@ -1990,34 +1990,19 @@ func getUDData(jpgExpected string, ns string) func(clientset.Interface, string)
 	// getUDData validates data.json in the update-demo (returns nil if data is ok).
 	return func(c clientset.Interface, podID string) error {
 		framework.Logf("validating pod %s", podID)
 
-		subResourceProxyAvailable, err := framework.ServerVersionGTE(framework.SubResourcePodProxyVersion, c.Discovery())
-		if err != nil {
-			return err
-		}
-
 		ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
 		defer cancel()
 
-		var body []byte
-		if subResourceProxyAvailable {
-			body, err = c.CoreV1().RESTClient().Get().
-				Namespace(ns).
-				Resource("pods").
-				SubResource("proxy").
-				Name(podID).
-				Suffix("data.json").
-				Do().
-				Raw()
-		} else {
-			body, err = c.CoreV1().RESTClient().Get().
-				Prefix("proxy").
-				Namespace(ns).
-				Resource("pods").
-				Name(podID).
-				Suffix("data.json").
-				Do().
-				Raw()
-		}
+		body, err := c.CoreV1().RESTClient().Get().
+			Namespace(ns).
+			Resource("pods").
+			SubResource("proxy").
+			Name(podID).
+			Suffix("data.json").
+			Do().
+			Raw()
+
 		if err != nil {
 			if ctx.Err() != nil {
 				framework.Failf("Failed to retrieve data from container: %v", err)


@@ -169,35 +169,20 @@ func assertFilesContain(fileNames []string, fileDir string, pod *v1.Pod, client
 	framework.ExpectNoError(wait.Poll(time.Second*10, time.Second*600, func() (bool, error) {
 		failed = []string{}
 
-		subResourceProxyAvailable, err := framework.ServerVersionGTE(framework.SubResourcePodProxyVersion, client.Discovery())
-		if err != nil {
-			return false, err
-		}
-
 		ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
 		defer cancel()
 
-		var contents []byte
 		for _, fileName := range fileNames {
-			if subResourceProxyAvailable {
-				contents, err = client.CoreV1().RESTClient().Get().
-					Context(ctx).
-					Namespace(pod.Namespace).
-					Resource("pods").
-					SubResource("proxy").
-					Name(pod.Name).
-					Suffix(fileDir, fileName).
-					Do().Raw()
-			} else {
-				contents, err = client.CoreV1().RESTClient().Get().
-					Context(ctx).
-					Prefix("proxy").
-					Resource("pods").
-					Namespace(pod.Namespace).
-					Name(pod.Name).
-					Suffix(fileDir, fileName).
-					Do().Raw()
-			}
+			contents, err := client.CoreV1().RESTClient().Get().
+				Context(ctx).
+				Namespace(pod.Namespace).
+				Resource("pods").
+				SubResource("proxy").
+				Name(pod.Name).
+				Suffix(fileDir, fileName).
+				Do().Raw()
+
 			if err != nil {
 				if ctx.Err() != nil {
 					framework.Failf("Unable to read %s from pod %s: %v", fileName, pod.Name, err)


@@ -121,34 +121,20 @@ func testPreStop(c clientset.Interface, ns string) {
 	// Validate that the server received the web poke.
 	err = wait.Poll(time.Second*5, time.Second*60, func() (bool, error) {
 
-		subResourceProxyAvailable, err := framework.ServerVersionGTE(framework.SubResourcePodProxyVersion, c.Discovery())
-		if err != nil {
-			return false, err
-		}
-
 		ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
 		defer cancel()
 
 		var body []byte
-		if subResourceProxyAvailable {
-			body, err = c.CoreV1().RESTClient().Get().
-				Context(ctx).
-				Namespace(ns).
-				Resource("pods").
-				SubResource("proxy").
-				Name(podDescr.Name).
-				Suffix("read").
-				DoRaw()
-		} else {
-			body, err = c.CoreV1().RESTClient().Get().
-				Context(ctx).
-				Prefix("proxy").
-				Namespace(ns).
-				Resource("pods").
-				Name(podDescr.Name).
-				Suffix("read").
-				DoRaw()
-		}
+		body, err = c.CoreV1().RESTClient().Get().
+			Context(ctx).
+			Namespace(ns).
+			Resource("pods").
+			SubResource("proxy").
+			Name(podDescr.Name).
+			Suffix("read").
+			DoRaw()
+
 		if err != nil {
 			if ctx.Err() != nil {
 				framework.Failf("Error validating prestop: %v", err)