mirror of https://github.com/k3s-io/k3s
Merge pull request #55053 from xiangpengzhao/version-check-auth
Automatic merge from submit-queue (batch tested with PRs 55063, 54523, 55053). If you want to cherry-pick this change to another branch, please follow the instructions here: https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md

Don't need to check version for auth e2e test

**What this PR does / why we need it**: In the 1.9 cycle, some e2e tests no longer need to run against such old server versions, so their version checks can be dropped.

**Which issue(s) this PR fixes**: ref: #55050

**Special notes for your reviewer**:
/cc @tallclair @liggitt

**Release note**:
```release-note
NONE
```
commit 92952cfe77
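The hunks below all follow one pattern: the old code called `framework.ServerVersionGTE` against a `SubResource*ProxyVersion` constant and branched between the legacy `Prefix("proxy")` URL form and the `proxy` subresource; since every supported server is now past v1.2, the gate and both version constants are deleted and the subresource form is used unconditionally. A minimal sketch of the resulting shape, assuming the client-go vintage used in this tree (`Do()` without a context argument); the helper name and parameters are illustrative, not from this commit:

```go
package framework

import (
	clientset "k8s.io/client-go/kubernetes"
)

// getPodProxyBody is a hypothetical helper showing the pattern this commit
// converges on: go straight to the pods/proxy subresource
// (/api/v1/namespaces/{ns}/pods/{name}/proxy) instead of probing the server
// version and falling back to the pre-1.2 Prefix("proxy") URL form.
func getPodProxyBody(c clientset.Interface, ns, podName string) ([]byte, error) {
	return c.CoreV1().RESTClient().Get().
		Namespace(ns).
		Resource("pods").
		SubResource("proxy").
		Name(podName).
		Do().
		Raw()
}
```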
@@ -21,7 +21,6 @@ go_library(
         "//pkg/security/apparmor:go_default_library",
         "//pkg/security/podsecuritypolicy/seccomp:go_default_library",
         "//pkg/security/podsecuritypolicy/util:go_default_library",
-        "//pkg/util/version:go_default_library",
         "//plugin/pkg/admission/serviceaccount:go_default_library",
         "//test/e2e/common:go_default_library",
         "//test/e2e/framework:go_default_library",
@@ -25,7 +25,6 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/uuid"
 	"k8s.io/apimachinery/pkg/util/wait"
-	utilversion "k8s.io/kubernetes/pkg/util/version"
 	"k8s.io/kubernetes/plugin/pkg/admission/serviceaccount"
 	"k8s.io/kubernetes/test/e2e/framework"
 	imageutils "k8s.io/kubernetes/test/utils/image"
@@ -36,10 +35,6 @@ import (
 var mountImage = imageutils.GetE2EImage(imageutils.Mounttest)
 
-var serviceAccountTokenNamespaceVersion = utilversion.MustParseSemantic("v1.2.0")
-
-var serviceAccountTokenAutomountVersion = utilversion.MustParseSemantic("v1.6.0-alpha.2")
-
 var _ = SIGDescribe("ServiceAccounts", func() {
 	f := framework.NewDefaultFramework("svcaccounts")
@@ -220,16 +215,13 @@ var _ = SIGDescribe("ServiceAccounts", func() {
 			},
 		}
 
-		supportsTokenNamespace, _ := framework.ServerVersionGTE(serviceAccountTokenNamespaceVersion, f.ClientSet.Discovery())
-		if supportsTokenNamespace {
-			pod.Spec.Containers = append(pod.Spec.Containers, v1.Container{
-				Name:  "namespace-test",
-				Image: mountImage,
-				Args: []string{
-					fmt.Sprintf("--file_content=%s/%s", serviceaccount.DefaultAPITokenMountPath, v1.ServiceAccountNamespaceKey),
-				},
-			})
-		}
+		pod.Spec.Containers = append(pod.Spec.Containers, v1.Container{
+			Name:  "namespace-test",
+			Image: mountImage,
+			Args: []string{
+				fmt.Sprintf("--file_content=%s/%s", serviceaccount.DefaultAPITokenMountPath, v1.ServiceAccountNamespaceKey),
+			},
+		})
 
 		f.TestContainerOutput("consume service account token", pod, 0, []string{
 			fmt.Sprintf(`content of file "%s/%s": %s`, serviceaccount.DefaultAPITokenMountPath, v1.ServiceAccountTokenKey, tokenContent),
@@ -238,16 +230,12 @@ var _ = SIGDescribe("ServiceAccounts", func() {
 			fmt.Sprintf(`content of file "%s/%s": %s`, serviceaccount.DefaultAPITokenMountPath, v1.ServiceAccountRootCAKey, rootCAContent),
 		})
 
-		if supportsTokenNamespace {
-			f.TestContainerOutput("consume service account namespace", pod, 2, []string{
-				fmt.Sprintf(`content of file "%s/%s": %s`, serviceaccount.DefaultAPITokenMountPath, v1.ServiceAccountNamespaceKey, f.Namespace.Name),
-			})
-		}
+		f.TestContainerOutput("consume service account namespace", pod, 2, []string{
+			fmt.Sprintf(`content of file "%s/%s": %s`, serviceaccount.DefaultAPITokenMountPath, v1.ServiceAccountNamespaceKey, f.Namespace.Name),
+		})
 	})
 
 	framework.ConformanceIt("should allow opting out of API token automount ", func() {
-		framework.SkipUnlessServerVersionGTE(serviceAccountTokenAutomountVersion, f.ClientSet.Discovery())
-
 		var err error
 		trueValue := true
 		falseValue := false
@@ -280,33 +280,17 @@ func HighLatencyKubeletOperations(c clientset.Interface, threshold time.Duration
 
 // getStatsSummary contacts kubelet for the container information.
 func getStatsSummary(c clientset.Interface, nodeName string) (*stats.Summary, error) {
-	subResourceProxyAvailable, err := ServerVersionGTE(SubResourceServiceAndNodeProxyVersion, c.Discovery())
-	if err != nil {
-		return nil, err
-	}
-
 	ctx, cancel := context.WithTimeout(context.Background(), SingleCallTimeout)
 	defer cancel()
 
-	var data []byte
-	if subResourceProxyAvailable {
-		data, err = c.CoreV1().RESTClient().Get().
-			Context(ctx).
-			Resource("nodes").
-			SubResource("proxy").
-			Name(fmt.Sprintf("%v:%v", nodeName, ports.KubeletPort)).
-			Suffix("stats/summary").
-			Do().Raw()
-	} else {
-		data, err = c.CoreV1().RESTClient().Get().
-			Context(ctx).
-			Prefix("proxy").
-			Resource("nodes").
-			Name(fmt.Sprintf("%v:%v", nodeName, ports.KubeletPort)).
-			Suffix("stats/summary").
-			Do().Raw()
-	}
+	data, err := c.CoreV1().RESTClient().Get().
+		Context(ctx).
+		Resource("nodes").
+		SubResource("proxy").
+		Name(fmt.Sprintf("%v:%v", nodeName, ports.KubeletPort)).
+		Suffix("stats/summary").
+		Do().Raw()
+
 	if err != nil {
 		return nil, err
 	}
@@ -406,30 +390,14 @@ func getOneTimeResourceUsageOnNode(
 }
 
 func getNodeStatsSummary(c clientset.Interface, nodeName string) (*stats.Summary, error) {
-	subResourceProxyAvailable, err := ServerVersionGTE(SubResourceServiceAndNodeProxyVersion, c.Discovery())
-	if err != nil {
-		return nil, err
-	}
-
-	var data []byte
-	if subResourceProxyAvailable {
-		data, err = c.CoreV1().RESTClient().Get().
-			Resource("nodes").
-			SubResource("proxy").
-			Name(fmt.Sprintf("%v:%v", nodeName, ports.KubeletPort)).
-			Suffix("stats/summary").
-			SetHeader("Content-Type", "application/json").
-			Do().Raw()
-	} else {
-		data, err = c.CoreV1().RESTClient().Get().
-			Prefix("proxy").
-			Resource("nodes").
-			Name(fmt.Sprintf("%v:%v", nodeName, ports.KubeletPort)).
-			Suffix("stats/summary").
-			SetHeader("Content-Type", "application/json").
-			Do().Raw()
-	}
+	data, err := c.CoreV1().RESTClient().Get().
+		Resource("nodes").
+		SubResource("proxy").
+		Name(fmt.Sprintf("%v:%v", nodeName, ports.KubeletPort)).
+		Suffix("stats/summary").
+		SetHeader("Content-Type", "application/json").
+		Do().Raw()
+
 	if err != nil {
 		return nil, err
 	}
@@ -443,11 +443,6 @@ func getSchedulingLatency(c clientset.Interface) (*SchedulingLatency, error) {
 	nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
 	ExpectNoError(err)
 
-	subResourceProxyAvailable, err := ServerVersionGTE(SubResourcePodProxyVersion, c.Discovery())
-	if err != nil {
-		return nil, err
-	}
-
 	var data string
 	var masterRegistered = false
 	for _, node := range nodes.Items {
@@ -460,25 +455,14 @@ func getSchedulingLatency(c clientset.Interface) (*SchedulingLatency, error) {
 		defer cancel()
 
 		var rawData []byte
-		if subResourceProxyAvailable {
-			rawData, err = c.CoreV1().RESTClient().Get().
-				Context(ctx).
-				Namespace(metav1.NamespaceSystem).
-				Resource("pods").
-				Name(fmt.Sprintf("kube-scheduler-%v:%v", TestContext.CloudConfig.MasterName, ports.SchedulerPort)).
-				SubResource("proxy").
-				Suffix("metrics").
-				Do().Raw()
-		} else {
-			rawData, err = c.CoreV1().RESTClient().Get().
-				Context(ctx).
-				Prefix("proxy").
-				Namespace(metav1.NamespaceSystem).
-				SubResource("pods").
-				Name(fmt.Sprintf("kube-scheduler-%v:%v", TestContext.CloudConfig.MasterName, ports.SchedulerPort)).
-				Suffix("metrics").
-				Do().Raw()
-		}
+		rawData, err = c.CoreV1().RESTClient().Get().
+			Context(ctx).
+			Namespace(metav1.NamespaceSystem).
+			Resource("pods").
+			Name(fmt.Sprintf("kube-scheduler-%v:%v", TestContext.CloudConfig.MasterName, ports.SchedulerPort)).
+			SubResource("proxy").
+			Suffix("metrics").
+			Do().Raw()
 
 		ExpectNoError(err)
 		data = string(rawData)
@@ -244,24 +244,8 @@ func GetPauseImageNameForHostArch() string {
 	return currentPodInfraContainerImageName + "-" + goruntime.GOARCH + ":" + currentPodInfraContainerImageVersion
 }
 
-// SubResource proxy should have been functional in v1.0.0, but SubResource
-// proxy via tunneling is known to be broken in v1.0. See
-// https://github.com/kubernetes/kubernetes/pull/15224#issuecomment-146769463
-//
-// TODO(ihmccreery): remove once we don't care about v1.0 anymore, (tentatively
-// in v1.3).
-var SubResourcePodProxyVersion = utilversion.MustParseSemantic("v1.1.0")
-var SubResourceServiceAndNodeProxyVersion = utilversion.MustParseSemantic("v1.2.0")
-
 func GetServicesProxyRequest(c clientset.Interface, request *restclient.Request) (*restclient.Request, error) {
-	subResourceProxyAvailable, err := ServerVersionGTE(SubResourceServiceAndNodeProxyVersion, c.Discovery())
-	if err != nil {
-		return nil, err
-	}
-	if subResourceProxyAvailable {
-		return request.Resource("services").SubResource("proxy"), nil
-	}
-	return request.Prefix("proxy").Resource("services"), nil
+	return request.Resource("services").SubResource("proxy"), nil
 }
 
 // unique identifier of the e2e run
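Note that `GetServicesProxyRequest` keeps its two-value signature even though it can no longer fail, so existing callers compile unchanged. A hedged caller sketch under that assumption; the package name, service name, and suffix path are illustrative, not from this commit:

```go
package e2e

import (
	clientset "k8s.io/client-go/kubernetes"

	"k8s.io/kubernetes/test/e2e/framework"
)

// queryServiceProxy is a hypothetical caller: it proxies an HTTP GET through
// a cluster service using the simplified request builder.
func queryServiceProxy(c clientset.Interface) ([]byte, error) {
	req, err := framework.GetServicesProxyRequest(c, c.CoreV1().RESTClient().Get())
	if err != nil { // retained only for signature compatibility; always nil now
		return nil, err
	}
	return req.Namespace("kube-system").
		Name("heapster").               // illustrative service name
		Suffix("api/v1/model/metrics"). // illustrative path
		DoRaw()
}
```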
@@ -1672,34 +1656,19 @@ func (r podProxyResponseChecker) CheckAllResponses() (done bool, err error) {
 		if !isElementOf(pod.UID, currentPods) {
 			return false, fmt.Errorf("pod with UID %s is no longer a member of the replica set. Must have been restarted for some reason. Current replica set: %v", pod.UID, currentPods)
 		}
-		subResourceProxyAvailable, err := ServerVersionGTE(SubResourcePodProxyVersion, r.c.Discovery())
-		if err != nil {
-			return false, err
-		}
 
 		ctx, cancel := context.WithTimeout(context.Background(), SingleCallTimeout)
 		defer cancel()
 
-		var body []byte
-		if subResourceProxyAvailable {
-			body, err = r.c.CoreV1().RESTClient().Get().
-				Context(ctx).
-				Namespace(r.ns).
-				Resource("pods").
-				SubResource("proxy").
-				Name(string(pod.Name)).
-				Do().
-				Raw()
-		} else {
-			body, err = r.c.CoreV1().RESTClient().Get().
-				Context(ctx).
-				Prefix("proxy").
-				Namespace(r.ns).
-				Resource("pods").
-				Name(string(pod.Name)).
-				Do().
-				Raw()
-		}
+		body, err := r.c.CoreV1().RESTClient().Get().
+			Context(ctx).
+			Namespace(r.ns).
+			Resource("pods").
+			SubResource("proxy").
+			Name(string(pod.Name)).
+			Do().
+			Raw()
+
 		if err != nil {
 			if ctx.Err() != nil {
 				// We may encounter errors here because of a race between the pod readiness and apiserver
@@ -4404,29 +4373,16 @@ const proxyTimeout = 2 * time.Minute
 func NodeProxyRequest(c clientset.Interface, node, endpoint string) (restclient.Result, error) {
 	// proxy tends to hang in some cases when Node is not ready. Add an artificial timeout for this call.
 	// This will leak a goroutine if proxy hangs. #22165
-	subResourceProxyAvailable, err := ServerVersionGTE(SubResourceServiceAndNodeProxyVersion, c.Discovery())
-	if err != nil {
-		return restclient.Result{}, err
-	}
 	var result restclient.Result
 	finished := make(chan struct{})
 	go func() {
-		if subResourceProxyAvailable {
-			result = c.CoreV1().RESTClient().Get().
-				Resource("nodes").
-				SubResource("proxy").
-				Name(fmt.Sprintf("%v:%v", node, ports.KubeletPort)).
-				Suffix(endpoint).
-				Do()
-		} else {
-			result = c.CoreV1().RESTClient().Get().
-				Prefix("proxy").
-				Resource("nodes").
-				Name(fmt.Sprintf("%v:%v", node, ports.KubeletPort)).
-				Suffix(endpoint).
-				Do()
-		}
+		result = c.CoreV1().RESTClient().Get().
+			Resource("nodes").
+			SubResource("proxy").
+			Name(fmt.Sprintf("%v:%v", node, ports.KubeletPort)).
+			Suffix(endpoint).
+			Do()
 		finished <- struct{}{}
 	}()
 	select {
@@ -66,42 +66,21 @@ var (
 
 // Query sends a command to the server and returns the Response
 func Query(c clientset.Interface, query string) (*influxdb.Response, error) {
-	subResourceProxyAvailable, err := framework.ServerVersionGTE(framework.SubResourceServiceAndNodeProxyVersion, c.Discovery())
-	if err != nil {
-		return nil, err
-	}
-
 	ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
 	defer cancel()
 
-	var result []byte
-	if subResourceProxyAvailable {
-		result, err = c.CoreV1().RESTClient().Get().
-			Context(ctx).
-			Namespace("kube-system").
-			Resource("services").
-			Name(influxdbService+":api").
-			SubResource("proxy").
-			Suffix("query").
-			Param("q", query).
-			Param("db", influxdbDatabaseName).
-			Param("epoch", "s").
-			Do().
-			Raw()
-	} else {
-		result, err = c.CoreV1().RESTClient().Get().
-			Context(ctx).
-			Prefix("proxy").
-			Namespace("kube-system").
-			Resource("services").
-			Name(influxdbService+":api").
-			Suffix("query").
-			Param("q", query).
-			Param("db", influxdbDatabaseName).
-			Param("epoch", "s").
-			Do().
-			Raw()
-	}
+	result, err := c.CoreV1().RESTClient().Get().
+		Context(ctx).
+		Namespace("kube-system").
+		Resource("services").
+		Name(influxdbService+":api").
+		SubResource("proxy").
+		Suffix("query").
+		Param("q", query).
+		Param("db", influxdbDatabaseName).
+		Param("epoch", "s").
+		Do().
+		Raw()
+
 	if err != nil {
 		if ctx.Err() != nil {
@@ -1990,34 +1990,19 @@ func getUDData(jpgExpected string, ns string) func(clientset.Interface, string)
 	// getUDData validates data.json in the update-demo (returns nil if data is ok).
 	return func(c clientset.Interface, podID string) error {
 		framework.Logf("validating pod %s", podID)
-		subResourceProxyAvailable, err := framework.ServerVersionGTE(framework.SubResourcePodProxyVersion, c.Discovery())
-		if err != nil {
-			return err
-		}
 
 		ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
 		defer cancel()
 
-		var body []byte
-		if subResourceProxyAvailable {
-			body, err = c.CoreV1().RESTClient().Get().
-				Namespace(ns).
-				Resource("pods").
-				SubResource("proxy").
-				Name(podID).
-				Suffix("data.json").
-				Do().
-				Raw()
-		} else {
-			body, err = c.CoreV1().RESTClient().Get().
-				Prefix("proxy").
-				Namespace(ns).
-				Resource("pods").
-				Name(podID).
-				Suffix("data.json").
-				Do().
-				Raw()
-		}
+		body, err := c.CoreV1().RESTClient().Get().
+			Namespace(ns).
+			Resource("pods").
+			SubResource("proxy").
+			Name(podID).
+			Suffix("data.json").
+			Do().
+			Raw()
+
 		if err != nil {
 			if ctx.Err() != nil {
 				framework.Failf("Failed to retrieve data from container: %v", err)
@@ -169,35 +169,20 @@ func assertFilesContain(fileNames []string, fileDir string, pod *v1.Pod, client
 
 	framework.ExpectNoError(wait.Poll(time.Second*10, time.Second*600, func() (bool, error) {
 		failed = []string{}
-		subResourceProxyAvailable, err := framework.ServerVersionGTE(framework.SubResourcePodProxyVersion, client.Discovery())
-		if err != nil {
-			return false, err
-		}
 
 		ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
 		defer cancel()
 
-		var contents []byte
 		for _, fileName := range fileNames {
-			if subResourceProxyAvailable {
-				contents, err = client.CoreV1().RESTClient().Get().
-					Context(ctx).
-					Namespace(pod.Namespace).
-					Resource("pods").
-					SubResource("proxy").
-					Name(pod.Name).
-					Suffix(fileDir, fileName).
-					Do().Raw()
-			} else {
-				contents, err = client.CoreV1().RESTClient().Get().
-					Context(ctx).
-					Prefix("proxy").
-					Resource("pods").
-					Namespace(pod.Namespace).
-					Name(pod.Name).
-					Suffix(fileDir, fileName).
-					Do().Raw()
-			}
+			contents, err := client.CoreV1().RESTClient().Get().
+				Context(ctx).
+				Namespace(pod.Namespace).
+				Resource("pods").
+				SubResource("proxy").
+				Name(pod.Name).
+				Suffix(fileDir, fileName).
+				Do().Raw()
+
 			if err != nil {
 				if ctx.Err() != nil {
 					framework.Failf("Unable to read %s from pod %s: %v", fileName, pod.Name, err)
@@ -121,34 +121,20 @@ func testPreStop(c clientset.Interface, ns string) {
 
 	// Validate that the server received the web poke.
 	err = wait.Poll(time.Second*5, time.Second*60, func() (bool, error) {
-		subResourceProxyAvailable, err := framework.ServerVersionGTE(framework.SubResourcePodProxyVersion, c.Discovery())
-		if err != nil {
-			return false, err
-		}
-
 		ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
 		defer cancel()
 
 		var body []byte
-		if subResourceProxyAvailable {
-			body, err = c.CoreV1().RESTClient().Get().
-				Context(ctx).
-				Namespace(ns).
-				Resource("pods").
-				SubResource("proxy").
-				Name(podDescr.Name).
-				Suffix("read").
-				DoRaw()
-		} else {
-			body, err = c.CoreV1().RESTClient().Get().
-				Context(ctx).
-				Prefix("proxy").
-				Namespace(ns).
-				Resource("pods").
-				Name(podDescr.Name).
-				Suffix("read").
-				DoRaw()
-		}
+		body, err = c.CoreV1().RESTClient().Get().
+			Context(ctx).
+			Namespace(ns).
+			Resource("pods").
+			SubResource("proxy").
+			Name(podDescr.Name).
+			Suffix("read").
+			DoRaw()
+
 		if err != nil {
 			if ctx.Err() != nil {
 				framework.Failf("Error validating prestop: %v", err)