Remove the conversion of client config, because client-go is authoritative now

pull/6/head
Chao Xu 2017-09-12 16:02:17 -07:00
parent 108ee22096
commit 6c5a8d5db9
4 changed files with 13 additions and 95 deletions
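
With client-go vendored as the authoritative client library, the e2e framework no longer needs a second, converted rest config for a separate StagingClient; every client is built from one config and tests use f.ClientSet. A minimal sketch of that pattern, assuming a placeholder kubeconfig path (an illustration, not code from this commit):

package main

import (
	"fmt"

	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// "/path/to/kubeconfig" is a placeholder; the e2e framework resolves its
	// own config from the test context.
	config, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	// One *rest.Config feeds any client-go based clientset directly; no
	// field-by-field copy into a second config type is required.
	cs, err := clientset.NewForConfig(config)
	if err != nil {
		panic(err)
	}
	fmt.Printf("built clientset for %s: %T\n", config.Host, cs)
}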


@@ -28,7 +28,6 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/kubernetes/test/e2e/framework"
@@ -149,7 +148,7 @@ func observerUpdate(w watch.Interface, expectedUpdate func(runtime.Object) bool)
return
}
var _ = SIGDescribe("Generated release_1_5 clientset", func() {
var _ = SIGDescribe("Generated clientset", func() {
f := framework.NewDefaultFramework("clientset")
It("should create pods, set the deletionTimestamp and deletionGracePeriodSeconds of the pod", func() {
podClient := f.ClientSet.Core().Pods(f.Namespace.Name)
@@ -261,7 +260,7 @@ func newTestingCronJob(name string, value string) *batchv1beta1.CronJob {
}
}
var _ = SIGDescribe("Generated release_1_5 clientset", func() {
var _ = SIGDescribe("Generated clientset", func() {
f := framework.NewDefaultFramework("clientset")
BeforeEach(func() {
@@ -326,41 +325,3 @@ var _ = SIGDescribe("Generated release_1_5 clientset", func() {
Expect(len(cronJobs.Items)).To(Equal(0))
})
})
var _ = SIGDescribe("Staging client repo client", func() {
f := framework.NewDefaultFramework("clientset")
It("should create pods, delete pods, watch pods", func() {
podClient := f.StagingClient.CoreV1().Pods(f.Namespace.Name)
By("constructing the pod")
name := "pod" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
podCopy := stagingClientPod(name, value)
pod := &podCopy
By("verifying no pod exists before the test")
pods, err := podClient.List(metav1.ListOptions{})
if err != nil {
framework.Failf("Failed to query for pods: %v", err)
}
Expect(len(pods.Items)).To(Equal(0))
By("creating the pod")
pod, err = podClient.Create(pod)
if err != nil {
framework.Failf("Failed to create pod: %v", err)
}
By("verifying the pod is in kubernetes")
timeout := 1 * time.Minute
if err := wait.PollImmediate(time.Second, timeout, func() (bool, error) {
pods, err = podClient.List(metav1.ListOptions{})
if err != nil {
return false, err
}
if len(pods.Items) == 1 {
return true, nil
}
return false, nil
}); err != nil {
framework.Failf("Err : %s\n. Failed to wait for 1 pod to be created", err)
}
})
})
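
The "Staging client repo client" test removed above duplicated basic pod create/list coverage through the separately constructed staging client; with ClientSet itself backed by client-go, the same wait loop can run against any kubernetes.Interface. A hedged sketch, where waitForPodCount is a hypothetical helper rather than framework code:

package e2esketch

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForPodCount polls until the namespace holds exactly n pods or the
// timeout expires, mirroring the wait loop in the deleted test.
func waitForPodCount(cs kubernetes.Interface, ns string, n int, timeout time.Duration) error {
	return wait.PollImmediate(time.Second, timeout, func() (bool, error) {
		pods, err := cs.CoreV1().Pods(ns).List(metav1.ListOptions{})
		if err != nil {
			return false, err
		}
		return len(pods.Items) == n, nil
	})
}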


@@ -44,10 +44,10 @@ const (
var _ = SIGDescribe("DisruptionController", func() {
f := framework.NewDefaultFramework("disruption")
var ns string
var cs *kubernetes.Clientset
var cs kubernetes.Interface
BeforeEach(func() {
cs = f.StagingClient
cs = f.ClientSet
ns = f.Namespace.Name
})
@@ -215,7 +215,7 @@ var _ = SIGDescribe("DisruptionController", func() {
}
})
func createPDBMinAvailableOrDie(cs *kubernetes.Clientset, ns string, minAvailable intstr.IntOrString) {
func createPDBMinAvailableOrDie(cs kubernetes.Interface, ns string, minAvailable intstr.IntOrString) {
pdb := policy.PodDisruptionBudget{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
@@ -230,7 +230,7 @@ func createPDBMinAvailableOrDie(cs *kubernetes.Clientset, ns string, minAvailabl
Expect(err).NotTo(HaveOccurred())
}
func createPDBMaxUnavailableOrDie(cs *kubernetes.Clientset, ns string, maxUnavailable intstr.IntOrString) {
func createPDBMaxUnavailableOrDie(cs kubernetes.Interface, ns string, maxUnavailable intstr.IntOrString) {
pdb := policy.PodDisruptionBudget{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
@@ -245,7 +245,7 @@ func createPDBMaxUnavailableOrDie(cs *kubernetes.Clientset, ns string, maxUnavai
Expect(err).NotTo(HaveOccurred())
}
func createPodsOrDie(cs *kubernetes.Clientset, ns string, n int) {
func createPodsOrDie(cs kubernetes.Interface, ns string, n int) {
for i := 0; i < n; i++ {
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
@@ -269,7 +269,7 @@ func createPodsOrDie(cs *kubernetes.Clientset, ns string, n int) {
}
}
func waitForPodsOrDie(cs *kubernetes.Clientset, ns string, n int) {
func waitForPodsOrDie(cs kubernetes.Interface, ns string, n int) {
By("Waiting for all pods to be running")
err := wait.PollImmediate(framework.Poll, schedulingTimeout, func() (bool, error) {
pods, err := cs.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: "foo=bar"})
@@ -298,7 +298,7 @@ func waitForPodsOrDie(cs *kubernetes.Clientset, ns string, n int) {
framework.ExpectNoError(err, "Waiting for pods in namespace %q to be ready", ns)
}
func createReplicaSetOrDie(cs *kubernetes.Clientset, ns string, size int32, exclusive bool) {
func createReplicaSetOrDie(cs kubernetes.Interface, ns string, size int32, exclusive bool) {
container := v1.Container{
Name: "busybox",
Image: "gcr.io/google_containers/echoserver:1.6",
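
Switching the helper signatures in this file from *kubernetes.Clientset to kubernetes.Interface also means they accept any implementation of the interface, for example a fake clientset in unit tests. A sketch under that assumption (countPods is a hypothetical helper, not part of this commit):

package e2esketch

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
)

// countPods works against any implementation of kubernetes.Interface.
func countPods(cs kubernetes.Interface, ns string) (int, error) {
	pods, err := cs.CoreV1().Pods(ns).List(metav1.ListOptions{})
	if err != nil {
		return 0, err
	}
	return len(pods.Items), nil
}

func example() (int, error) {
	// A fake clientset satisfies the same interface as the real one, so the
	// helper is unit-testable without a cluster.
	return countPods(fake.NewSimpleClientset(), "default")
}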


@@ -735,10 +735,10 @@ func runDrainTest(f *framework.Framework, migSizes map[string]int, namespace str
MinAvailable: &minAvailable,
},
}
_, err = f.StagingClient.Policy().PodDisruptionBudgets(namespace).Create(pdb)
_, err = f.ClientSet.Policy().PodDisruptionBudgets(namespace).Create(pdb)
defer func() {
f.StagingClient.Policy().PodDisruptionBudgets(namespace).Delete(pdb.Name, &metav1.DeleteOptions{})
f.ClientSet.Policy().PodDisruptionBudgets(namespace).Delete(pdb.Name, &metav1.DeleteOptions{})
}()
framework.ExpectNoError(err)
@@ -1405,7 +1405,7 @@ func addKubeSystemPdbs(f *framework.Framework) (func(), error) {
newPdbs := make([]string, 0)
cleanup := func() {
for _, newPdbName := range newPdbs {
f.StagingClient.Policy().PodDisruptionBudgets("kube-system").Delete(newPdbName, &metav1.DeleteOptions{})
f.ClientSet.Policy().PodDisruptionBudgets("kube-system").Delete(newPdbName, &metav1.DeleteOptions{})
}
}
@@ -1434,7 +1434,7 @@ func addKubeSystemPdbs(f *framework.Framework) (func(), error) {
MinAvailable: &minAvailable,
},
}
_, err := f.StagingClient.Policy().PodDisruptionBudgets("kube-system").Create(pdb)
_, err := f.ClientSet.Policy().PodDisruptionBudgets("kube-system").Create(pdb)
newPdbs = append(newPdbs, pdbName)
if err != nil {


@@ -19,11 +19,9 @@ package framework
import (
"bufio"
"bytes"
"encoding/json"
"fmt"
"os"
"path"
"reflect"
"strings"
"sync"
"time"
@@ -34,14 +32,10 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
staging "k8s.io/client-go/kubernetes"
clientreporestclient "k8s.io/client-go/rest"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
aggregatorclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/api"
@@ -66,12 +60,10 @@ const (
type Framework struct {
BaseName string
// ClientSet uses internal objects, you should use ClientSet where possible.
ClientSet clientset.Interface
KubemarkExternalClusterClientSet clientset.Interface
InternalClientset *internalclientset.Clientset
StagingClient *staging.Clientset
AggregatorClient *aggregatorclient.Clientset
ClientPool dynamic.ClientPool
@@ -144,38 +136,6 @@ func NewFramework(baseName string, options FrameworkOptions, client clientset.In
return f
}
// getClientRepoConfig copies k8s.io/kubernetes/pkg/client/restclient.Config to
// a k8s.io/client-go/pkg/client/restclient.Config. It's not a deep copy. Two
// configs may share some common struct.
func getClientRepoConfig(src *restclient.Config) (dst *clientreporestclient.Config) {
skippedFields := sets.NewString("Transport", "WrapTransport", "RateLimiter", "AuthConfigPersister")
dst = &clientreporestclient.Config{}
dst.Transport = src.Transport
dst.WrapTransport = src.WrapTransport
dst.RateLimiter = src.RateLimiter
dst.AuthConfigPersister = src.AuthConfigPersister
sv := reflect.ValueOf(src).Elem()
dv := reflect.ValueOf(dst).Elem()
for i := 0; i < sv.NumField(); i++ {
if skippedFields.Has(sv.Type().Field(i).Name) {
continue
}
sf := sv.Field(i).Interface()
data, err := json.Marshal(sf)
if err != nil {
Expect(err).NotTo(HaveOccurred())
}
if !dv.Field(i).CanAddr() {
Failf("unaddressable field: %v", dv.Type().Field(i).Name)
} else {
if err := json.Unmarshal(data, dv.Field(i).Addr().Interface()); err != nil {
Expect(err).NotTo(HaveOccurred())
}
}
}
return dst
}
// BeforeEach gets a client and makes a namespace.
func (f *Framework) BeforeEach() {
// The fact that we need this feels like a bug in ginkgo.
@@ -199,9 +159,6 @@ func (f *Framework) BeforeEach() {
Expect(err).NotTo(HaveOccurred())
f.AggregatorClient, err = aggregatorclient.NewForConfig(config)
Expect(err).NotTo(HaveOccurred())
clientRepoConfig := getClientRepoConfig(config)
f.StagingClient, err = staging.NewForConfig(clientRepoConfig)
Expect(err).NotTo(HaveOccurred())
f.ClientPool = dynamic.NewClientPool(config, api.Registry.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)
if ProviderIs("kubemark") && TestContext.KubemarkExternalKubeConfig != "" && TestContext.CloudConfig.KubemarkController == nil {
externalConfig, err := clientcmd.BuildConfigFromFlags("", TestContext.KubemarkExternalKubeConfig)
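
With getClientRepoConfig and the StagingClient field gone, BeforeEach can build every clientset from the same *restclient.Config. A hedged sketch of that reduced shape (buildClients is a hypothetical helper name, not the framework's actual code):

package frameworksketch

import (
	clientset "k8s.io/client-go/kubernetes"
	restclient "k8s.io/client-go/rest"
	aggregatorclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
)

// buildClients constructs the client-go clientset and the aggregator clientset
// from the same shared rest config, with no intermediate conversion step.
func buildClients(config *restclient.Config) (clientset.Interface, aggregatorclient.Interface, error) {
	cs, err := clientset.NewForConfig(config)
	if err != nil {
		return nil, nil, err
	}
	agg, err := aggregatorclient.NewForConfig(config)
	if err != nil {
		return nil, nil, err
	}
	return cs, agg, nil
}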