mirror of https://github.com/k3s-io/k3s
change e2e scale to avoid adapter
parent c22f076561
commit 48ac4d549d
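This commit drops the unversioned-client-to-clientset adapter (`FromUnversionedClient`, deleted in the first hunk below) and instead threads a clientset through the test helpers: `framework.ScaleRC`, `framework.DeleteRCAndPods`, `framework.DeleteReplicaSet`, `ScaleRCByLabels`, and the integration `StopRC`/`ScaleRC` now take a clientset, and callers pass `f.ClientSet` (the load test keeps a small local shim). The sketch below is not part of the diff; it only illustrates the call-site change, using the helper signatures that appear in the hunks that follow.

```go
// Illustrative only: how an e2e test invokes the reworked helpers after this
// change. Package path and signatures are the ones shown in the diff; the
// function itself is hypothetical.
package example

import "k8s.io/kubernetes/test/e2e/framework"

func cleanupExample(f *framework.Framework, ns, rcName string) {
	// Old: framework.DeleteRCAndPods(f.Client, ns, rcName)
	// New: the internal clientset is passed explicitly, so no adapter is needed.
	framework.ExpectNoError(framework.DeleteRCAndPods(f.Client, f.ClientSet, ns, rcName))

	// Scaling follows the same pattern (size 3, wait for completion).
	framework.ExpectNoError(framework.ScaleRC(f.Client, f.ClientSet, ns, rcName, 3, true))
}
```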
@@ -1,79 +0,0 @@
-/*
-Copyright 2016 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package internalclientset
-
-import (
-	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
-	unversionedauthentication "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/unversioned"
-	unversionedauthorization "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/unversioned"
-	unversionedautoscaling "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned"
-	unversionedbatch "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned"
-	unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
-	unversionedextensions "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned"
-	unversionedstorage "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/unversioned"
-	"k8s.io/kubernetes/pkg/client/typed/discovery"
-	"k8s.io/kubernetes/pkg/client/unversioned"
-)
-
-// FromUnversionedClient adapts a unversioned.Client to a internalclientset.Clientset.
-// This function is temporary. We will remove it when everyone has moved to using
-// Clientset. New code should NOT use this function.
-func FromUnversionedClient(c *unversioned.Client) *internalclientset.Clientset {
-	var clientset internalclientset.Clientset
-	if c != nil {
-		clientset.CoreClient = unversionedcore.New(c.RESTClient)
-	} else {
-		clientset.CoreClient = unversionedcore.New(nil)
-	}
-	if c != nil && c.ExtensionsClient != nil {
-		clientset.ExtensionsClient = unversionedextensions.New(c.ExtensionsClient.RESTClient)
-	} else {
-		clientset.ExtensionsClient = unversionedextensions.New(nil)
-	}
-	if c != nil && c.BatchClient != nil {
-		clientset.BatchClient = unversionedbatch.New(c.BatchClient.RESTClient)
-	} else {
-		clientset.BatchClient = unversionedbatch.New(nil)
-	}
-	if c != nil && c.AuthorizationClient != nil {
-		clientset.AuthorizationClient = unversionedauthorization.New(c.AuthorizationClient.RESTClient)
-	} else {
-		clientset.AuthorizationClient = unversionedauthorization.New(nil)
-	}
-	if c != nil && c.AutoscalingClient != nil {
-		clientset.AutoscalingClient = unversionedautoscaling.New(c.AutoscalingClient.RESTClient)
-	} else {
-		clientset.AutoscalingClient = unversionedautoscaling.New(nil)
-	}
-	if c != nil && c.AuthenticationClient != nil {
-		clientset.AuthenticationClient = unversionedauthentication.New(c.AuthenticationClient.RESTClient)
-	} else {
-		clientset.AuthenticationClient = unversionedauthentication.New(nil)
-	}
-	if c != nil && c.DiscoveryClient != nil {
-		clientset.DiscoveryClient = discovery.NewDiscoveryClient(c.DiscoveryClient.RESTClient)
-	} else {
-		clientset.DiscoveryClient = discovery.NewDiscoveryClient(nil)
-	}
-	if c != nil && c.StorageClient != nil {
-		clientset.StorageClient = unversionedstorage.New(c.StorageClient.RESTClient)
-	} else {
-		clientset.StorageClient = unversionedstorage.New(nil)
-	}
-
-	return &clientset
-}

@@ -302,9 +302,9 @@ func (rc *ResourceConsumer) CleanUp() {
 	rc.stopCustomMetric <- 0
 	// Wait some time to ensure all child goroutines are finished.
 	time.Sleep(10 * time.Second)
-	framework.ExpectNoError(framework.DeleteRCAndPods(rc.framework.Client, rc.framework.Namespace.Name, rc.name))
+	framework.ExpectNoError(framework.DeleteRCAndPods(rc.framework.Client, rc.framework.ClientSet, rc.framework.Namespace.Name, rc.name))
 	framework.ExpectNoError(rc.framework.Client.Services(rc.framework.Namespace.Name).Delete(rc.name))
-	framework.ExpectNoError(framework.DeleteRCAndPods(rc.framework.Client, rc.framework.Namespace.Name, rc.controllerName))
+	framework.ExpectNoError(framework.DeleteRCAndPods(rc.framework.Client, rc.framework.ClientSet, rc.framework.Namespace.Name, rc.controllerName))
 	framework.ExpectNoError(rc.framework.Client.Services(rc.framework.Namespace.Name).Delete(rc.controllerName))
 }

@@ -97,7 +97,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
 	It("shouldn't increase cluster size if pending pod is too large [Feature:ClusterSizeAutoscalingScaleUp]", func() {
 		By("Creating unschedulable pod")
 		ReserveMemory(f, "memory-reservation", 1, memCapacityMb, false)
-		defer framework.DeleteRCAndPods(f.Client, f.Namespace.Name, "memory-reservation")
+		defer framework.DeleteRCAndPods(f.Client, f.ClientSet, f.Namespace.Name, "memory-reservation")

 		By("Waiting for scale up hoping it won't happen")
 		// Verify that the appropriate event was generated.

@@ -124,7 +124,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {

 	It("should increase cluster size if pending pods are small [Feature:ClusterSizeAutoscalingScaleUp]", func() {
 		ReserveMemory(f, "memory-reservation", 100, nodeCount*memCapacityMb, false)
-		defer framework.DeleteRCAndPods(f.Client, f.Namespace.Name, "memory-reservation")
+		defer framework.DeleteRCAndPods(f.Client, f.ClientSet, f.Namespace.Name, "memory-reservation")

 		// Verify that cluster size is increased
 		framework.ExpectNoError(WaitForClusterSizeFunc(f.Client,

@@ -143,7 +143,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
 		glog.Infof("Not enabling cluster autoscaler for the node pool (on purpose).")

 		ReserveMemory(f, "memory-reservation", 100, nodeCount*memCapacityMb, false)
-		defer framework.DeleteRCAndPods(f.Client, f.Namespace.Name, "memory-reservation")
+		defer framework.DeleteRCAndPods(f.Client, f.ClientSet, f.Namespace.Name, "memory-reservation")

 		// Verify that cluster size is increased
 		framework.ExpectNoError(WaitForClusterSizeFunc(f.Client,

@@ -165,7 +165,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {

 	It("should increase cluster size if pods are pending due to host port conflict [Feature:ClusterSizeAutoscalingScaleUp]", func() {
 		CreateHostPortPods(f, "host-port", nodeCount+2, false)
-		defer framework.DeleteRCAndPods(f.Client, f.Namespace.Name, "host-port")
+		defer framework.DeleteRCAndPods(f.Client, f.ClientSet, f.Namespace.Name, "host-port")

 		framework.ExpectNoError(WaitForClusterSizeFunc(f.Client,
 			func(size int) bool { return size >= nodeCount+2 }, scaleUpTimeout))

@@ -217,7 +217,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
 			func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout))

 		framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
-		framework.ExpectNoError(framework.DeleteRCAndPods(f.Client, f.Namespace.Name, "node-selector"))
+		framework.ExpectNoError(framework.DeleteRCAndPods(f.Client, f.ClientSet, f.Namespace.Name, "node-selector"))
 	})

 	It("should scale up correct target pool [Feature:ClusterSizeAutoscalingScaleUp]", func() {

@@ -232,7 +232,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {

 		By("Creating rc with 2 pods too big to fit default-pool but fitting extra-pool")
 		ReserveMemory(f, "memory-reservation", 2, 2*memCapacityMb, false)
-		defer framework.DeleteRCAndPods(f.Client, f.Namespace.Name, "memory-reservation")
+		defer framework.DeleteRCAndPods(f.Client, f.ClientSet, f.Namespace.Name, "memory-reservation")

 		// Apparently the GKE master is restarted a couple of minutes after the node pool is added,
 		// resetting all the timers in the scale down code. Adding 5 extra minutes to work around

@@ -261,7 +261,7 @@ var _ = framework.KubeDescribe("DaemonRestart [Disruptive]", func() {
 		// that it had the opportunity to create/delete pods, if it were going to do so. Scaling the RC
 		// to the same size achieves this, because the scale operation advances the RC's sequence number
 		// and awaits it to be observed and reported back in the RC's status.
-		framework.ScaleRC(f.Client, ns, rcName, numPods, true)
+		framework.ScaleRC(f.Client, f.ClientSet, ns, rcName, numPods, true)

 		// Only check the keys, the pods can be different if the kubelet updated it.
 		// TODO: Can it really?

@@ -292,9 +292,9 @@ var _ = framework.KubeDescribe("DaemonRestart [Disruptive]", func() {
 		restarter.kill()
 		// This is best effort to try and create pods while the scheduler is down,
 		// since we don't know exactly when it is restarted after the kill signal.
-		framework.ExpectNoError(framework.ScaleRC(f.Client, ns, rcName, numPods+5, false))
+		framework.ExpectNoError(framework.ScaleRC(f.Client, f.ClientSet, ns, rcName, numPods+5, false))
 		restarter.waitUp()
-		framework.ExpectNoError(framework.ScaleRC(f.Client, ns, rcName, numPods+5, true))
+		framework.ExpectNoError(framework.ScaleRC(f.Client, f.ClientSet, ns, rcName, numPods+5, true))
 	})

 	It("Kubelet should not restart containers across restart", func() {

@@ -29,6 +29,7 @@ import (
 	"k8s.io/kubernetes/pkg/api/resource"
 	"k8s.io/kubernetes/pkg/api/unversioned"
 	"k8s.io/kubernetes/pkg/client/cache"
+	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
 	"k8s.io/kubernetes/pkg/fields"
 	"k8s.io/kubernetes/pkg/labels"

@@ -54,6 +55,7 @@ var MaxContainerFailures = 0
 type DensityTestConfig struct {
 	Configs      []framework.RCConfig
 	Client       *client.Client
+	ClientSet    internalclientset.Interface
 	Namespace    string
 	PollInterval time.Duration
 	PodCount     int

@@ -328,7 +330,7 @@ func cleanupDensityTest(dtc DensityTestConfig) {
 			framework.ExpectNoError(err)
 		} else {
 			By("Cleaning up the replication controller and pods")
-			err := framework.DeleteRCAndPods(dtc.Client, dtc.Namespace, rcName)
+			err := framework.DeleteRCAndPods(dtc.Client, dtc.ClientSet, dtc.Namespace, rcName)
 			framework.ExpectNoError(err)
 		}
 	}

@@ -487,7 +489,9 @@ var _ = framework.KubeDescribe("Density", func() {
 			}
 		}

-		dConfig := DensityTestConfig{Client: c,
+		dConfig := DensityTestConfig{
+			Client:       c,
+			ClientSet:    f.ClientSet,
 			Configs:      RCConfigs,
 			PodCount:     totalPods,
 			Namespace:    ns,

@@ -705,7 +709,9 @@ var _ = framework.KubeDescribe("Density", func() {
 				Silent: true,
 			}
 		}
-		dConfig := DensityTestConfig{Client: c,
+		dConfig := DensityTestConfig{
+			Client:       c,
+			ClientSet:    f.ClientSet,
 			Configs:      RCConfigs,
 			PodCount:     totalPods,
 			Namespace:    ns,

@@ -377,7 +377,7 @@ func testNoWrappedVolumeRace(f *framework.Framework, volumes []api.Volume, volum
 	Expect(err).NotTo(HaveOccurred(), "error creating replication controller")

 	defer func() {
-		err := framework.DeleteRCAndPods(f.Client, f.Namespace.Name, rcName)
+		err := framework.DeleteRCAndPods(f.Client, f.ClientSet, f.Namespace.Name, rcName)
 		framework.ExpectNoError(err)
 	}()

@@ -503,7 +503,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
 			Expect(err).NotTo(HaveOccurred())

 			By("scaling rethinkdb")
-			framework.ScaleRC(c, ns, "rethinkdb-rc", 2, true)
+			framework.ScaleRC(c, f.ClientSet, ns, "rethinkdb-rc", 2, true)
 			checkDbInstances()

 			By("starting admin")

@@ -546,7 +546,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
 			Expect(err).NotTo(HaveOccurred())

 			By("scaling hazelcast")
-			framework.ScaleRC(c, ns, "hazelcast", 2, true)
+			framework.ScaleRC(c, f.ClientSet, ns, "hazelcast", 2, true)
 			forEachPod("name", "hazelcast", func(pod api.Pod) {
 				_, err := framework.LookForStringInLog(ns, pod.Name, "hazelcast", "Members [2]", serverStartTimeout)
 				Expect(err).NotTo(HaveOccurred())

@@ -60,13 +60,13 @@ const (
 type Framework struct {
 	BaseName string

-	// Client is manually created and should not be used unless absolutely necessary. Use Clientset_1_5
+	// Client is manually created and should not be used unless absolutely necessary. Use ClientSet_1_5
 	// where possible.
 	Client *client.Client
-	// ClientSet uses internal objects, you should use Clientset_1_5 where possible.
+	// ClientSet uses internal objects, you should use ClientSet_1_5 where possible.
 	ClientSet internalclientset.Interface

-	Clientset_1_5 *release_1_5.Clientset
+	ClientSet_1_5 *release_1_5.Clientset
 	StagingClient *staging.Clientset
 	ClientPool    dynamic.ClientPool

@@ -201,7 +201,7 @@ func (f *Framework) BeforeEach() {
 		f.Client = c
 		f.ClientSet, err = internalclientset.NewForConfig(config)
 		Expect(err).NotTo(HaveOccurred())
-		f.Clientset_1_5, err = release_1_5.NewForConfig(config)
+		f.ClientSet_1_5, err = release_1_5.NewForConfig(config)
 		Expect(err).NotTo(HaveOccurred())
 		clientRepoConfig := getClientRepoConfig(config)
 		f.StagingClient, err = staging.NewForConfig(clientRepoConfig)

@@ -366,7 +366,7 @@ func (f *Framework) AfterEach() {
 	// Print events if the test failed.
 	if CurrentGinkgoTestDescription().Failed && TestContext.DumpLogsOnFailure {
 		// Pass both the unversioned client and the versioned clientset, till we have removed all uses of the unversioned client.
-		DumpAllNamespaceInfo(f.Client, f.Clientset_1_5, f.Namespace.Name)
+		DumpAllNamespaceInfo(f.Client, f.ClientSet_1_5, f.Namespace.Name)
 		By(fmt.Sprintf("Dumping a list of prepulled images on each node"))
 		LogContainersInPodsWithLabels(f.Client, api.NamespaceSystem, ImagePullerLabels, "image-puller")
 		if f.federated {

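Aside, not part of the diff: the hunk above shows where the framework now builds its clientset, via `internalclientset.NewForConfig(config)` in `BeforeEach`. A minimal sketch of that construction, assuming a loaded `*restclient.Config` (the package paths are the ones imported elsewhere in this diff):

```go
// Minimal sketch: building the internal clientset straight from a rest config,
// as Framework.BeforeEach does above. Once tests hold a clientset like this,
// FromUnversionedClient has no remaining callers.
package example

import (
	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/client/restclient"
)

func newClientSet(config *restclient.Config) (internalclientset.Interface, error) {
	// NewForConfig returns a *Clientset, which satisfies internalclientset.Interface
	// and can be handed to the reworked ScaleRC / DeleteRCAndPods helpers.
	return internalclientset.NewForConfig(config)
}
```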
@@ -57,7 +57,6 @@ import (
 	"k8s.io/kubernetes/pkg/client/typed/discovery"
 	"k8s.io/kubernetes/pkg/client/typed/dynamic"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
-	clientsetadapter "k8s.io/kubernetes/pkg/client/unversioned/adapters/internalclientset"
 	"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
 	clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
 	gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"

@@ -3224,9 +3223,9 @@ func RemoveTaintOffNode(c *client.Client, nodeName string, taint api.Taint) {
 	}
 }

-func ScaleRC(c *client.Client, ns, name string, size uint, wait bool) error {
+func ScaleRC(c *client.Client, clientset clientset.Interface, ns, name string, size uint, wait bool) error {
 	By(fmt.Sprintf("Scaling replication controller %s in namespace %s to %d", name, ns, size))
-	scaler, err := kubectl.ScalerFor(api.Kind("ReplicationController"), clientsetadapter.FromUnversionedClient(c))
+	scaler, err := kubectl.ScalerFor(api.Kind("ReplicationController"), clientset)
 	if err != nil {
 		return err
 	}

@@ -3333,7 +3332,7 @@ func WaitForPodsWithLabel(c *client.Client, ns string, label labels.Selector) (p
 }

 // DeleteRCAndPods deletes a Replication Controller and all pods it spawned
-func DeleteRCAndPods(c *client.Client, ns, name string) error {
+func DeleteRCAndPods(c *client.Client, clientset clientset.Interface, ns, name string) error {
 	By(fmt.Sprintf("deleting replication controller %s in namespace %s", name, ns))
 	rc, err := c.ReplicationControllers(ns).Get(name)
 	if err != nil {

@@ -3343,7 +3342,7 @@ func DeleteRCAndPods(c *client.Client, ns, name string) error {
 		}
 		return err
 	}
-	reaper, err := kubectl.ReaperForReplicationController(clientsetadapter.FromUnversionedClient(c).Core(), 10*time.Minute)
+	reaper, err := kubectl.ReaperForReplicationController(clientset.Core(), 10*time.Minute)
 	if err != nil {
 		if apierrs.IsNotFound(err) {
 			Logf("RC %s was already deleted: %v", name, err)

@@ -3481,7 +3480,7 @@ func waitForPodsGone(ps *PodStore, interval, timeout time.Duration) error {
 }

 // Delete a ReplicaSet and all pods it spawned
-func DeleteReplicaSet(c *client.Client, ns, name string) error {
+func DeleteReplicaSet(c *client.Client, clientset clientset.Interface, ns, name string) error {
 	By(fmt.Sprintf("deleting ReplicaSet %s in namespace %s", name, ns))
 	rc, err := c.Extensions().ReplicaSets(ns).Get(name)
 	if err != nil {

@@ -3491,7 +3490,7 @@ func DeleteReplicaSet(c *client.Client, ns, name string) error {
 		}
 		return err
 	}
-	reaper, err := kubectl.ReaperFor(extensions.Kind("ReplicaSet"), clientsetadapter.FromUnversionedClient(c))
+	reaper, err := kubectl.ReaperFor(extensions.Kind("ReplicaSet"), clientset)
 	if err != nil {
 		if apierrs.IsNotFound(err) {
 			Logf("ReplicaSet %s was already deleted: %v", name, err)

@@ -4722,7 +4721,7 @@ func GetNodePortURL(client *client.Client, ns, name string, svcPort int) (string

 // ScaleRCByLabels scales an RC via ns/label lookup. If replicas == 0 it waits till
 // none are running, otherwise it does what a synchronous scale operation would do.
-func ScaleRCByLabels(client *client.Client, ns string, l map[string]string, replicas uint) error {
+func ScaleRCByLabels(client *client.Client, clientset clientset.Interface, ns string, l map[string]string, replicas uint) error {
 	listOpts := api.ListOptions{LabelSelector: labels.SelectorFromSet(labels.Set(l))}
 	rcs, err := client.ReplicationControllers(ns).List(listOpts)
 	if err != nil {

@@ -4734,7 +4733,7 @@ func ScaleRCByLabels(client *client.Client, ns string, l map[string]string, repl
 	Logf("Scaling %v RCs with labels %v in ns %v to %v replicas.", len(rcs.Items), l, ns, replicas)
 	for _, labelRC := range rcs.Items {
 		name := labelRC.Name
-		if err := ScaleRC(client, ns, name, replicas, false); err != nil {
+		if err := ScaleRC(client, clientset, ns, name, replicas, false); err != nil {
 			return err
 		}
 		rc, err := client.ReplicationControllers(ns).Get(name)

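Not part of the commit: during the transition every helper above takes both the old unversioned client and the new clientset, so a test suite can bundle the two in a small, purely hypothetical wrapper to keep call sites short:

```go
// Hypothetical convenience wrapper; the helper signatures are the ones from
// the hunks above, the wrapper itself is not in the diff.
package example

import (
	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	client "k8s.io/kubernetes/pkg/client/unversioned"
	"k8s.io/kubernetes/test/e2e/framework"
)

type clients struct {
	c  *client.Client
	cs internalclientset.Interface
}

// deleteRC forwards to the reworked helper, passing both clients.
func (cl clients) deleteRC(ns, name string) error {
	return framework.DeleteRCAndPods(cl.c, cl.cs, ns, name)
}

// scaleRC scales a replication controller and waits for completion.
func (cl clients) scaleRC(ns, name string, size uint) error {
	return framework.ScaleRC(cl.c, cl.cs, ns, name, size, true)
}
```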
@@ -118,7 +118,7 @@ func gatherMetrics(f *framework.Framework) {
 var _ = framework.KubeDescribe("Garbage collector", func() {
 	f := framework.NewDefaultFramework("gc")
 	It("[Feature:GarbageCollector] should delete pods created by rc when not orphaning", func() {
-		clientSet := f.Clientset_1_5
+		clientSet := f.ClientSet_1_5
 		rcClient := clientSet.Core().ReplicationControllers(f.Namespace.Name)
 		podClient := clientSet.Core().Pods(f.Namespace.Name)
 		rcName := "simpletest.rc"

@@ -169,7 +169,7 @@ var _ = framework.KubeDescribe("Garbage collector", func() {
 	})

 	It("[Feature:GarbageCollector] should orphan pods created by rc if delete options say so", func() {
-		clientSet := f.Clientset_1_5
+		clientSet := f.ClientSet_1_5
 		rcClient := clientSet.Core().ReplicationControllers(f.Namespace.Name)
 		podClient := clientSet.Core().Pods(f.Namespace.Name)
 		rcName := "simpletest.rc"

@@ -231,7 +231,7 @@ var _ = framework.KubeDescribe("Garbage collector", func() {
 	})

 	It("[Feature:GarbageCollector] should orphan pods created by rc if deleteOptions.OrphanDependents is nil", func() {
-		clientSet := f.Clientset_1_5
+		clientSet := f.ClientSet_1_5
 		rcClient := clientSet.Core().ReplicationControllers(f.Namespace.Name)
 		podClient := clientSet.Core().Pods(f.Namespace.Name)
 		rcName := "simpletest.rc"

@@ -122,7 +122,7 @@ func observeObjectDeletion(w watch.Interface) (obj runtime.Object) {
 var _ = framework.KubeDescribe("Generated release_1_5 clientset", func() {
 	f := framework.NewDefaultFramework("clientset")
 	It("should create pods, delete pods, watch pods", func() {
-		podClient := f.Clientset_1_5.Core().Pods(f.Namespace.Name)
+		podClient := f.ClientSet_1_5.Core().Pods(f.Namespace.Name)
 		By("constructing the pod")
 		name := "pod" + string(uuid.NewUUID())
 		value := strconv.Itoa(time.Now().Nanosecond())

@@ -198,7 +198,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
 		pods, err := clusterState().WaitFor(atLeast, framework.PodStartTimeout)
 		if err != nil || len(pods) < atLeast {
 			// TODO: Generalize integrating debug info into these tests so we always get debug info when we need it
-			framework.DumpAllNamespaceInfo(c, f.Clientset_1_5, ns)
+			framework.DumpAllNamespaceInfo(c, f.ClientSet_1_5, ns)
 			framework.Failf("Verified %v of %v pods , error : %v", len(pods), atLeast, err)
 		}
 	}

@@ -205,7 +205,7 @@ var _ = framework.KubeDescribe("kubelet", func() {
 			}

 			By("Deleting the RC")
-			framework.DeleteRCAndPods(f.Client, f.Namespace.Name, rcName)
+			framework.DeleteRCAndPods(f.Client, f.ClientSet, f.Namespace.Name, rcName)
 			// Check that the pods really are gone by querying /runningpods on the
 			// node. The /runningpods handler checks the container runtime (or its
 			// cache) and returns a list of running pods. Some possible causes of

@@ -115,7 +115,7 @@ func runResourceTrackingTest(f *framework.Framework, podsPerNode int, nodeNames
 	verifyCPULimits(expectedCPU, cpuSummary)

 	By("Deleting the RC")
-	framework.DeleteRCAndPods(f.Client, f.Namespace.Name, rcName)
+	framework.DeleteRCAndPods(f.Client, f.ClientSet, f.Namespace.Name, rcName)
 }

 func verifyMemoryLimits(c *client.Client, expected framework.ResourceUsagePerContainer, actual framework.ResourceUsagePerNode) {

@@ -26,6 +26,8 @@ import (
 	"time"

 	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+	unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
 	"k8s.io/kubernetes/pkg/labels"
 	"k8s.io/kubernetes/pkg/util/intstr"

@@ -321,7 +323,7 @@ func scaleRC(wg *sync.WaitGroup, config *framework.RCConfig, scalingTime time.Du

 	sleepUpTo(scalingTime)
 	newSize := uint(rand.Intn(config.Replicas) + config.Replicas/2)
-	framework.ExpectNoError(framework.ScaleRC(config.Client, config.Namespace, config.Name, newSize, true),
+	framework.ExpectNoError(framework.ScaleRC(config.Client, coreClientSetFromUnversioned(config.Client), config.Namespace, config.Name, newSize, true),
 		fmt.Sprintf("scaling rc %s for the first time", config.Name))
 	selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": config.Name}))
 	options := api.ListOptions{

@@ -349,6 +351,17 @@ func deleteRC(wg *sync.WaitGroup, config *framework.RCConfig, deletingTime time.
 	if framework.TestContext.GarbageCollectorEnabled {
 		framework.ExpectNoError(framework.DeleteRCAndWaitForGC(config.Client, config.Namespace, config.Name), fmt.Sprintf("deleting rc %s", config.Name))
 	} else {
-		framework.ExpectNoError(framework.DeleteRCAndPods(config.Client, config.Namespace, config.Name), fmt.Sprintf("deleting rc %s", config.Name))
+		framework.ExpectNoError(framework.DeleteRCAndPods(config.Client, coreClientSetFromUnversioned(config.Client), config.Namespace, config.Name), fmt.Sprintf("deleting rc %s", config.Name))
 	}
 }
+
+// coreClientSetFromUnversioned adapts just enough of an unversioned.Client to work with the scale RC function
+func coreClientSetFromUnversioned(c *client.Client) internalclientset.Interface {
+	var clientset internalclientset.Clientset
+	if c != nil {
+		clientset.CoreClient = unversionedcore.New(c.RESTClient)
+	} else {
+		clientset.CoreClient = unversionedcore.New(nil)
+	}
+	return &clientset
+}
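The load test keeps this tiny local shim instead of using `f.ClientSet` because its `RCConfig` only carries an unversioned `*client.Client`, and only the core client is needed by the RC scaler and reaper here. A sketch of the call pattern, written as if it lived next to the helper in the same file and package (`cfg` is a hypothetical `*framework.RCConfig`):

```go
// Sketch only: how this file's call sites use the shim above.
func scaleWithShim(cfg *framework.RCConfig, newSize uint) {
	// The shim populates just CoreClient, which is all that the reworked
	// ScaleRC / DeleteRCAndPods need for replication controllers.
	cs := coreClientSetFromUnversioned(cfg.Client)
	framework.ExpectNoError(framework.ScaleRC(cfg.Client, cs, cfg.Namespace, cfg.Name, newSize, true))
}
```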
@@ -154,7 +154,7 @@ func proxyContext(version string) {
 			CreatedPods:   &pods,
 		}
 		Expect(framework.RunRC(cfg)).NotTo(HaveOccurred())
-		defer framework.DeleteRCAndPods(f.Client, f.Namespace.Name, cfg.Name)
+		defer framework.DeleteRCAndPods(f.Client, f.ClientSet, f.Namespace.Name, cfg.Name)

 		Expect(f.WaitForAnEndpoint(service.Name)).NotTo(HaveOccurred())

@@ -86,7 +86,7 @@ func ServeImageOrFail(f *framework.Framework, test string, image string) {
 	// Cleanup the replication controller when we are done.
 	defer func() {
 		// Resize the replication controller to zero to get rid of pods.
-		if err := framework.DeleteRCAndPods(f.Client, f.Namespace.Name, controller.Name); err != nil {
+		if err := framework.DeleteRCAndPods(f.Client, f.ClientSet, f.Namespace.Name, controller.Name); err != nil {
 			framework.Logf("Failed to cleanup replication controller %v: %v.", controller.Name, err)
 		}
 	}()

@@ -86,7 +86,7 @@ func ReplicaSetServeImageOrFail(f *framework.Framework, test string, image strin
 	// Cleanup the ReplicaSet when we are done.
 	defer func() {
 		// Resize the ReplicaSet to zero to get rid of pods.
-		if err := framework.DeleteReplicaSet(f.Client, f.Namespace.Name, rs.Name); err != nil {
+		if err := framework.DeleteReplicaSet(f.Client, f.ClientSet, f.Namespace.Name, rs.Name); err != nil {
 			framework.Logf("Failed to cleanup ReplicaSet %v: %v.", rs.Name, err)
 		}
 	}()

@@ -48,7 +48,7 @@ var _ = framework.KubeDescribe("Rescheduler [Serial]", func() {
 	It("should ensure that critical pod is scheduled in case there is no resources available", func() {
 		By("reserving all available cpu")
 		err := reserveAllCpu(f, "reserve-all-cpu", totalMillicores)
-		defer framework.DeleteRCAndPods(f.Client, ns, "reserve-all-cpu")
+		defer framework.DeleteRCAndPods(f.Client, f.ClientSet, ns, "reserve-all-cpu")
 		framework.ExpectNoError(err)

 		By("creating a new instance of DNS and waiting for DNS to be scheduled")

@@ -61,8 +61,8 @@ var _ = framework.KubeDescribe("Rescheduler [Serial]", func() {
 		rc := rcs.Items[0]
 		replicas := uint(rc.Spec.Replicas)

-		err = framework.ScaleRC(f.Client, api.NamespaceSystem, rc.Name, replicas+1, true)
-		defer framework.ExpectNoError(framework.ScaleRC(f.Client, api.NamespaceSystem, rc.Name, replicas, true))
+		err = framework.ScaleRC(f.Client, f.ClientSet, api.NamespaceSystem, rc.Name, replicas+1, true)
+		defer framework.ExpectNoError(framework.ScaleRC(f.Client, f.ClientSet, api.NamespaceSystem, rc.Name, replicas, true))
 		framework.ExpectNoError(err)
 	})
 })

@@ -72,7 +72,7 @@ func reserveAllCpu(f *framework.Framework, id string, millicores int) error {
 	replicas := millicores / 100

 	ReserveCpu(f, id, 1, 100)
-	framework.ExpectNoError(framework.ScaleRC(f.Client, f.Namespace.Name, id, uint(replicas), false))
+	framework.ExpectNoError(framework.ScaleRC(f.Client, f.ClientSet, f.Namespace.Name, id, uint(replicas), false))

 	for start := time.Now(); time.Since(start) < timeout; time.Sleep(10 * time.Second) {
 		pods, err := framework.GetPodsInNamespace(f.Client, f.Namespace.Name, framework.ImagePullerLabels)

@@ -186,19 +186,18 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
 	var totalPodCapacity int64
 	var RCName string
 	var ns string
+	f := framework.NewDefaultFramework("sched-pred")
 	ignoreLabels := framework.ImagePullerLabels

 	AfterEach(func() {
 		rc, err := c.ReplicationControllers(ns).Get(RCName)
 		if err == nil && rc.Spec.Replicas != 0 {
 			By("Cleaning up the replication controller")
-			err := framework.DeleteRCAndPods(c, ns, RCName)
+			err := framework.DeleteRCAndPods(c, f.ClientSet, ns, RCName)
 			framework.ExpectNoError(err)
 		}
 	})

-	f := framework.NewDefaultFramework("sched-pred")
-
 	BeforeEach(func() {
 		c = f.Client
 		ns = f.Namespace.Name

@@ -957,7 +956,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
 		// cannot be scheduled onto it.
 		By("Launching two pods on two distinct nodes to get two node names")
 		CreateHostPortPods(f, "host-port", 2, true)
-		defer framework.DeleteRCAndPods(f.Client, f.Namespace.Name, "host-port")
+		defer framework.DeleteRCAndPods(f.Client, f.ClientSet, f.Namespace.Name, "host-port")
 		podList, err := c.Pods(ns).List(api.ListOptions{})
 		ExpectNoError(err)
 		Expect(len(podList.Items)).To(Equal(2))

@@ -33,6 +33,7 @@ import (
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/errors"
 	"k8s.io/kubernetes/pkg/api/service"
+	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
 	"k8s.io/kubernetes/pkg/controller/endpoint"
 	"k8s.io/kubernetes/pkg/labels"

@@ -309,7 +310,7 @@ var _ = framework.KubeDescribe("Services", func() {

 		// Stop service 1 and make sure it is gone.
 		By("stopping service1")
-		framework.ExpectNoError(stopServeHostnameService(c, ns, "service1"))
+		framework.ExpectNoError(stopServeHostnameService(c, f.ClientSet, ns, "service1"))

 		By("verifying service1 is not up")
 		framework.ExpectNoError(verifyServeHostnameServiceDown(c, host, svc1IP, servicePort))

@@ -342,11 +343,11 @@ var _ = framework.KubeDescribe("Services", func() {
 		svc1 := "service1"
 		svc2 := "service2"

-		defer func() { framework.ExpectNoError(stopServeHostnameService(c, ns, svc1)) }()
+		defer func() { framework.ExpectNoError(stopServeHostnameService(c, f.ClientSet, ns, svc1)) }()
 		podNames1, svc1IP, err := startServeHostnameService(c, ns, svc1, servicePort, numPods)
 		Expect(err).NotTo(HaveOccurred())

-		defer func() { framework.ExpectNoError(stopServeHostnameService(c, ns, svc2)) }()
+		defer func() { framework.ExpectNoError(stopServeHostnameService(c, f.ClientSet, ns, svc2)) }()
 		podNames2, svc2IP, err := startServeHostnameService(c, ns, svc2, servicePort, numPods)
 		Expect(err).NotTo(HaveOccurred())

@@ -391,7 +392,7 @@ var _ = framework.KubeDescribe("Services", func() {
 		ns := f.Namespace.Name
 		numPods, servicePort := 3, 80

-		defer func() { framework.ExpectNoError(stopServeHostnameService(c, ns, "service1")) }()
+		defer func() { framework.ExpectNoError(stopServeHostnameService(c, f.ClientSet, ns, "service1")) }()
 		podNames1, svc1IP, err := startServeHostnameService(c, ns, "service1", servicePort, numPods)
 		Expect(err).NotTo(HaveOccurred())

@@ -416,7 +417,7 @@ var _ = framework.KubeDescribe("Services", func() {
 		framework.ExpectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames1, svc1IP, servicePort))

 		// Create a new service and check if it's not reusing IP.
-		defer func() { framework.ExpectNoError(stopServeHostnameService(c, ns, "service2")) }()
+		defer func() { framework.ExpectNoError(stopServeHostnameService(c, f.ClientSet, ns, "service2")) }()
 		podNames2, svc2IP, err := startServeHostnameService(c, ns, "service2", servicePort, numPods)
 		Expect(err).NotTo(HaveOccurred())

@@ -1647,8 +1648,8 @@ func startServeHostnameService(c *client.Client, ns, name string, port, replicas
 	return podNames, serviceIP, nil
 }

-func stopServeHostnameService(c *client.Client, ns, name string) error {
-	if err := framework.DeleteRCAndPods(c, ns, name); err != nil {
+func stopServeHostnameService(c *client.Client, clientset internalclientset.Interface, ns, name string) error {
+	if err := framework.DeleteRCAndPods(c, clientset, ns, name); err != nil {
 		return err
 	}
 	if err := c.Services(ns).Delete(name); err != nil {

@@ -126,7 +126,7 @@ func runServiceLatencies(f *framework.Framework, inParallel, total int) (output
 	if err := framework.RunRC(cfg); err != nil {
 		return nil, err
 	}
-	defer framework.DeleteRCAndPods(f.Client, f.Namespace.Name, cfg.Name)
+	defer framework.DeleteRCAndPods(f.Client, f.ClientSet, f.Namespace.Name, cfg.Name)

 	// Run a single watcher, to reduce the number of API calls we have to
 	// make; this is to minimize the timing error. It's how kube-proxy

@@ -219,7 +219,7 @@ func SpreadRCOrFail(f *framework.Framework, replicaCount int32, image string) {
 	// Cleanup the replication controller when we are done.
 	defer func() {
 		// Resize the replication controller to zero to get rid of pods.
-		if err := framework.DeleteRCAndPods(f.Client, f.Namespace.Name, controller.Name); err != nil {
+		if err := framework.DeleteRCAndPods(f.Client, f.ClientSet, f.Namespace.Name, controller.Name); err != nil {
 			framework.Logf("Failed to cleanup replication controller %v: %v.", controller.Name, err)
 		}
 	}()

@@ -42,10 +42,8 @@ import (
 	"k8s.io/kubernetes/pkg/client/record"
 	"k8s.io/kubernetes/pkg/client/restclient"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
-	clientsetadapter "k8s.io/kubernetes/pkg/client/unversioned/adapters/internalclientset"
 	"k8s.io/kubernetes/pkg/controller"
 	replicationcontroller "k8s.io/kubernetes/pkg/controller/replication"
 	"k8s.io/kubernetes/pkg/fields"
 	"k8s.io/kubernetes/pkg/genericapiserver"
 	"k8s.io/kubernetes/pkg/genericapiserver/authorizer"
 	"k8s.io/kubernetes/pkg/kubectl"

@@ -300,8 +298,8 @@ func RCFromManifest(fileName string) *api.ReplicationController {
 }

 // StopRC stops the rc via kubectl's stop library
-func StopRC(rc *api.ReplicationController, restClient *client.Client) error {
-	reaper, err := kubectl.ReaperFor(api.Kind("ReplicationController"), clientsetadapter.FromUnversionedClient(restClient))
+func StopRC(rc *api.ReplicationController, clientset clientset.Interface) error {
+	reaper, err := kubectl.ReaperFor(api.Kind("ReplicationController"), clientset)
 	if err != nil || reaper == nil {
 		return err
 	}

@@ -313,8 +311,8 @@ func StopRC(rc *api.ReplicationController, restClient *client.Client) error {
 }

 // ScaleRC scales the given rc to the given replicas.
-func ScaleRC(name, ns string, replicas int32, restClient *client.Client) (*api.ReplicationController, error) {
-	scaler, err := kubectl.ScalerFor(api.Kind("ReplicationController"), clientsetadapter.FromUnversionedClient(restClient))
+func ScaleRC(name, ns string, replicas int32, clientset clientset.Interface) (*api.ReplicationController, error) {
+	scaler, err := kubectl.ScalerFor(api.Kind("ReplicationController"), clientset)
 	if err != nil {
 		return nil, err
 	}

@@ -324,64 +322,13 @@ func ScaleRC(name, ns string, replicas int32, restClient *client.Client) (*api.R
 	if err != nil {
 		return nil, err
 	}
-	scaled, err := restClient.ReplicationControllers(ns).Get(name)
+	scaled, err := clientset.Core().ReplicationControllers(ns).Get(name)
 	if err != nil {
 		return nil, err
 	}
 	return scaled, nil
 }

-// StartRC creates given rc if it doesn't already exist, then updates it via kubectl's scaler.
-func StartRC(controller *api.ReplicationController, restClient *client.Client) (*api.ReplicationController, error) {
-	created, err := restClient.ReplicationControllers(controller.Namespace).Get(controller.Name)
-	if err != nil {
-		glog.Infof("Rc %v doesn't exist, creating", controller.Name)
-		created, err = restClient.ReplicationControllers(controller.Namespace).Create(controller)
-		if err != nil {
-			return nil, err
-		}
-	}
-	// If we just created an rc, wait till it creates its replicas.
-	return ScaleRC(created.Name, created.Namespace, controller.Spec.Replicas, restClient)
-}
-
-// StartPods check for numPods in namespace. If they exist, it no-ops, otherwise it starts up
-// a temp rc, scales it to match numPods, then deletes the rc leaving behind the pods.
-func StartPods(namespace string, numPods int, host string, restClient *client.Client) error {
-	start := time.Now()
-	defer func() {
-		glog.Infof("StartPods took %v with numPods %d", time.Since(start), numPods)
-	}()
-	hostField := fields.OneTermEqualSelector(api.PodHostField, host)
-	options := api.ListOptions{FieldSelector: hostField}
-	pods, err := restClient.Pods(namespace).List(options)
-	if err != nil || len(pods.Items) == numPods {
-		return err
-	}
-	glog.Infof("Found %d pods that match host %v, require %d", len(pods.Items), hostField, numPods)
-	// For the sake of simplicity, assume all pods in namespace have selectors matching TestRCManifest.
-	controller := RCFromManifest(TestRCManifest)
-
-	// Overwrite namespace
-	controller.ObjectMeta.Namespace = namespace
-	controller.Spec.Template.ObjectMeta.Namespace = namespace
-
-	// Make the rc unique to the given host.
-	controller.Spec.Replicas = int32(numPods)
-	controller.Spec.Template.Spec.NodeName = host
-	controller.Name = controller.Name + host
-	controller.Spec.Selector["host"] = host
-	controller.Spec.Template.Labels["host"] = host
-
-	if rc, err := StartRC(controller, restClient); err != nil {
-		return err
-	} else {
-		// Delete the rc, otherwise when we restart master components for the next benchmark
-		// the rc controller will race with the pods controller in the rc manager.
-		return restClient.ReplicationControllers(namespace).Delete(rc.Name, nil)
-	}
-}
-
 func RunAMaster(masterConfig *master.Config) (*master.Master, *httptest.Server) {
 	if masterConfig == nil {
 		masterConfig = NewMasterConfig()
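Not part of the diff: with the new signatures, an integration test supplies a clientset built from the test API server's address instead of an unversioned client. A sketch, assuming the integration framework package that contains the helpers above (path is an assumption) and a server returned by `RunAMaster`:

```go
// Illustrative sketch; "s" is a hypothetical *httptest.Server from RunAMaster.
package example

import (
	"net/http/httptest"

	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/client/restclient"
	"k8s.io/kubernetes/test/integration/framework"
)

func scaleViaClientset(s *httptest.Server, ns, rcName string, replicas int32) error {
	clientSet, err := internalclientset.NewForConfig(&restclient.Config{Host: s.URL})
	if err != nil {
		return err
	}
	// ScaleRC now takes the clientset instead of an unversioned *client.Client.
	_, err = framework.ScaleRC(rcName, ns, replicas, clientSet)
	return err
}
```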