mirror of https://github.com/k3s-io/k3s

Add an option to pass client's QPS/burst to e2e framework

parent 38a30490ed
commit 110340c467
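
This commit replaces the e2e framework's single-argument constructor with NewFramework(baseName, options), where a new FrameworkOptions struct carries the client's QPS and burst limits; NewDefaultFramework(baseName) keeps the previous defaults (clientQPS: 5, clientBurst: 10). Every test suite is switched to one of the two constructors, and the density and load suites, which previously hand-built high-QPS clients in their BeforeEach blocks, now pass their limits through FrameworkOptions and reuse framework.Client.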
@@ -192,7 +192,7 @@ var _ = Describe("Addon update", func() {
 
 	var dir string
 	var sshClient *ssh.Client
-	f := NewFramework("addon-update-test")
+	f := NewDefaultFramework("addon-update-test")
 
 	BeforeEach(func() {
 		// This test requires:

@@ -33,7 +33,7 @@ const (
 
 var _ = Describe("Cadvisor", func() {
 
-	f := NewFramework("cadvisor")
+	f := NewDefaultFramework("cadvisor")
 
 	It("should be healthy on every node.", func() {
 		CheckCadvisorHealthOnAllNodes(f.Client, 5*time.Minute)

@@ -38,7 +38,7 @@ const (
 //
 // These tests take ~20 minutes to run each.
 var _ = Describe("Cluster size autoscaling [Feature:ClusterSizeAutoscaling] [Slow]", func() {
-	f := NewFramework("autoscaling")
+	f := NewDefaultFramework("autoscaling")
 	var nodeCount int
 	var coresPerNode int
 	var memCapacityMb int

@@ -160,7 +160,7 @@ var _ = Describe("Upgrade [Feature:Upgrade]", func() {
 		Logf("Version for %q is %q", testContext.UpgradeTarget, v)
 	})
 
-	f := NewFramework("cluster-upgrade")
+	f := NewDefaultFramework("cluster-upgrade")
 	var w *ServiceTestFixture
 	BeforeEach(func() {
 		By("Setting up the service, RC, and pods")

@@ -29,7 +29,7 @@ import (
 
 var _ = Describe("ConfigMap", func() {
 
-	f := NewFramework("configmap")
+	f := NewDefaultFramework("configmap")
 
 	It("should be consumable from pods in volume [Conformance]", func() {
 		name := "configmap-test-volume-" + string(util.NewUUID())

@@ -36,7 +36,7 @@ const (
 )
 
 var _ = Describe("Probing container", func() {
-	framework := NewFramework("container-probe")
+	framework := NewDefaultFramework("container-probe")
 	var podClient client.PodInterface
 	probe := webserverProbeBuilder{}
 

@@ -185,7 +185,7 @@ func getContainerRestarts(c *client.Client, ns string, labelSelector labels.Sele
 
 var _ = Describe("DaemonRestart [Disruptive]", func() {
 
-	framework := NewFramework("daemonrestart")
+	framework := NewDefaultFramework("daemonrestart")
 	rcName := "daemonrestart" + strconv.Itoa(numPods) + "-" + string(util.NewUUID())
 	labelSelector := labels.Set(map[string]string{"name": rcName}).AsSelector()
 	existingPods := cache.NewStore(cache.MetaNamespaceKeyFunc)

@@ -66,7 +66,7 @@ var _ = Describe("Daemon set", func() {
 		Expect(err).NotTo(HaveOccurred())
 	})
 
-	f = NewFramework("daemonsets")
+	f = NewDefaultFramework("daemonsets")
 
 	image := "gcr.io/google_containers/serve_hostname:1.1"
 	dsName := "daemon-set"

@@ -37,7 +37,7 @@ var _ = Describe("Kubernetes Dashboard", func() {
 		serverStartTimeout = 1 * time.Minute
 	)
 
-	f := NewFramework(uiServiceName)
+	f := NewDefaultFramework(uiServiceName)
 
 	It("should check that the kubernetes-dashboard instance is alive", func() {
 		By("Checking whether the kubernetes-dashboard service exists.")

@@ -131,19 +131,15 @@ var _ = Describe("Density", func() {
 
 	// Explicitly put here, to delete namespace at the end of the test
 	// (after measuring latency metrics, etc.).
-	framework := NewFramework("density")
+	options := FrameworkOptions{
+		clientQPS:   20,
+		clientBurst: 30,
+	}
+	framework := NewFramework("density", options)
 	framework.NamespaceDeletionTimeout = time.Hour
 
 	BeforeEach(func() {
-		// Explicitly create a client with higher QPS limits.
-		// However, make those at most comparable to components.
-		config, err := loadConfig()
-		Expect(err).NotTo(HaveOccurred())
-		config.QPS = 20
-		config.Burst = 30
-		c, err = loadClientFromConfig(config)
-		Expect(err).NotTo(HaveOccurred())
-
+		c = framework.Client
 		ns = framework.Namespace.Name
 
 		nodes := ListSchedulableNodesOrDie(c)

@@ -153,7 +149,7 @@ var _ = Describe("Density", func() {
 		// Terminating a namespace (deleting the remaining objects from it - which
 		// generally means events) can affect the current run. Thus we wait for all
 		// terminating namespace to be finally deleted before starting this test.
-		err = checkTestingNSDeletedExcept(c, ns)
+		err := checkTestingNSDeletedExcept(c, ns)
 		expectNoError(err)
 
 		uuid = string(util.NewUUID())

@@ -37,7 +37,7 @@ import (
 )
 
 var _ = Describe("Deployment", func() {
-	f := NewFramework("deployment")
+	f := NewDefaultFramework("deployment")
 
 	It("deployment should create new pods", func() {
 		testNewDeployment(f)

@@ -197,7 +197,7 @@ func validateDNSResults(f *Framework, pod *api.Pod, fileNames []string) {
 }
 
 var _ = Describe("DNS", func() {
-	f := NewFramework("dns")
+	f := NewDefaultFramework("dns")
 
 	It("should provide DNS for the cluster", func() {
 		// TODO: support DNS on vagrant #3580

@@ -25,7 +25,7 @@ import (
 )
 
 var _ = Describe("Docker Containers", func() {
-	framework := NewFramework("containers")
+	framework := NewDefaultFramework("containers")
 	var c *client.Client
 	var ns string
 

@@ -26,7 +26,7 @@ import (
 )
 
 var _ = Describe("Downward API", func() {
-	framework := NewFramework("downward-api")
+	framework := NewDefaultFramework("downward-api")
 
 	It("should provide pod name and namespace as env vars [Conformance]", func() {
 		podName := "downward-api-" + string(util.NewUUID())

@@ -31,7 +31,7 @@ var _ = Describe("Downward API volume", func() {
 	// How long to wait for a log pod to be displayed
 	const podLogTimeout = 45 * time.Second
 
-	f := NewFramework("downward-api")
+	f := NewDefaultFramework("downward-api")
 	It("should provide podname only [Conformance]", func() {
 		podName := "downwardapi-volume-" + string(util.NewUUID())
 		pod := downwardAPIVolumePodForSimpleTest(podName, "/etc/podname")

@@ -35,7 +35,7 @@ const (
 
 var _ = Describe("EmptyDir volumes", func() {
 
-	f := NewFramework("emptydir")
+	f := NewDefaultFramework("emptydir")
 
 	Context("when FSGroup is specified [Feature:FSGroup]", func() {
 		It("new files should be created with FSGroup ownership when container is root", func() {

@@ -29,7 +29,7 @@ import (
 // This test will create a pod with a secret volume and gitRepo volume
 // Thus requests a secret, a git server pod, and a git server service
 var _ = Describe("EmptyDir wrapper volumes", func() {
-	f := NewFramework("secrets")
+	f := NewDefaultFramework("secrets")
 
 	It("should becomes running", func() {
 		name := "secret-test-" + string(util.NewUUID())

@@ -31,7 +31,7 @@ import (
 )
 
 var _ = Describe("Cluster level logging using Elasticsearch [Feature:Elasticsearch]", func() {
-	f := NewFramework("es-logging")
+	f := NewDefaultFramework("es-logging")
 
 	BeforeEach(func() {
 		// TODO: For now assume we are only testing cluster logging with Elasticsearch

@@ -29,7 +29,7 @@ import (
 
 var _ = Describe("Etcd failure [Disruptive]", func() {
 
-	framework := NewFramework("etcd-failure")
+	framework := NewDefaultFramework("etcd-failure")
 
 	BeforeEach(func() {
 		// This test requires:

@@ -32,7 +32,7 @@ import (
 )
 
 var _ = Describe("Events", func() {
-	framework := NewFramework("events")
+	framework := NewDefaultFramework("events")
 
 	It("should be sent by kubelets and the scheduler about pods scheduling and running [Conformance]", func() {
 

@@ -42,7 +42,7 @@ except:
 	print 'err'`
 
 var _ = Describe("ClusterDns [Feature:Example]", func() {
-	framework := NewFramework("cluster-dns")
+	framework := NewDefaultFramework("cluster-dns")
 
 	var c *client.Client
 	BeforeEach(func() {

@@ -157,7 +157,7 @@ var _ = Describe("Pet Store [Feature:Example]", func() {
 
 	// The number of nodes dictates total number of generators/transaction expectations.
 	var nodeCount int
-	f := NewFramework("petstore")
+	f := NewDefaultFramework("petstore")
 
 	It(fmt.Sprintf("should scale to persist a nominal number ( %v ) of transactions in %v seconds", k8bpsSmokeTestFinalTransactions, k8bpsSmokeTestTimeout), func() {
 		nodes := ListSchedulableNodesOrDie(f.Client)

@@ -37,7 +37,7 @@ const (
 )
 
 var _ = Describe("[Feature:Example]", func() {
-	framework := NewFramework("examples")
+	framework := NewDefaultFramework("examples")
 	var c *client.Client
 	var ns string
 	BeforeEach(func() {

@@ -26,7 +26,7 @@ import (
 // These tests exercise the Kubernetes expansion syntax $(VAR).
 // For more information, see: docs/design/expansion.md
 var _ = Describe("Variable Expansion", func() {
-	framework := NewFramework("var-expansion")
+	framework := NewDefaultFramework("var-expansion")
 
 	It("should allow composing env vars into new env vars [Conformance]", func() {
 		podName := "var-expansion-" + string(util.NewUUID())
@@ -65,6 +65,9 @@ type Framework struct {
 	// we install a cleanup action before each test and clear it after. If we
 	// should abort, the AfterSuite hook should run all cleanup actions.
 	cleanupHandle CleanupActionHandle
+
+	// configuration for framework's client
+	options FrameworkOptions
 }
 
 type TestDataSummary interface {

@@ -72,12 +75,26 @@ type TestDataSummary interface {
 	PrintJSON() string
 }
 
+type FrameworkOptions struct {
+	clientQPS   float32
+	clientBurst int
+}
+
 // NewFramework makes a new framework and sets up a BeforeEach/AfterEach for
 // you (you can write additional before/after each functions).
-func NewFramework(baseName string) *Framework {
+func NewDefaultFramework(baseName string) *Framework {
+	options := FrameworkOptions{
+		clientQPS:   5,
+		clientBurst: 10,
+	}
+	return NewFramework(baseName, options)
+}
+
+func NewFramework(baseName string, options FrameworkOptions) *Framework {
 	f := &Framework{
 		BaseName:                 baseName,
 		addonResourceConstraints: make(map[string]resourceConstraint),
+		options:                  options,
 	}
 
 	BeforeEach(f.beforeEach)

@@ -93,7 +110,11 @@ func (f *Framework) beforeEach() {
 	f.cleanupHandle = AddCleanupAction(f.afterEach)
 
 	By("Creating a kubernetes client")
-	c, err := loadClient()
+	config, err := loadConfig()
+	Expect(err).NotTo(HaveOccurred())
+	config.QPS = f.options.clientQPS
+	config.Burst = f.options.clientBurst
+	c, err := loadClientFromConfig(config)
 	Expect(err).NotTo(HaveOccurred())
 
 	f.Client = c
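
For reference, a minimal sketch of the two call styles introduced above (the suite name and the QPS/burst values are illustrative, not taken from the commit):

	// Stock client limits, equivalent to the old one-argument NewFramework:
	f := NewDefaultFramework("example-suite")

	// Explicit limits for client-heavy suites, as the density and load suites do:
	framework := NewFramework("example-suite", FrameworkOptions{
		clientQPS:   20,
		clientBurst: 30,
	})

Since clientQPS and clientBurst are unexported, the options can only be set from within the e2e package, which is where all of these suites live.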
@@ -30,7 +30,7 @@ import (
 //
 // Slow by design (7 min)
 var _ = Describe("Garbage collector [Slow]", func() {
-	f := NewFramework("garbage-collector")
+	f := NewDefaultFramework("garbage-collector")
 	It("should handle the creation of 1000 pods", func() {
 		SkipUnlessProviderIs("gce")
 

@@ -33,7 +33,7 @@ import (
 )
 
 var _ = Describe("Generated release_1_2 clientset", func() {
-	framework := NewFramework("clientset")
+	framework := NewDefaultFramework("clientset")
 	It("should create pods, delete pods, watch pods", func() {
 		podClient := framework.Clientset_1_2.Core().Pods(framework.Namespace.Name)
 		By("creating the pod")

@@ -36,7 +36,7 @@ const (
 // These tests take ~20 minutes each.
 var _ = Describe("Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow]", func() {
 	var rc *ResourceConsumer
-	f := NewFramework("horizontal-pod-autoscaling")
+	f := NewDefaultFramework("horizontal-pod-autoscaling")
 
 	titleUp := "Should scale from 1 pod to 3 pods and from 3 to 5"
 	titleDown := "Should scale from 5 pods to 3 pods and from 3 to 1"

@@ -32,7 +32,7 @@ import (
 //TODO : Consolidate this code with the code for emptyDir.
 //This will require some smart.
 var _ = Describe("hostPath", func() {
-	framework := NewFramework("hostpath")
+	framework := NewDefaultFramework("hostpath")
 	var c *client.Client
 	var namespace *api.Namespace
 

@@ -30,7 +30,7 @@ import (
 //
 // Flaky issue #20272
 var _ = Describe("Initial Resources [Feature:InitialResources] [Flaky]", func() {
-	f := NewFramework("initial-resources")
+	f := NewDefaultFramework("initial-resources")
 
 	It("should set initial resources based on historical data", func() {
 		// TODO(piosz): Add cleanup data in InfluxDB that left from previous tests.

@@ -40,7 +40,7 @@ const (
 )
 
 var _ = Describe("Job", func() {
-	f := NewFramework("job")
+	f := NewDefaultFramework("job")
 	parallelism := 2
 	completions := 4
 	lotsOfFailures := 5 // more than completions

@@ -27,7 +27,7 @@ import (
 )
 
 var _ = Describe("Kibana Logging Instances Is Alive", func() {
-	f := NewFramework("kibana-logging")
+	f := NewDefaultFramework("kibana-logging")
 
 	BeforeEach(func() {
 		// TODO: For now assume we are only testing cluster logging with Elasticsearch

@@ -105,7 +105,7 @@ var (
 
 var _ = Describe("Kubectl client", func() {
 	defer GinkgoRecover()
-	framework := NewFramework("kubectl")
+	framework := NewDefaultFramework("kubectl")
 	var c *client.Client
 	var ns string
 	BeforeEach(func() {

@@ -89,7 +89,7 @@ func waitTillNPodsRunningOnNodes(c *client.Client, nodeNames sets.String, podNam
 var _ = Describe("kubelet", func() {
 	var numNodes int
 	var nodeNames sets.String
-	framework := NewFramework("kubelet")
+	framework := NewDefaultFramework("kubelet")
 	var resourceMonitor *resourceMonitor
 
 	BeforeEach(func() {

@@ -41,7 +41,7 @@ type KubeletManagedHostConfig struct {
 }
 
 var _ = Describe("KubeletManagedEtcHosts", func() {
-	f := NewFramework("e2e-kubelet-etc-hosts")
+	f := NewDefaultFramework("e2e-kubelet-etc-hosts")
 	config := &KubeletManagedHostConfig{
 		f: f,
 	}

@@ -179,7 +179,7 @@ func verifyCPULimits(expected containersCPUSummary, actual nodesCPUSummary) {
 // Slow by design (1 hour)
 var _ = Describe("Kubelet [Serial] [Slow]", func() {
 	var nodeNames sets.String
-	framework := NewFramework("kubelet-perf")
+	framework := NewDefaultFramework("kubelet-perf")
 	var rm *resourceMonitor
 
 	BeforeEach(func() {

@@ -66,7 +66,7 @@ type KubeProxyTestConfig struct {
 }
 
 var _ = Describe("KubeProxy", func() {
-	f := NewFramework("e2e-kubeproxy")
+	f := NewDefaultFramework("e2e-kubeproxy")
 	config := &KubeProxyTestConfig{
 		f: f,
 	}

@@ -27,7 +27,7 @@ import (
 )
 
 var _ = Describe("LimitRange", func() {
-	f := NewFramework("limitrange")
+	f := NewDefaultFramework("limitrange")
 
 	It("should create a LimitRange with defaults and ensure pod has those defaults applied.", func() {
 		By("Creating a LimitRange")

@@ -64,17 +64,15 @@ var _ = Describe("Load capacity", func() {
 
 	// Explicitly put here, to delete namespace at the end of the test
 	// (after measuring latency metrics, etc.).
-	framework := NewFramework("load")
+	options := FrameworkOptions{
+		clientQPS:   50,
+		clientBurst: 100,
+	}
+	framework := NewFramework("load", options)
 	framework.NamespaceDeletionTimeout = time.Hour
 
 	BeforeEach(func() {
-		// Explicitly create a client with higher QPS limits.
-		config, err := loadConfig()
-		Expect(err).NotTo(HaveOccurred())
-		config.QPS = 50
-		config.Burst = 100
-		c, err = loadClientFromConfig(config)
-		Expect(err).NotTo(HaveOccurred())
+		c = framework.Client
 
 		ns = framework.Namespace.Name
 		nodes := ListSchedulableNodesOrDie(c)

@@ -84,7 +82,7 @@ var _ = Describe("Load capacity", func() {
 		// Terminating a namespace (deleting the remaining objects from it - which
 		// generally means events) can affect the current run. Thus we wait for all
 		// terminating namespace to be finally deleted before starting this test.
-		err = checkTestingNSDeletedExcept(c, ns)
+		err := checkTestingNSDeletedExcept(c, ns)
 		expectNoError(err)
 
 		expectNoError(resetMetrics(c))

@@ -30,7 +30,7 @@ import (
 )
 
 var _ = Describe("Mesos", func() {
-	framework := NewFramework("pods")
+	framework := NewDefaultFramework("pods")
 	var c *client.Client
 	var ns string
 

@@ -78,7 +78,7 @@ func checkMetrics(response metrics.Metrics, assumedMetrics map[string][]string)
 }
 
 var _ = Describe("MetricsGrabber", func() {
-	framework := NewFramework("metrics-grabber")
+	framework := NewDefaultFramework("metrics-grabber")
 	var c *client.Client
 	var grabber *metrics.MetricsGrabber
 	BeforeEach(func() {

@@ -32,7 +32,7 @@ import (
 )
 
 var _ = Describe("Monitoring", func() {
-	f := NewFramework("monitoring")
+	f := NewDefaultFramework("monitoring")
 
 	BeforeEach(func() {
 		SkipUnlessProviderIs("gce")

@@ -104,7 +104,7 @@ func extinguish(f *Framework, totalNS int, maxAllowedAfterDel int, maxSeconds in
 // rate of approximately 1 per second.
 var _ = Describe("Namespaces [Serial]", func() {
 
-	f := NewFramework("namespaces")
+	f := NewDefaultFramework("namespaces")
 
 	It("should delete fast enough (90 percent of 100 namespaces in 150 seconds)",
 		func() { extinguish(f, 100, 10, 150) })

@@ -32,7 +32,7 @@ import (
 )
 
 var _ = Describe("Networking", func() {
-	f := NewFramework("nettest")
+	f := NewDefaultFramework("nettest")
 
 	var svcname = "nettest"
 

@@ -67,7 +67,7 @@ const (
 var _ = Describe("NodeOutOfDisk [Serial] [Flaky]", func() {
 	var c *client.Client
 	var unfilledNodeName, recoveredNodeName string
-	framework := NewFramework("node-outofdisk")
+	framework := NewDefaultFramework("node-outofdisk")
 
 	BeforeEach(func() {
 		c = framework.Client

@@ -47,7 +47,7 @@ var _ = Describe("Pod Disks", func() {
 		host0Name string
 		host1Name string
 	)
-	framework := NewFramework("pod-disks")
+	framework := NewDefaultFramework("pod-disks")
 
 	BeforeEach(func() {
 		SkipUnlessNodeCountIsAtLeast(2)

@@ -31,7 +31,7 @@ import (
 // This test needs privileged containers, which are disabled by default. Run
 // the test with "go run hack/e2e.go ... --ginkgo.focus=[Feature:Volumes]"
 var _ = Describe("PersistentVolumes [Feature:Volumes]", func() {
-	framework := NewFramework("pv")
+	framework := NewDefaultFramework("pv")
 	var c *client.Client
 	var ns string
 

@@ -207,7 +207,7 @@ func getRestartDelay(c *client.Client, pod *api.Pod, ns string, name string, con
 }
 
 var _ = Describe("Pods", func() {
-	framework := NewFramework("pods")
+	framework := NewDefaultFramework("pods")
 
 	It("should get a host IP [Conformance]", func() {
 		name := "pod-hostip-" + string(util.NewUUID())

@@ -111,7 +111,7 @@ func runPortForward(ns, podName string, port int) (*exec.Cmd, int) {
 }
 
 var _ = Describe("Port forwarding", func() {
-	framework := NewFramework("port-forwarding")
+	framework := NewDefaultFramework("port-forwarding")
 
 	Describe("With a server that expects a client request", func() {
 		It("should support a client that connects, sends no data, and disconnects [Conformance]", func() {

@@ -159,7 +159,7 @@ func testPreStop(c *client.Client, ns string) {
 }
 
 var _ = Describe("PreStop", func() {
-	f := NewFramework("prestop")
+	f := NewDefaultFramework("prestop")
 
 	It("should call prestop when killing a pod [Conformance]", func() {
 		testPreStop(f.Client, f.Namespace.Name)

@@ -48,7 +48,7 @@ type PrivilegedPodTestConfig struct {
 }
 
 var _ = Describe("PrivilegedPod", func() {
-	f := NewFramework("e2e-privilegedpod")
+	f := NewDefaultFramework("e2e-privilegedpod")
 	config := &PrivilegedPodTestConfig{
 		f: f,
 	}

@@ -48,7 +48,7 @@ const (
 )
 
 func proxyContext(version string) {
-	f := NewFramework("proxy")
+	f := NewDefaultFramework("proxy")
 	prefix := "/api/" + version
 
 	// Port here has to be kept in sync with default kubelet port.

@@ -30,7 +30,7 @@ import (
 )
 
 var _ = Describe("ReplicationController", func() {
-	framework := NewFramework("replication-controller")
+	framework := NewDefaultFramework("replication-controller")
 
 	It("should serve a basic image on each replica with a public image [Conformance]", func() {
 		ServeImageOrFail(framework, "basic", "gcr.io/google_containers/serve_hostname:1.1")

@@ -82,7 +82,7 @@ var _ = Describe("Reboot [Disruptive] [Feature:Reboot]", func() {
 		}
 	})
 
-	f = NewFramework("reboot")
+	f = NewDefaultFramework("reboot")
 
 	It("each node by ordering clean reboot and ensure they function upon restart", func() {
 		// clean shutdown and restart

@@ -32,7 +32,7 @@ import (
 )
 
 var _ = Describe("ReplicaSet", func() {
-	framework := NewFramework("replicaset")
+	framework := NewDefaultFramework("replicaset")
 
 	It("should serve a basic image on each replica with a public image [Conformance]", func() {
 		ReplicaSetServeImageOrFail(framework, "basic", "gcr.io/google_containers/serve_hostname:1.1")

@@ -402,7 +402,7 @@ func expectNodeReadiness(isReady bool, newNode chan *api.Node) {
 }
 
 var _ = Describe("Nodes [Disruptive]", func() {
-	framework := NewFramework("resize-nodes")
+	framework := NewDefaultFramework("resize-nodes")
 	var systemPodsNo int
 	var c *client.Client
 	var ns string

@@ -35,7 +35,7 @@ const (
 )
 
 var _ = Describe("ResourceQuota", func() {
-	f := NewFramework("resourcequota")
+	f := NewDefaultFramework("resourcequota")
 
 	It("should create a ResourceQuota and ensure its status is promptly calculated.", func() {
 		By("Creating a ResourceQuota")

@@ -49,7 +49,7 @@ const (
 )
 
 var _ = Describe("Restart [Disruptive]", func() {
-	f := NewFramework("restart")
+	f := NewDefaultFramework("restart")
 	var ps *podStore
 
 	BeforeEach(func() {

@@ -150,7 +150,7 @@ var _ = Describe("SchedulerPredicates [Serial]", func() {
 		}
 	})
 
-	framework := NewFramework("sched-pred")
+	framework := NewDefaultFramework("sched-pred")
 
 	BeforeEach(func() {
 		c = framework.Client

@@ -26,7 +26,7 @@ import (
 )
 
 var _ = Describe("Secrets", func() {
-	f := NewFramework("secrets")
+	f := NewDefaultFramework("secrets")
 
 	It("should be consumable from pods in volume [Conformance]", func() {
 		name := "secret-test-" + string(util.NewUUID())

@@ -58,7 +58,7 @@ func scTestPod(hostIPC bool, hostPID bool) *api.Pod {
 }
 
 var _ = Describe("Security Context [Feature:SecurityContext]", func() {
-	framework := NewFramework("security-context")
+	framework := NewDefaultFramework("security-context")
 
 	It("should support pod.Spec.SecurityContext.SupplementalGroups", func() {
 		pod := scTestPod(false, false)

@@ -62,7 +62,7 @@ const loadBalancerCreateTimeout = 20 * time.Minute
 var ServiceNodePortRange = utilnet.PortRange{Base: 30000, Size: 2768}
 
 var _ = Describe("Services", func() {
-	f := NewFramework("services")
+	f := NewDefaultFramework("services")
 
 	var c *client.Client
 

@@ -34,7 +34,7 @@ import (
 var serviceAccountTokenNamespaceVersion = version.MustParse("v1.2.0")
 
 var _ = Describe("ServiceAccounts", func() {
-	f := NewFramework("svcaccounts")
+	f := NewDefaultFramework("svcaccounts")
 
 	It("should ensure a single API token exists", func() {
 		// wait for the service account to reference a single secret

@@ -40,7 +40,7 @@ func (d durations) Less(i, j int) bool { return d[i] < d[j] }
 func (d durations) Swap(i, j int) { d[i], d[j] = d[j], d[i] }
 
 var _ = Describe("Service endpoints latency", func() {
-	f := NewFramework("svc-latency")
+	f := NewDefaultFramework("svc-latency")
 
 	It("should not be very high [Conformance]", func() {
 		const (

@@ -210,7 +210,7 @@ var _ = Describe("ServiceLoadBalancer [Feature:ServiceLoadBalancer]", func() {
 	var repoRoot string
 	var client *client.Client
 
-	framework := NewFramework("servicelb")
+	framework := NewDefaultFramework("servicelb")
 
 	BeforeEach(func() {
 		client = framework.Client

@@ -25,7 +25,7 @@ import (
 
 var _ = Describe("SSH", func() {
 
-	f := NewFramework("ssh")
+	f := NewDefaultFramework("ssh")
 
 	BeforeEach(func() {
 		// When adding more providers here, also implement their functionality in util.go's getSigner(...).

@@ -32,7 +32,7 @@ import (
 )
 
 var _ = Describe("Ubernetes Lite", func() {
-	framework := NewFramework("ubernetes-lite")
+	framework := NewDefaultFramework("ubernetes-lite")
 	var zoneCount int
 	var err error
 	image := "gcr.io/google_containers/serve_hostname:1.1"

@@ -353,7 +353,7 @@ func deleteCinderVolume(name string) error {
 // These tests need privileged containers, which are disabled by default. Run
 // the test with "go run hack/e2e.go ... --ginkgo.focus=[Feature:Volumes]"
 var _ = Describe("Volumes [Feature:Volumes]", func() {
-	framework := NewFramework("volume")
+	framework := NewDefaultFramework("volume")
 
 	// If 'false', the test won't clear its volumes upon completion. Useful for debugging,
 	// note that namespace deletion is handled by delete-namespace flag