mirror of https://github.com/k3s-io/k3s
Cleanup e2e tags
- Remove "Suite" from e2e tag names - Move grouping e2e tags to the front of the test, describe or context name - Move Conformance e2e tags to the end of test names (test specific) - Move Skipped e2e tags to the end of names, but to as high a context as applicablepull/6/head
parent 725155b8e9
commit 2d3c3e1d0b
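For orientation before the diff itself, here is a minimal Ginkgo-style sketch of the naming convention this commit moves to. The spec names below are illustrative only and are not copied from any single file in the change:

package e2e

import . "github.com/onsi/ginkgo"

// Old style: "Suite" suffixes, grouping buried in the middle of the name,
// and "Skipped" used as a bare word.
var _ = Describe("Skipped", func() {
	It("[Skipped][Autoscaling Suite] should scale cluster size based on cpu utilization", func() {})
})

// New style: the grouping tag leads the Describe/Context name, [Skipped]
// trails the name at the highest applicable context, and test-specific tags
// such as [Conformance] trail the individual test name.
var _ = Describe("[Autoscaling] [Skipped]", func() {
	It("should scale cluster size based on cpu utilization [Conformance]", func() {})
})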
@@ -103,7 +103,7 @@ if [[ "${KUBERNETES_PROVIDER}" == "aws" ]]; then
 if [[ "${PERFORMANCE:-}" == "true" ]]; then
 : ${MASTER_SIZE:="m3.xlarge"}
 : ${NUM_MINIONS:="100"}
-: ${GINKGO_TEST_ARGS:="--ginkgo.focus=\[Performance\ssuite\]"}
+: ${GINKGO_TEST_ARGS:="--ginkgo.focus=\[Performance\]"}
 else
 : ${MASTER_SIZE:="m3.large"}
 : ${MINION_SIZE:="m3.large"}
@@ -118,9 +118,9 @@ CURRENT_RELEASE_PUBLISHED_VERSION="ci/latest-1.1"
 
 # Specialized to skip when running reboot tests.
 REBOOT_SKIP_TESTS=(
-"Skipped"
+"\[Skipped\]"
 "Restart\sshould\srestart\sall\snodes"
-"Example"
+"\[Example\]"
 )
 
 # Specialized tests which should be skipped by default for projects.
@@ -291,7 +291,7 @@ case ${JOB_NAME} in
 kubernetes-e2e-gce-examples)
 : ${E2E_CLUSTER_NAME:="jenkins-gce-e2e-examples"}
 : ${E2E_NETWORK:="e2e-examples"}
-: ${GINKGO_TEST_ARGS:="--ginkgo.focus=Example"}
+: ${GINKGO_TEST_ARGS:="--ginkgo.focus=\[Example\]"}
 : ${KUBE_GCE_INSTANCE_PREFIX:="e2e-examples"}
 : ${PROJECT:="kubernetes-jenkins"}
 ;;
@@ -300,7 +300,7 @@ case ${JOB_NAME} in
 kubernetes-e2e-gce-autoscaling)
 : ${E2E_CLUSTER_NAME:="jenkins-gce-e2e-autoscaling"}
 : ${E2E_NETWORK:="e2e-autoscaling"}
-: ${GINKGO_TEST_ARGS:="--ginkgo.focus=Autoscaling\sSuite"}
+: ${GINKGO_TEST_ARGS:="--ginkgo.focus=\[Autoscaling\]"}
 : ${KUBE_GCE_INSTANCE_PREFIX:="e2e-autoscaling"}
 : ${PROJECT:="k8s-jnks-e2e-gce-autoscaling"}
 : ${FAIL_ON_GCP_RESOURCE_LEAK:="true"}
@@ -425,7 +425,7 @@ case ${JOB_NAME} in
 kubernetes-e2e-gce-scalability)
 : ${E2E_CLUSTER_NAME:="jenkins-gce-e2e-scalability"}
 : ${E2E_NETWORK:="e2e-scalability"}
-: ${GINKGO_TEST_ARGS:="--ginkgo.focus=Performance\ssuite"}
+: ${GINKGO_TEST_ARGS:="--ginkgo.focus=\[Performance\]"}
 : ${KUBE_GCE_INSTANCE_PREFIX:="e2e-scalability"}
 : ${PROJECT:="kubernetes-jenkins"}
 # Override GCE defaults.
@@ -444,7 +444,7 @@ case ${JOB_NAME} in
 kubernetes-e2e-gce-scalability-1.1)
 : ${E2E_CLUSTER_NAME:="jenkins-gce-e2e-scalability-1.1"}
 : ${E2E_NETWORK:="e2e-scalability-1-1"}
-: ${GINKGO_TEST_ARGS:="--ginkgo.focus=Performance\ssuite"}
+: ${GINKGO_TEST_ARGS:="--ginkgo.focus=\[Performance\]"}
 : ${JENKINS_PUBLISHED_VERSION:="ci/latest-1.1"}
 : ${KUBE_GCE_INSTANCE_PREFIX:="e2e-scalability-1-1"}
 : ${KUBE_GCS_STAGING_PATH_SUFFIX:="scalability-1.1"}
@@ -650,7 +650,7 @@ case ${JOB_NAME} in
 ${REBOOT_SKIP_TESTS[@]:+${REBOOT_SKIP_TESTS[@]}}\
 ) --ginkgo.focus=$(join_regex_no_empty \
 ${DISRUPTIVE_TESTS[@]:+${DISRUPTIVE_TESTS[@]}} \
-"Autoscaling\sSuite.*via\sreplicationController" \
+"\[Autoscaling\]\sReplicationController" \
 "GCE\sL7\sLoadBalancer\sController"
 )"}
 : ${JENKINS_PUBLISHED_VERSION:="ci/latest-1.1"}
@@ -862,7 +862,7 @@ case ${JOB_NAME} in
 : ${E2E_UP:="false"}
 : ${E2E_TEST:="true"}
 : ${E2E_DOWN:="false"}
-: ${GINKGO_TEST_ARGS:="--ginkgo.focus=Skipped.*Cluster\supgrade.*upgrade-master"}
+: ${GINKGO_TEST_ARGS:="--ginkgo.focus=Cluster\sUpgrade.*upgrade-master"}
 ;;
 
 kubernetes-upgrade-gke-step3-e2e-old)
@@ -896,7 +896,7 @@ case ${JOB_NAME} in
 : ${E2E_UP:="false"}
 : ${E2E_TEST:="true"}
 : ${E2E_DOWN:="false"}
-: ${GINKGO_TEST_ARGS:="--ginkgo.focus=Skipped.*Cluster\supgrade.*upgrade-cluster"}
+: ${GINKGO_TEST_ARGS:="--ginkgo.focus=Cluster\sUpgrade.*upgrade-cluster"}
 ;;
 
 kubernetes-upgrade-gke-step5-e2e-old)
@@ -977,7 +977,7 @@ case ${JOB_NAME} in
 : ${E2E_UP:="false"}
 : ${E2E_TEST:="true"}
 : ${E2E_DOWN:="false"}
-: ${GINKGO_TEST_ARGS:="--ginkgo.focus=Skipped.*Cluster\supgrade.*upgrade-master --upgrade-target=release/latest"}
+: ${GINKGO_TEST_ARGS:="--ginkgo.focus=Cluster\sUpgrade.*upgrade-master --upgrade-target=release/latest"}
 ;;
 
 kubernetes-upgrade-stable-latest-gke-step3-e2e-old)
@@ -1013,7 +1013,7 @@ case ${JOB_NAME} in
 : ${E2E_UP:="false"}
 : ${E2E_TEST:="true"}
 : ${E2E_DOWN:="false"}
-: ${GINKGO_TEST_ARGS:="--ginkgo.focus=Skipped.*Cluster\supgrade.*upgrade-cluster --upgrade-target=release/latest"}
+: ${GINKGO_TEST_ARGS:="--ginkgo.focus=Cluster\sUpgrade.*upgrade-cluster --upgrade-target=release/latest"}
 ;;
 
 kubernetes-upgrade-stable-latest-gke-step5-e2e-old)
@@ -1092,7 +1092,7 @@ case ${JOB_NAME} in
 : ${E2E_UP:="false"}
 : ${E2E_TEST:="true"}
 : ${E2E_DOWN:="false"}
-: ${GINKGO_TEST_ARGS:="--ginkgo.focus=Skipped.*Cluster\supgrade.*upgrade-master --upgrade-target=${CURRENT_RELEASE_PUBLISHED_VERSION}"}
+: ${GINKGO_TEST_ARGS:="--ginkgo.focus=Cluster\sUpgrade.*upgrade-master --upgrade-target=${CURRENT_RELEASE_PUBLISHED_VERSION}"}
 ;;
 
 kubernetes-upgrade-1.0-current-release-gke-step3-e2e-old)
@@ -1128,7 +1128,7 @@ case ${JOB_NAME} in
 : ${E2E_UP:="false"}
 : ${E2E_TEST:="true"}
 : ${E2E_DOWN:="false"}
-: ${GINKGO_TEST_ARGS:="--ginkgo.focus=Skipped.*Cluster\supgrade.*upgrade-cluster --upgrade-target=${CURRENT_RELEASE_PUBLISHED_VERSION}"}
+: ${GINKGO_TEST_ARGS:="--ginkgo.focus=Cluster\sUpgrade.*upgrade-cluster --upgrade-target=${CURRENT_RELEASE_PUBLISHED_VERSION}"}
 ;;
 
 kubernetes-upgrade-1.0-current-release-gke-step5-e2e-old)
@@ -1204,7 +1204,7 @@ case ${JOB_NAME} in
 : ${E2E_UP:="false"}
 : ${E2E_TEST:="true"}
 : ${E2E_DOWN:="false"}
-: ${GINKGO_TEST_ARGS:="--ginkgo.focus=Skipped.*Cluster\supgrade.*upgrade-master"}
+: ${GINKGO_TEST_ARGS:="--ginkgo.focus=Cluster\sUpgrade.*upgrade-master"}
 : ${NUM_MINIONS:=5}
 : ${KUBE_ENABLE_DEPLOYMENTS:=true}
 : ${KUBE_ENABLE_DAEMONSETS:=true}
@@ -1239,7 +1239,7 @@ case ${JOB_NAME} in
 : ${E2E_UP:="false"}
 : ${E2E_TEST:="true"}
 : ${E2E_DOWN:="false"}
-: ${GINKGO_TEST_ARGS:="--ginkgo.focus=Skipped.*Cluster\supgrade.*upgrade-cluster"}
+: ${GINKGO_TEST_ARGS:="--ginkgo.focus=Cluster\sUpgrade.*upgrade-cluster"}
 : ${NUM_MINIONS:=5}
 : ${KUBE_ENABLE_DEPLOYMENTS:=true}
 : ${KUBE_ENABLE_DAEMONSETS:=true}
@@ -1316,7 +1316,7 @@ case ${JOB_NAME} in
 : ${E2E_UP:="false"}
 : ${E2E_TEST:="true"}
 : ${E2E_DOWN:="false"}
-: ${GINKGO_TEST_ARGS:="--ginkgo.focus=Skipped.*Cluster\supgrade.*upgrade-master --upgrade-target=${CURRENT_RELEASE_PUBLISHED_VERSION}"}
+: ${GINKGO_TEST_ARGS:="--ginkgo.focus=Cluster\sUpgrade.*upgrade-master --upgrade-target=${CURRENT_RELEASE_PUBLISHED_VERSION}"}
 : ${KUBE_GCE_INSTANCE_PREFIX:="e2e-upgrade-1-0"}
 : ${NUM_MINIONS:=5}
 : ${KUBE_ENABLE_DEPLOYMENTS:=true}
@@ -1353,7 +1353,7 @@ case ${JOB_NAME} in
 : ${E2E_UP:="false"}
 : ${E2E_TEST:="true"}
 : ${E2E_DOWN:="false"}
-: ${GINKGO_TEST_ARGS:="--ginkgo.focus=Skipped.*Cluster\supgrade.*upgrade-cluster --upgrade-target=${CURRENT_RELEASE_PUBLISHED_VERSION}"}
+: ${GINKGO_TEST_ARGS:="--ginkgo.focus=Cluster\sUpgrade.*upgrade-cluster --upgrade-target=${CURRENT_RELEASE_PUBLISHED_VERSION}"}
 : ${KUBE_GCE_INSTANCE_PREFIX:="e2e-upgrade-1-0"}
 : ${NUM_MINIONS:=5}
 : ${KUBE_ENABLE_DEPLOYMENTS:=true}
@@ -16,7 +16,7 @@
 
 # This does a checked upgrade of the MASTER using the locally built release, then runs e2e.
 echo "Running the checked master upgrade."
-go run "$(dirname $0)/e2e.go" -build -up -v -test -test_args='--ginkgo.focus=Skipped.*Cluster\supgrade.*gce-upgrade' -check_version_skew=false
+go run "$(dirname $0)/e2e.go" -build -up -v -test -test_args='--ginkgo.focus=Cluster\sUpgrade.*gce-upgrade' -check_version_skew=false
 if [ $? -eq 0 ]; then
 echo "Master upgrade complete. Running e2e on the upgraded cluster."
 go run "$(dirname $0)/e2e.go" -v -version="" -test -check_version_skew=false
@@ -34,7 +34,7 @@ const (
 scaleDownTimeout = 30 * time.Minute
 )
 
-var _ = Describe("Autoscaling", func() {
+var _ = Describe("[Autoscaling] [Skipped]", func() {
 f := NewFramework("autoscaling")
 var nodeCount int
 var coresPerNode int
@@ -57,7 +57,7 @@ var _ = Describe("Autoscaling", func() {
 cleanUpAutoscaler()
 })
 
-It("[Skipped][Autoscaling Suite] should scale cluster size based on cpu utilization", func() {
+It("Should scale cluster size based on cpu utilization", func() {
 setUpAutoscaler("cpu/node_utilization", 0.4, nodeCount, nodeCount+1)
 
 // Consume 50% CPU
@@ -71,7 +71,7 @@ var _ = Describe("Autoscaling", func() {
 expectNoError(waitForClusterSize(f.Client, nodeCount, scaleDownTimeout))
 })
 
-It("[Skipped][Autoscaling Suite] should scale cluster size based on cpu reservation", func() {
+It("Should scale cluster size based on cpu reservation", func() {
 setUpAutoscaler("cpu/node_reservation", 0.5, nodeCount, nodeCount+1)
 
 ReserveCpu(f, "cpu-reservation", 600*nodeCount*coresPerNode)
@@ -81,7 +81,7 @@ var _ = Describe("Autoscaling", func() {
 expectNoError(waitForClusterSize(f.Client, nodeCount, scaleDownTimeout))
 })
 
-It("[Skipped][Autoscaling Suite] should scale cluster size based on memory utilization", func() {
+It("Should scale cluster size based on memory utilization", func() {
 setUpAutoscaler("memory/node_utilization", 0.6, nodeCount, nodeCount+1)
 
 // Consume 60% of total memory capacity
@@ -96,7 +96,7 @@ var _ = Describe("Autoscaling", func() {
 expectNoError(waitForClusterSize(f.Client, nodeCount, scaleDownTimeout))
 })
 
-It("[Skipped][Autoscaling Suite] should scale cluster size based on memory reservation", func() {
+It("Should scale cluster size based on memory reservation", func() {
 setUpAutoscaler("memory/node_reservation", 0.5, nodeCount, nodeCount+1)
 
 ReserveMemory(f, "memory-reservation", nodeCount*memCapacityMb*6/10)
@ -144,144 +144,142 @@ func nodeUpgradeGKE(v string) error {
|
|||
return err
|
||||
}
|
||||
|
||||
var _ = Describe("Skipped", func() {
|
||||
var _ = Describe("Cluster Upgrade [Skipped]", func() {
|
||||
|
||||
Describe("Cluster upgrade", func() {
|
||||
svcName, replicas := "baz", 2
|
||||
var rcName, ip, v string
|
||||
var ingress api.LoadBalancerIngress
|
||||
svcName, replicas := "baz", 2
|
||||
var rcName, ip, v string
|
||||
var ingress api.LoadBalancerIngress
|
||||
|
||||
BeforeEach(func() {
|
||||
// The version is determined once at the beginning of the test so that
|
||||
// the master and nodes won't be skewed if the value changes during the
|
||||
// test.
|
||||
By(fmt.Sprintf("Getting real version for %q", testContext.UpgradeTarget))
|
||||
var err error
|
||||
v, err = realVersion(testContext.UpgradeTarget)
|
||||
expectNoError(err)
|
||||
Logf("Version for %q is %q", testContext.UpgradeTarget, v)
|
||||
})
|
||||
|
||||
f := NewFramework("cluster-upgrade")
|
||||
var w *WebserverTest
|
||||
BeforeEach(func() {
|
||||
By("Setting up the service, RC, and pods")
|
||||
w = NewWebserverTest(f.Client, f.Namespace.Name, svcName)
|
||||
rc := w.CreateWebserverRC(replicas)
|
||||
rcName = rc.ObjectMeta.Name
|
||||
svc := w.BuildServiceSpec()
|
||||
svc.Spec.Type = api.ServiceTypeLoadBalancer
|
||||
w.CreateService(svc)
|
||||
|
||||
By("Waiting for the service to become reachable")
|
||||
result, err := waitForLoadBalancerIngress(f.Client, svcName, f.Namespace.Name)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
ingresses := result.Status.LoadBalancer.Ingress
|
||||
if len(ingresses) != 1 {
|
||||
Failf("Was expecting only 1 ingress IP but got %d (%v): %v", len(ingresses), ingresses, result)
|
||||
}
|
||||
ingress = ingresses[0]
|
||||
Logf("Got load balancer ingress point %v", ingress)
|
||||
ip = ingress.IP
|
||||
if ip == "" {
|
||||
ip = ingress.Hostname
|
||||
}
|
||||
testLoadBalancerReachable(ingress, 80)
|
||||
|
||||
// TODO(mikedanese): Add setup, validate, and teardown for:
|
||||
// - secrets
|
||||
// - volumes
|
||||
// - persistent volumes
|
||||
})
|
||||
AfterEach(func() {
|
||||
w.Cleanup()
|
||||
})
|
||||
|
||||
Describe("kube-push", func() {
|
||||
BeforeEach(func() {
|
||||
// The version is determined once at the beginning of the test so that
|
||||
// the master and nodes won't be skewed if the value changes during the
|
||||
// test.
|
||||
By(fmt.Sprintf("Getting real version for %q", testContext.UpgradeTarget))
|
||||
var err error
|
||||
v, err = realVersion(testContext.UpgradeTarget)
|
||||
expectNoError(err)
|
||||
Logf("Version for %q is %q", testContext.UpgradeTarget, v)
|
||||
SkipUnlessProviderIs("gce")
|
||||
})
|
||||
|
||||
f := NewFramework("cluster-upgrade")
|
||||
var w *WebserverTest
|
||||
BeforeEach(func() {
|
||||
By("Setting up the service, RC, and pods")
|
||||
w = NewWebserverTest(f.Client, f.Namespace.Name, svcName)
|
||||
rc := w.CreateWebserverRC(replicas)
|
||||
rcName = rc.ObjectMeta.Name
|
||||
svc := w.BuildServiceSpec()
|
||||
svc.Spec.Type = api.ServiceTypeLoadBalancer
|
||||
w.CreateService(svc)
|
||||
|
||||
By("Waiting for the service to become reachable")
|
||||
result, err := waitForLoadBalancerIngress(f.Client, svcName, f.Namespace.Name)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
ingresses := result.Status.LoadBalancer.Ingress
|
||||
if len(ingresses) != 1 {
|
||||
Failf("Was expecting only 1 ingress IP but got %d (%v): %v", len(ingresses), ingresses, result)
|
||||
}
|
||||
ingress = ingresses[0]
|
||||
Logf("Got load balancer ingress point %v", ingress)
|
||||
ip = ingress.IP
|
||||
if ip == "" {
|
||||
ip = ingress.Hostname
|
||||
}
|
||||
testLoadBalancerReachable(ingress, 80)
|
||||
|
||||
// TODO(mikedanese): Add setup, validate, and teardown for:
|
||||
// - secrets
|
||||
// - volumes
|
||||
// - persistent volumes
|
||||
It("of master should maintain responsive services", func() {
|
||||
By("Validating cluster before master upgrade")
|
||||
expectNoError(validate(f, svcName, rcName, ingress, replicas))
|
||||
By("Performing a master upgrade")
|
||||
testMasterUpgrade(ip, v, masterPush)
|
||||
By("Validating cluster after master upgrade")
|
||||
expectNoError(validate(f, svcName, rcName, ingress, replicas))
|
||||
})
|
||||
})
|
||||
|
||||
Describe("upgrade-master", func() {
|
||||
BeforeEach(func() {
|
||||
SkipUnlessProviderIs("gce", "gke")
|
||||
})
|
||||
|
||||
It("should maintain responsive services", func() {
|
||||
By("Validating cluster before master upgrade")
|
||||
expectNoError(validate(f, svcName, rcName, ingress, replicas))
|
||||
By("Performing a master upgrade")
|
||||
testMasterUpgrade(ip, v, masterUpgrade)
|
||||
By("Checking master version")
|
||||
expectNoError(checkMasterVersion(f.Client, v))
|
||||
By("Validating cluster after master upgrade")
|
||||
expectNoError(validate(f, svcName, rcName, ingress, replicas))
|
||||
})
|
||||
})
|
||||
|
||||
Describe("upgrade-cluster", func() {
|
||||
var tmplBefore, tmplAfter string
|
||||
BeforeEach(func() {
|
||||
if providerIs("gce") {
|
||||
By("Getting the node template before the upgrade")
|
||||
var err error
|
||||
tmplBefore, err = migTemplate()
|
||||
expectNoError(err)
|
||||
}
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
w.Cleanup()
|
||||
})
|
||||
|
||||
Describe("kube-push", func() {
|
||||
BeforeEach(func() {
|
||||
SkipUnlessProviderIs("gce")
|
||||
})
|
||||
|
||||
It("of master should maintain responsive services", func() {
|
||||
By("Validating cluster before master upgrade")
|
||||
expectNoError(validate(f, svcName, rcName, ingress, replicas))
|
||||
By("Performing a master upgrade")
|
||||
testMasterUpgrade(ip, v, masterPush)
|
||||
By("Validating cluster after master upgrade")
|
||||
expectNoError(validate(f, svcName, rcName, ingress, replicas))
|
||||
})
|
||||
})
|
||||
|
||||
Describe("upgrade-master", func() {
|
||||
BeforeEach(func() {
|
||||
SkipUnlessProviderIs("gce", "gke")
|
||||
})
|
||||
|
||||
It("should maintain responsive services", func() {
|
||||
By("Validating cluster before master upgrade")
|
||||
expectNoError(validate(f, svcName, rcName, ingress, replicas))
|
||||
By("Performing a master upgrade")
|
||||
testMasterUpgrade(ip, v, masterUpgrade)
|
||||
By("Checking master version")
|
||||
expectNoError(checkMasterVersion(f.Client, v))
|
||||
By("Validating cluster after master upgrade")
|
||||
expectNoError(validate(f, svcName, rcName, ingress, replicas))
|
||||
})
|
||||
})
|
||||
|
||||
Describe("upgrade-cluster", func() {
|
||||
var tmplBefore, tmplAfter string
|
||||
BeforeEach(func() {
|
||||
if providerIs("gce") {
|
||||
By("Getting the node template before the upgrade")
|
||||
var err error
|
||||
tmplBefore, err = migTemplate()
|
||||
expectNoError(err)
|
||||
if providerIs("gce") {
|
||||
By("Cleaning up any unused node templates")
|
||||
var err error
|
||||
tmplAfter, err = migTemplate()
|
||||
if err != nil {
|
||||
Logf("Could not get node template post-upgrade; may have leaked template %s", tmplBefore)
|
||||
return
|
||||
}
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
if providerIs("gce") {
|
||||
By("Cleaning up any unused node templates")
|
||||
var err error
|
||||
tmplAfter, err = migTemplate()
|
||||
if err != nil {
|
||||
Logf("Could not get node template post-upgrade; may have leaked template %s", tmplBefore)
|
||||
return
|
||||
}
|
||||
if tmplBefore == tmplAfter {
|
||||
// The node upgrade failed so there's no need to delete
|
||||
// anything.
|
||||
Logf("Node template %s is still in use; not cleaning up", tmplBefore)
|
||||
return
|
||||
}
|
||||
Logf("Deleting node template %s", tmplBefore)
|
||||
if _, _, err := retryCmd("gcloud", "compute", "instance-templates",
|
||||
fmt.Sprintf("--project=%s", testContext.CloudConfig.ProjectID),
|
||||
"delete",
|
||||
tmplBefore); err != nil {
|
||||
Logf("gcloud compute instance-templates delete %s call failed with err: %v", tmplBefore, err)
|
||||
Logf("May have leaked instance template %q", tmplBefore)
|
||||
}
|
||||
if tmplBefore == tmplAfter {
|
||||
// The node upgrade failed so there's no need to delete
|
||||
// anything.
|
||||
Logf("Node template %s is still in use; not cleaning up", tmplBefore)
|
||||
return
|
||||
}
|
||||
})
|
||||
Logf("Deleting node template %s", tmplBefore)
|
||||
if _, _, err := retryCmd("gcloud", "compute", "instance-templates",
|
||||
fmt.Sprintf("--project=%s", testContext.CloudConfig.ProjectID),
|
||||
"delete",
|
||||
tmplBefore); err != nil {
|
||||
Logf("gcloud compute instance-templates delete %s call failed with err: %v", tmplBefore, err)
|
||||
Logf("May have leaked instance template %q", tmplBefore)
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
It("should maintain a functioning cluster", func() {
|
||||
SkipUnlessProviderIs("gce", "gke")
|
||||
It("should maintain a functioning cluster", func() {
|
||||
SkipUnlessProviderIs("gce", "gke")
|
||||
|
||||
By("Validating cluster before master upgrade")
|
||||
expectNoError(validate(f, svcName, rcName, ingress, replicas))
|
||||
By("Performing a master upgrade")
|
||||
testMasterUpgrade(ip, v, masterUpgrade)
|
||||
By("Checking master version")
|
||||
expectNoError(checkMasterVersion(f.Client, v))
|
||||
By("Validating cluster after master upgrade")
|
||||
expectNoError(validate(f, svcName, rcName, ingress, replicas))
|
||||
By("Performing a node upgrade")
|
||||
testNodeUpgrade(f, nodeUpgrade, replicas, v)
|
||||
By("Validating cluster after node upgrade")
|
||||
expectNoError(validate(f, svcName, rcName, ingress, replicas))
|
||||
})
|
||||
By("Validating cluster before master upgrade")
|
||||
expectNoError(validate(f, svcName, rcName, ingress, replicas))
|
||||
By("Performing a master upgrade")
|
||||
testMasterUpgrade(ip, v, masterUpgrade)
|
||||
By("Checking master version")
|
||||
expectNoError(checkMasterVersion(f.Client, v))
|
||||
By("Validating cluster after master upgrade")
|
||||
expectNoError(validate(f, svcName, rcName, ingress, replicas))
|
||||
By("Performing a node upgrade")
|
||||
testNodeUpgrade(f, nodeUpgrade, replicas, v)
|
||||
By("Validating cluster after node upgrade")
|
||||
expectNoError(validate(f, svcName, rcName, ingress, replicas))
|
||||
})
|
||||
})
|
||||
})
|
||||
|
|
|
@@ -70,11 +70,11 @@ func extractLatencyMetrics(latencies []podLatencyData) LatencyMetric {
 return LatencyMetric{Perc50: perc50, Perc90: perc90, Perc99: perc99}
 }
 
-// This test suite can take a long time to run, so by default it is added to
-// the ginkgo.skip list (see driver.go).
+// This test suite can take a long time to run, and can affect or be affected by other tests.
+// So by default it is added to the ginkgo.skip list (see driver.go).
 // To run this suite you must explicitly ask for it by setting the
 // -t/--test flag or ginkgo.focus flag.
-var _ = Describe("Density", func() {
+var _ = Describe("Density [Skipped]", func() {
 var c *client.Client
 var nodeCount int
 var RCName string
@@ -151,10 +151,7 @@ var _ = Describe("Density", func() {
 }
 })
 
-// Tests with "Skipped" substring in their name will be skipped when running
-// e2e test suite without --ginkgo.focus & --ginkgo.skip flags.
 type Density struct {
-skip bool
 // Controls if e2e latency tests should be run (they are slow)
 runLatencyTest bool
 podsPerNode int
@@ -163,25 +160,19 @@ var _ = Describe("Density", func() {
 }
 
 densityTests := []Density{
-// This test should not be run in a regular jenkins run, because it is not isolated enough
-// (metrics from other tests affects this one).
-// TODO: Reenable once we can measure latency only from a single test.
 // TODO: Expose runLatencyTest as ginkgo flag.
-{podsPerNode: 3, skip: true, runLatencyTest: false, interval: 10 * time.Second},
-{podsPerNode: 30, skip: true, runLatencyTest: true, interval: 10 * time.Second},
+{podsPerNode: 3, runLatencyTest: false, interval: 10 * time.Second},
+{podsPerNode: 30, runLatencyTest: true, interval: 10 * time.Second},
 // More than 30 pods per node is outside our v1.0 goals.
 // We might want to enable those tests in the future.
-{podsPerNode: 50, skip: true, runLatencyTest: false, interval: 10 * time.Second},
-{podsPerNode: 100, skip: true, runLatencyTest: false, interval: 1 * time.Second},
+{podsPerNode: 50, runLatencyTest: false, interval: 10 * time.Second},
+{podsPerNode: 100, runLatencyTest: false, interval: 1 * time.Second},
 }
 
 for _, testArg := range densityTests {
 name := fmt.Sprintf("should allow starting %d pods per node", testArg.podsPerNode)
 if testArg.podsPerNode == 30 {
-name = "[Performance suite] " + name
-}
-if testArg.skip {
-name = "[Skipped] " + name
+name = "[Performance] " + name
 }
 itArg := testArg
 It(name, func() {
@@ -123,7 +123,7 @@ func TestE2E(t *testing.T) {
 
 // Disable skipped tests unless they are explicitly requested.
 if config.GinkgoConfig.FocusString == "" && config.GinkgoConfig.SkipString == "" {
-config.GinkgoConfig.SkipString = "Skipped"
+config.GinkgoConfig.SkipString = `\[Skipped\]`
 }
 gomega.RegisterFailHandler(ginkgo.Fail)
 
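An aside on the driver.go change above (not part of the commit): the skip string is a regular expression matched against spec names, so the brackets have to be escaped — left unescaped, [Skipped] would be a character class matching any single one of those letters rather than the literal tag. A small self-contained Go sketch with hypothetical spec names:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Default skip pattern set by driver.go when no explicit focus/skip is given.
	defaultSkip := regexp.MustCompile(`\[Skipped\]`)

	// Hypothetical spec names in the style introduced by this commit.
	specs := []string{
		"Density [Skipped] should allow starting 3 pods per node",
		"[Example] Pet Store [Skipped] should scale to available throughput",
		"EmptyDir volumes volume on tmpfs should have the correct mode [Conformance]",
	}
	for _, name := range specs {
		fmt.Printf("skip=%-5v %s\n", defaultSkip.MatchString(name), name)
	}
}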
@@ -37,21 +37,23 @@ var _ = Describe("EmptyDir volumes", func() {
 
 f := NewFramework("emptydir")
 
-// TODO: Enable security context everywhere and remove skip for FSGroup
-It("new files should be created with FSGroup ownership when container is root [Conformance] [Skipped]", func() {
-doTestSetgidFSGroup(f, testImageRootUid, api.StorageMediumMemory)
-})
+// TODO: Remove [Skipped] when security context is enabled everywhere
+Context("when FSGroup is specified [Skipped]", func() {
+It("new files should be created with FSGroup ownership when container is root [Conformance]", func() {
+doTestSetgidFSGroup(f, testImageRootUid, api.StorageMediumMemory)
+})
 
-It("new files should be created with FSGroup ownership when container is non-root [Conformance] [Skipped]", func() {
-doTestSetgidFSGroup(f, testImageNonRootUid, api.StorageMediumMemory)
-})
+It("new files should be created with FSGroup ownership when container is non-root [Conformance]", func() {
+doTestSetgidFSGroup(f, testImageNonRootUid, api.StorageMediumMemory)
+})
 
-It("volume on default medium should have the correct mode using FSGroup [Conformance] [Skipped]", func() {
-doTestVolumeModeFSGroup(f, testImageRootUid, api.StorageMediumDefault)
-})
+It("volume on default medium should have the correct mode using FSGroup [Conformance]", func() {
+doTestVolumeModeFSGroup(f, testImageRootUid, api.StorageMediumDefault)
+})
 
-It("volume on tmpfs should have the correct mode using FSGroup [Conformance] [Skipped]", func() {
-doTestVolumeModeFSGroup(f, testImageRootUid, api.StorageMediumMemory)
+It("volume on tmpfs should have the correct mode using FSGroup [Conformance]", func() {
+doTestVolumeModeFSGroup(f, testImageRootUid, api.StorageMediumMemory)
 })
+})
 
 It("volume on tmpfs should have the correct mode [Conformance]", func() {
@ -0,0 +1,161 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package e2e
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
client "k8s.io/kubernetes/pkg/client/unversioned"
|
||||
"k8s.io/kubernetes/pkg/fields"
|
||||
"k8s.io/kubernetes/pkg/labels"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
const (
|
||||
dnsReadyTimeout = time.Minute
|
||||
)
|
||||
|
||||
const queryDnsPythonTemplate string = `
|
||||
import socket
|
||||
try:
|
||||
socket.gethostbyname('%s')
|
||||
print 'ok'
|
||||
except:
|
||||
print 'err'`
|
||||
|
||||
var _ = Describe("[Example] ClusterDns", func() {
|
||||
framework := NewFramework("cluster-dns")
|
||||
|
||||
var c *client.Client
|
||||
BeforeEach(func() {
|
||||
c = framework.Client
|
||||
})
|
||||
|
||||
It("should create pod that uses dns [Conformance]", func() {
|
||||
mkpath := func(file string) string {
|
||||
return filepath.Join(testContext.RepoRoot, "examples/cluster-dns", file)
|
||||
}
|
||||
|
||||
// contrary to the example, this test does not use contexts, for simplicity
|
||||
// namespaces are passed directly.
|
||||
// Also, for simplicity, we don't use yamls with namespaces, but we
|
||||
// create testing namespaces instead.
|
||||
|
||||
backendRcYaml := mkpath("dns-backend-rc.yaml")
|
||||
backendRcName := "dns-backend"
|
||||
backendSvcYaml := mkpath("dns-backend-service.yaml")
|
||||
backendSvcName := "dns-backend"
|
||||
backendPodName := "dns-backend"
|
||||
frontendPodYaml := mkpath("dns-frontend-pod.yaml")
|
||||
frontendPodName := "dns-frontend"
|
||||
frontendPodContainerName := "dns-frontend"
|
||||
|
||||
podOutput := "Hello World!"
|
||||
|
||||
// we need two namespaces anyway, so let's forget about
|
||||
// the one created in BeforeEach and create two new ones.
|
||||
namespaces := []*api.Namespace{nil, nil}
|
||||
for i := range namespaces {
|
||||
var err error
|
||||
namespaces[i], err = createTestingNS(fmt.Sprintf("dnsexample%d", i), c)
|
||||
if testContext.DeleteNamespace {
|
||||
if namespaces[i] != nil {
|
||||
defer deleteNS(c, namespaces[i].Name, 5*time.Minute /* namespace deletion timeout */)
|
||||
}
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
} else {
|
||||
Logf("Found DeleteNamespace=false, skipping namespace deletion!")
|
||||
}
|
||||
}
|
||||
|
||||
for _, ns := range namespaces {
|
||||
runKubectlOrDie("create", "-f", backendRcYaml, getNsCmdFlag(ns))
|
||||
}
|
||||
|
||||
for _, ns := range namespaces {
|
||||
runKubectlOrDie("create", "-f", backendSvcYaml, getNsCmdFlag(ns))
|
||||
}
|
||||
|
||||
// wait for objects
|
||||
for _, ns := range namespaces {
|
||||
waitForRCPodsRunning(c, ns.Name, backendRcName)
|
||||
waitForService(c, ns.Name, backendSvcName, true, poll, serviceStartTimeout)
|
||||
}
|
||||
// it is not enough that pods are running because they may be set to running, but
|
||||
// the application itself may have not been initialized. Just query the application.
|
||||
for _, ns := range namespaces {
|
||||
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": backendRcName}))
|
||||
pods, err := c.Pods(ns.Name).List(label, fields.Everything())
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
err = podsResponding(c, ns.Name, backendPodName, false, pods)
|
||||
Expect(err).NotTo(HaveOccurred(), "waiting for all pods to respond")
|
||||
Logf("found %d backend pods responding in namespace %s", len(pods.Items), ns.Name)
|
||||
|
||||
err = serviceResponding(c, ns.Name, backendSvcName)
|
||||
Expect(err).NotTo(HaveOccurred(), "waiting for the service to respond")
|
||||
}
|
||||
|
||||
// Now another tricky part:
|
||||
// It may happen that the service name is not yet in DNS.
|
||||
// So if we start our pod, it will fail. We must make sure
|
||||
// the name is already resolvable. So let's try to query DNS from
|
||||
// the pod we have, until we find our service name.
|
||||
// This complicated code may be removed if the pod itself retried after
|
||||
// dns error or timeout.
|
||||
// This code is probably unnecessary, but let's stay on the safe side.
|
||||
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": backendPodName}))
|
||||
pods, err := c.Pods(namespaces[0].Name).List(label, fields.Everything())
|
||||
|
||||
if err != nil || pods == nil || len(pods.Items) == 0 {
|
||||
Failf("no running pods found")
|
||||
}
|
||||
podName := pods.Items[0].Name
|
||||
|
||||
queryDns := fmt.Sprintf(queryDnsPythonTemplate, backendSvcName+"."+namespaces[0].Name)
|
||||
_, err = lookForStringInPodExec(namespaces[0].Name, podName, []string{"python", "-c", queryDns}, "ok", dnsReadyTimeout)
|
||||
Expect(err).NotTo(HaveOccurred(), "waiting for output from pod exec")
|
||||
|
||||
updatedPodYaml := prepareResourceWithReplacedString(frontendPodYaml, "dns-backend.development.cluster.local", fmt.Sprintf("dns-backend.%s.cluster.local", namespaces[0].Name))
|
||||
|
||||
// create a pod in each namespace
|
||||
for _, ns := range namespaces {
|
||||
newKubectlCommand("create", "-f", "-", getNsCmdFlag(ns)).withStdinData(updatedPodYaml).execOrDie()
|
||||
}
|
||||
|
||||
// wait until the pods have been scheduler, i.e. are not Pending anymore. Remember
|
||||
// that we cannot wait for the pods to be running because our pods terminate by themselves.
|
||||
for _, ns := range namespaces {
|
||||
err := waitForPodNotPending(c, ns.Name, frontendPodName)
|
||||
expectNoError(err)
|
||||
}
|
||||
|
||||
// wait for pods to print their result
|
||||
for _, ns := range namespaces {
|
||||
_, err := lookForStringInLog(ns.Name, frontendPodName, frontendPodContainerName, podOutput, podStartTimeout)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
func getNsCmdFlag(ns *api.Namespace) string {
|
||||
return fmt.Sprintf("--namespace=%v", ns.Name)
|
||||
}
|
|
@@ -150,7 +150,7 @@ T:
 Ω(totalTransactions).Should(BeNumerically(">", minExpected))
 }
 
-var _ = Describe("[Skipped][Example] Pet Store", func() {
+var _ = Describe("[Example] Pet Store [Skipped]", func() {
 
 // The number of minions dictates total number of generators/transaction expectations.
 var minionCount int
@@ -36,18 +36,9 @@ import (
 const (
 podListTimeout = time.Minute
 serverStartTimeout = podStartTimeout + 3*time.Minute
-dnsReadyTimeout = time.Minute
 )
 
-const queryDnsPythonTemplate string = `
-import socket
-try:
-socket.gethostbyname('%s')
-print 'ok'
-except:
-print 'err'`
-
-var _ = Describe("Examples e2e", func() {
+var _ = Describe("[Example] [Skipped]", func() {
 framework := NewFramework("examples")
 var c *client.Client
 var ns string
@@ -56,7 +47,7 @@ var _ = Describe("Examples e2e", func() {
 ns = framework.Namespace.Name
 })
 
-Describe("[Skipped][Example]Redis", func() {
+Describe("Redis", func() {
 It("should create and stop redis servers", func() {
 mkpath := func(file string) string {
 return filepath.Join(testContext.RepoRoot, "examples/redis", file)
@@ -118,7 +109,7 @@ var _ = Describe("Examples e2e", func() {
 })
 })
 
-Describe("[Skipped][Example]Celery-RabbitMQ", func() {
+Describe("Celery-RabbitMQ", func() {
 It("should create and stop celery+rabbitmq servers", func() {
 mkpath := func(file string) string {
 return filepath.Join(testContext.RepoRoot, "examples", "celery-rabbitmq", file)
@@ -161,7 +152,7 @@ var _ = Describe("Examples e2e", func() {
 })
 })
 
-Describe("[Skipped][Example]Spark", func() {
+Describe("Spark", func() {
 It("should start spark master, driver and workers", func() {
 mkpath := func(file string) string {
 return filepath.Join(testContext.RepoRoot, "examples", "spark", file)
@@ -197,7 +188,7 @@ var _ = Describe("Examples e2e", func() {
 })
 })
 
-Describe("[Skipped][Example]Cassandra", func() {
+Describe("Cassandra", func() {
 It("should create and scale cassandra", func() {
 mkpath := func(file string) string {
 return filepath.Join(testContext.RepoRoot, "examples", "cassandra", file)
@@ -239,7 +230,7 @@ var _ = Describe("Examples e2e", func() {
 })
 })
 
-Describe("[Skipped][Example]Storm", func() {
+Describe("Storm", func() {
 It("should create and stop Zookeeper, Nimbus and Storm worker servers", func() {
 mkpath := func(file string) string {
 return filepath.Join(testContext.RepoRoot, "examples", "storm", file)
@@ -293,7 +284,7 @@ var _ = Describe("Examples e2e", func() {
 })
 })
 
-Describe("[Skipped][Example]Liveness", func() {
+Describe("Liveness", func() {
 It("liveness pods should be automatically restarted", func() {
 mkpath := func(file string) string {
 return filepath.Join(testContext.RepoRoot, "docs", "user-guide", "liveness", file)
@@ -325,7 +316,7 @@ var _ = Describe("Examples e2e", func() {
 })
 })
 
-Describe("[Skipped][Example]Secret", func() {
+Describe("Secret", func() {
 It("should create a pod that reads a secret", func() {
 mkpath := func(file string) string {
 return filepath.Join(testContext.RepoRoot, "docs", "user-guide", "secrets", file)
@@ -344,7 +335,7 @@ var _ = Describe("Examples e2e", func() {
 })
 })
 
-Describe("[Skipped][Example]Downward API", func() {
+Describe("Downward API", func() {
 It("should create a pod that prints his name and namespace", func() {
 mkpath := func(file string) string {
 return filepath.Join(testContext.RepoRoot, "docs", "user-guide", "downward-api", file)
@@ -364,7 +355,7 @@ var _ = Describe("Examples e2e", func() {
 })
 })
 
-Describe("[Skipped][Example]RethinkDB", func() {
+Describe("RethinkDB", func() {
 It("should create and stop rethinkdb servers", func() {
 mkpath := func(file string) string {
 return filepath.Join(testContext.RepoRoot, "examples", "rethinkdb", file)
@@ -406,7 +397,7 @@ var _ = Describe("Examples e2e", func() {
 })
 })
 
-Describe("[Skipped][Example]Hazelcast", func() {
+Describe("Hazelcast", func() {
 It("should create and scale hazelcast", func() {
 mkpath := func(file string) string {
 return filepath.Join(testContext.RepoRoot, "examples", "hazelcast", file)
@ -436,113 +427,6 @@ var _ = Describe("Examples e2e", func() {
|
|||
})
|
||||
})
|
||||
})
|
||||
|
||||
Describe("[Example]ClusterDns", func() {
|
||||
It("should create pod that uses dns [Conformance]", func() {
|
||||
mkpath := func(file string) string {
|
||||
return filepath.Join(testContext.RepoRoot, "examples/cluster-dns", file)
|
||||
}
|
||||
|
||||
// contrary to the example, this test does not use contexts, for simplicity
|
||||
// namespaces are passed directly.
|
||||
// Also, for simplicity, we don't use yamls with namespaces, but we
|
||||
// create testing namespaces instead.
|
||||
|
||||
backendRcYaml := mkpath("dns-backend-rc.yaml")
|
||||
backendRcName := "dns-backend"
|
||||
backendSvcYaml := mkpath("dns-backend-service.yaml")
|
||||
backendSvcName := "dns-backend"
|
||||
backendPodName := "dns-backend"
|
||||
frontendPodYaml := mkpath("dns-frontend-pod.yaml")
|
||||
frontendPodName := "dns-frontend"
|
||||
frontendPodContainerName := "dns-frontend"
|
||||
|
||||
podOutput := "Hello World!"
|
||||
|
||||
// we need two namespaces anyway, so let's forget about
|
||||
// the one created in BeforeEach and create two new ones.
|
||||
namespaces := []*api.Namespace{nil, nil}
|
||||
for i := range namespaces {
|
||||
var err error
|
||||
namespaces[i], err = createTestingNS(fmt.Sprintf("dnsexample%d", i), c)
|
||||
if testContext.DeleteNamespace {
|
||||
if namespaces[i] != nil {
|
||||
defer deleteNS(c, namespaces[i].Name, 5*time.Minute /* namespace deletion timeout */)
|
||||
}
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
} else {
|
||||
Logf("Found DeleteNamespace=false, skipping namespace deletion!")
|
||||
}
|
||||
}
|
||||
|
||||
for _, ns := range namespaces {
|
||||
runKubectlOrDie("create", "-f", backendRcYaml, getNsCmdFlag(ns))
|
||||
}
|
||||
|
||||
for _, ns := range namespaces {
|
||||
runKubectlOrDie("create", "-f", backendSvcYaml, getNsCmdFlag(ns))
|
||||
}
|
||||
|
||||
// wait for objects
|
||||
for _, ns := range namespaces {
|
||||
waitForRCPodsRunning(c, ns.Name, backendRcName)
|
||||
waitForService(c, ns.Name, backendSvcName, true, poll, serviceStartTimeout)
|
||||
}
|
||||
// it is not enough that pods are running because they may be set to running, but
|
||||
// the application itself may have not been initialized. Just query the application.
|
||||
for _, ns := range namespaces {
|
||||
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": backendRcName}))
|
||||
pods, err := c.Pods(ns.Name).List(label, fields.Everything())
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
err = podsResponding(c, ns.Name, backendPodName, false, pods)
|
||||
Expect(err).NotTo(HaveOccurred(), "waiting for all pods to respond")
|
||||
Logf("found %d backend pods responding in namespace %s", len(pods.Items), ns.Name)
|
||||
|
||||
err = serviceResponding(c, ns.Name, backendSvcName)
|
||||
Expect(err).NotTo(HaveOccurred(), "waiting for the service to respond")
|
||||
}
|
||||
|
||||
// Now another tricky part:
|
||||
// It may happen that the service name is not yet in DNS.
|
||||
// So if we start our pod, it will fail. We must make sure
|
||||
// the name is already resolvable. So let's try to query DNS from
|
||||
// the pod we have, until we find our service name.
|
||||
// This complicated code may be removed if the pod itself retried after
|
||||
// dns error or timeout.
|
||||
// This code is probably unnecessary, but let's stay on the safe side.
|
||||
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": backendPodName}))
|
||||
pods, err := c.Pods(namespaces[0].Name).List(label, fields.Everything())
|
||||
|
||||
if err != nil || pods == nil || len(pods.Items) == 0 {
|
||||
Failf("no running pods found")
|
||||
}
|
||||
podName := pods.Items[0].Name
|
||||
|
||||
queryDns := fmt.Sprintf(queryDnsPythonTemplate, backendSvcName+"."+namespaces[0].Name)
|
||||
_, err = lookForStringInPodExec(namespaces[0].Name, podName, []string{"python", "-c", queryDns}, "ok", dnsReadyTimeout)
|
||||
Expect(err).NotTo(HaveOccurred(), "waiting for output from pod exec")
|
||||
|
||||
updatedPodYaml := prepareResourceWithReplacedString(frontendPodYaml, "dns-backend.development.cluster.local", fmt.Sprintf("dns-backend.%s.cluster.local", namespaces[0].Name))
|
||||
|
||||
// create a pod in each namespace
|
||||
for _, ns := range namespaces {
|
||||
newKubectlCommand("create", "-f", "-", getNsCmdFlag(ns)).withStdinData(updatedPodYaml).execOrDie()
|
||||
}
|
||||
|
||||
// wait until the pods have been scheduler, i.e. are not Pending anymore. Remember
|
||||
// that we cannot wait for the pods to be running because our pods terminate by themselves.
|
||||
for _, ns := range namespaces {
|
||||
err := waitForPodNotPending(c, ns.Name, frontendPodName)
|
||||
expectNoError(err)
|
||||
}
|
||||
|
||||
// wait for pods to print their result
|
||||
for _, ns := range namespaces {
|
||||
_, err := lookForStringInLog(ns.Name, frontendPodName, frontendPodContainerName, podOutput, podStartTimeout)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
}
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
func makeHttpRequestToService(c *client.Client, ns, service, path string, timeout time.Duration) (string, error) {
|
||||
|
@@ -564,10 +448,6 @@ func makeHttpRequestToService(c *client.Client, ns, service, path string, timeou
 return string(result), err
 }
 
-func getNsCmdFlag(ns *api.Namespace) string {
-return fmt.Sprintf("--namespace=%v", ns.Name)
-}
-
 // pass enough context with the 'old' parameter so that it replaces what your really intended.
 func prepareResourceWithReplacedString(inputFile, old, new string) string {
 f, err := os.Open(inputFile)
@@ -602,38 +482,3 @@ func forEachPod(c *client.Client, ns, selectorKey, selectorValue string, fn func
 fn(*pod)
 }
 }
-
-func lookForStringInLog(ns, podName, container, expectedString string, timeout time.Duration) (result string, err error) {
-return lookForString(expectedString, timeout, func() string {
-return runKubectlOrDie("log", podName, container, fmt.Sprintf("--namespace=%v", ns))
-})
-}
-
-func lookForStringInFile(ns, podName, container, file, expectedString string, timeout time.Duration) (result string, err error) {
-return lookForString(expectedString, timeout, func() string {
-return runKubectlOrDie("exec", podName, "-c", container, fmt.Sprintf("--namespace=%v", ns), "--", "cat", file)
-})
-}
-
-func lookForStringInPodExec(ns, podName string, command []string, expectedString string, timeout time.Duration) (result string, err error) {
-return lookForString(expectedString, timeout, func() string {
-// use the first container
-args := []string{"exec", podName, fmt.Sprintf("--namespace=%v", ns), "--"}
-args = append(args, command...)
-return runKubectlOrDie(args...)
-})
-}
-
-// Looks for the given string in the output of fn, repeatedly calling fn until
-// the timeout is reached or the string is found. Returns last log and possibly
-// error if the string was not found.
-func lookForString(expectedString string, timeout time.Duration, fn func() string) (result string, err error) {
-for t := time.Now(); time.Since(t) < timeout; time.Sleep(poll) {
-result = fn()
-if strings.Contains(result, expectedString) {
-return
-}
-}
-err = fmt.Errorf("Failed to find \"%s\", last result: \"%s\"", expectedString, result)
-return
-}
@@ -17,7 +17,6 @@ limitations under the License.
 package e2e
 
 import (
-"fmt"
 "time"
 
 "k8s.io/kubernetes/pkg/api"
@ -33,28 +32,31 @@ const (
|
|||
stabilityTimeout = 10 * time.Minute
|
||||
)
|
||||
|
||||
var _ = Describe("Horizontal pod autoscaling", func() {
|
||||
|
||||
var _ = Describe("Horizontal pod autoscaling (scale resource: CPU) [Skipped]", func() {
|
||||
var rc *ResourceConsumer
|
||||
f := NewFramework("horizontal-pod-autoscaling")
|
||||
|
||||
titleUp := "%s should scale from 1 pod to 3 pods and from 3 to 5 (via %s, with scale resource: CPU)"
|
||||
titleDown := "%s should scale from 5 pods to 3 pods and from 3 to 1 (via %s, with scale resource: CPU)"
|
||||
titleUp := "Should scale from 1 pod to 3 pods and from 3 to 5"
|
||||
titleDown := "Should scale from 5 pods to 3 pods and from 3 to 1"
|
||||
|
||||
// CPU tests via deployments
|
||||
It(fmt.Sprintf(titleUp, "[Skipped]", kindDeployment), func() {
|
||||
scaleUp("deployment", kindDeployment, rc, f)
|
||||
})
|
||||
It(fmt.Sprintf(titleDown, "[Skipped]", kindDeployment), func() {
|
||||
scaleDown("deployment", kindDeployment, rc, f)
|
||||
Describe("Deployment", func() {
|
||||
// CPU tests via deployments
|
||||
It(titleUp, func() {
|
||||
scaleUp("deployment", kindDeployment, rc, f)
|
||||
})
|
||||
It(titleDown, func() {
|
||||
scaleDown("deployment", kindDeployment, rc, f)
|
||||
})
|
||||
})
|
||||
|
||||
// CPU tests via replication controllers
|
||||
It(fmt.Sprintf(titleUp, "[Skipped][Autoscaling Suite]", kindRC), func() {
|
||||
scaleUp("rc", kindRC, rc, f)
|
||||
})
|
||||
It(fmt.Sprintf(titleDown, "[Skipped][Autoscaling Suite]", kindRC), func() {
|
||||
scaleDown("rc", kindRC, rc, f)
|
||||
Describe("ReplicationController", func() {
|
||||
// CPU tests via replication controllers
|
||||
It(titleUp, func() {
|
||||
scaleUp("rc", kindRC, rc, f)
|
||||
})
|
||||
It(titleDown, func() {
|
||||
scaleDown("rc", kindRC, rc, f)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
|
|
|
@@ -26,10 +26,10 @@ import (
 "k8s.io/kubernetes/pkg/api"
 )
 
-var _ = Describe("Initial Resources", func() {
+var _ = Describe("Initial Resources [Skipped] ", func() {
 f := NewFramework("initial-resources")
 
-It("[Skipped] should set initial resources based on historical data", func() {
+It("should set initial resources based on historical data", func() {
 // Cleanup data in InfluxDB that left from previous tests.
 influxdbClient, err := getInfluxdbClient(f.Client)
 expectNoError(err, "failed to create influxdb client")
@@ -39,7 +39,7 @@ import (
 . "github.com/onsi/gomega"
 )
 
-var _ = Describe("[Performance Suite] Latency", func() {
+var _ = Describe("[Performance] Latency [Skipped]", func() {
 var c *client.Client
 var nodeCount int
 var additionalPodsPrefix string
@@ -100,8 +100,7 @@ var _ = Describe("[Performance Suite] Latency", func() {
 }
 })
 
-// Skipped to avoid running in e2e
-It("[Skipped] pod start latency should be acceptable", func() {
+It("pod start latency should be acceptable", func() {
 runLatencyTest(nodeCount, c, ns)
 })
 })
@@ -47,7 +47,7 @@ const (
 // the ginkgo.skip list (see driver.go).
 // To run this suite you must explicitly ask for it by setting the
 // -t/--test flag or ginkgo.focus flag.
-var _ = Describe("Load capacity", func() {
+var _ = Describe("[Performance] Load capacity [Skipped]", func() {
 var c *client.Client
 var nodeCount int
 var ns string
@@ -97,7 +97,7 @@ var _ = Describe("Load capacity", func() {
 }
 
 for _, testArg := range loadTests {
-name := fmt.Sprintf("[Skipped] [Performance suite] should be able to handle %v pods per node", testArg.podsPerNode)
+name := fmt.Sprintf("should be able to handle %v pods per node", testArg.podsPerNode)
 itArg := testArg
 
 It(name, func() {
@@ -31,7 +31,7 @@ import (
 // Marked with [Skipped] to skip the test by default (see driver.go),
 // the test needs privileged containers, which are disabled by default.
 // Run the test with "go run hack/e2e.go ... --ginkgo.focus=PersistentVolume"
-var _ = Describe("[Skipped] persistentVolumes", func() {
+var _ = Describe("PersistentVolumes [Skipped]", func() {
 framework := NewFramework("pv")
 var c *client.Client
 var ns string
@@ -41,7 +41,7 @@ var _ = Describe("[Skipped] persistentVolumes", func() {
 ns = framework.Namespace.Name
 })
 
-It("PersistentVolume", func() {
+It("NFS volume can be created, bound, retrieved, unbound, and used by a pod", func() {
 config := VolumeTestConfig{
 namespace: ns,
 prefix: "nfs",
@@ -57,7 +57,7 @@ func scTestPod(hostIPC bool, hostPID bool) *api.Pod {
 return pod
 }
 
-var _ = Describe("[Skipped] Security Context", func() {
+var _ = Describe("Security Context [Skipped]", func() {
 framework := NewFramework("security-context")
 
 It("should support pod.Spec.SecurityContext.SupplementalGroups", func() {
@@ -2462,3 +2462,41 @@ func waitForIngressAddress(c *client.Client, ns, ingName string, timeout time.Du
 })
 return address, err
 }
+
+// Looks for the given string in the log of a specific pod container
+func lookForStringInLog(ns, podName, container, expectedString string, timeout time.Duration) (result string, err error) {
+return lookForString(expectedString, timeout, func() string {
+return runKubectlOrDie("log", podName, container, fmt.Sprintf("--namespace=%v", ns))
+})
+}
+
+// Looks for the given string in a file in a specific pod container
+func lookForStringInFile(ns, podName, container, file, expectedString string, timeout time.Duration) (result string, err error) {
+return lookForString(expectedString, timeout, func() string {
+return runKubectlOrDie("exec", podName, "-c", container, fmt.Sprintf("--namespace=%v", ns), "--", "cat", file)
+})
+}
+
+// Looks for the given string in the output of a command executed in a specific pod container
+func lookForStringInPodExec(ns, podName string, command []string, expectedString string, timeout time.Duration) (result string, err error) {
+return lookForString(expectedString, timeout, func() string {
+// use the first container
+args := []string{"exec", podName, fmt.Sprintf("--namespace=%v", ns), "--"}
+args = append(args, command...)
+return runKubectlOrDie(args...)
+})
+}
+
+// Looks for the given string in the output of fn, repeatedly calling fn until
+// the timeout is reached or the string is found. Returns last log and possibly
+// error if the string was not found.
+func lookForString(expectedString string, timeout time.Duration, fn func() string) (result string, err error) {
+for t := time.Now(); time.Since(t) < timeout; time.Sleep(poll) {
+result = fn()
+if strings.Contains(result, expectedString) {
+return
+}
+}
+err = fmt.Errorf("Failed to find \"%s\", last result: \"%s\"", expectedString, result)
+return
+}
@@ -312,7 +312,10 @@ func deleteCinderVolume(name string) error {
 return err
 }
 
-var _ = Describe("Volumes", func() {
+// Marked with [Skipped] to skip the test by default (see driver.go),
+// these tests needs privileged containers, which are disabled by default.
+// Run the test with "go run hack/e2e.go ... --ginkgo.focus=Volume"
+var _ = Describe("Volumes [Skipped]", func() {
 framework := NewFramework("volume")
 
 // If 'false', the test won't clear its volumes upon completion. Useful for debugging,
@@ -331,10 +334,7 @@ var _ = Describe("Volumes", func() {
 // NFS
 ////////////////////////////////////////////////////////////////////////
 
-// Marked with [Skipped] to skip the test by default (see driver.go),
-// the test needs privileged containers, which are disabled by default.
-// Run the test with "go run hack/e2e.go ... --ginkgo.focus=Volume"
-Describe("[Skipped] NFS", func() {
+Describe("NFS", func() {
 It("should be mountable", func() {
 config := VolumeTestConfig{
 namespace: namespace.Name,
@@ -368,10 +368,7 @@ var _ = Describe("Volumes", func() {
 // Gluster
 ////////////////////////////////////////////////////////////////////////
 
-// Marked with [Skipped] to skip the test by default (see driver.go),
-// the test needs privileged containers, which are disabled by default.
-// Run the test with "go run hack/e2e.go ... --ginkgo.focus=Volume"
-Describe("[Skipped] GlusterFS", func() {
+Describe("GlusterFS", func() {
 It("should be mountable", func() {
 config := VolumeTestConfig{
 namespace: namespace.Name,
@@ -445,13 +442,12 @@ var _ = Describe("Volumes", func() {
 // iSCSI
 ////////////////////////////////////////////////////////////////////////
 
-// Marked with [Skipped] to skip the test by default (see driver.go),
-// the test needs privileged containers, which are disabled by default.
+// The test needs privileged containers, which are disabled by default.
 // Also, make sure that iscsiadm utility and iscsi target kernel modules
 // are installed on all nodes!
 // Run the test with "go run hack/e2e.go ... --ginkgo.focus=iSCSI"
 
-Describe("[Skipped] iSCSI", func() {
+Describe("iSCSI", func() {
 It("should be mountable", func() {
 config := VolumeTestConfig{
 namespace: namespace.Name,
@@ -492,12 +488,7 @@ var _ = Describe("Volumes", func() {
 // Ceph RBD
 ////////////////////////////////////////////////////////////////////////
 
-// Marked with [Skipped] to skip the test by default (see driver.go),
-// the test needs privileged containers, which are disabled by default.
-// Run the test with "go run hack/e2e.go ... --ginkgo.focus=RBD"
-
-Describe("[Skipped] Ceph RBD", func() {
+// Run the test with "go run hack/e2e.go ... --ginkgo.focus=Volume"
+Describe("Ceph RBD", func() {
 It("should be mountable", func() {
 config := VolumeTestConfig{
 namespace: namespace.Name,
@@ -569,10 +560,7 @@ var _ = Describe("Volumes", func() {
 // Ceph
 ////////////////////////////////////////////////////////////////////////
 
-// Marked with [Skipped] to skip the test by default (see driver.go),
-// the test needs privileged containers, which are disabled by default.
-// Run the test with "go run hack/e2e.go ... --ginkgo.focus=Volume"
-Describe("[Skipped] CephFS", func() {
+Describe("CephFS", func() {
 It("should be mountable", func() {
 config := VolumeTestConfig{
 namespace: namespace.Name,
@@ -638,13 +626,12 @@ var _ = Describe("Volumes", func() {
 // OpenStack Cinder
 ////////////////////////////////////////////////////////////////////////
 
-// Marked with [Skipped] to skip the test by default (see driver.go),
-// The test assumes that OpenStack client tools are installed
+// This test assumes that OpenStack client tools are installed
 // (/usr/bin/nova, /usr/bin/cinder and /usr/bin/keystone)
 // and that the usual OpenStack authentication env. variables are set
 // (OS_USERNAME, OS_PASSWORD, OS_TENANT_NAME at least).
 
-Describe("[Skipped] Cinder", func() {
+Describe("Cinder", func() {
 It("should be mountable", func() {
 config := VolumeTestConfig{
 namespace: namespace.Name,