Merge pull request #46135 from krousey/upgrades

Automatic merge from submit-queue (batch tested with PRs 47470, 47260, 47411, 46852, 46135)

Write reports for each upgrade test

Due to the way Ginkgo runs individual test cases and the level of coordination required for the upgrade tests, they were all run under a single Ginkgo test case. This PR generates an auxiliary report that breaks out the results of each upgrade test. This is accomplished by:

  1) Wrapping `ginkgo.Fail` and `ginkgo.Skip` to get the actual failure or skip messages.
  2) Recovering that info in the upgrade test to generate an auxiliary report.
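
Conceptually, this is a panic that carries structured data instead of a constant string, recovered at the per-test boundary. Here is a minimal, self-contained sketch of that wrap-and-recover pattern (hypothetical names such as `failurePanic` and `runCase`; the real code is `ginkgowrapper.Fail` and `finalizeUpgradeTest` in the diff below):

```go
package main

import "fmt"

// failurePanic carries structured failure data instead of a plain string,
// playing the role of ginkgowrapper.FailurePanic.
type failurePanic struct{ Message string }

// fail panics with structured data; the real wrapper does this around
// ginkgo.Fail so callers can recover more than a fixed panic string.
func fail(msg string) {
	panic(failurePanic{Message: msg})
}

// runCase runs one test body and turns a recovered structured panic into
// a per-test report entry, the way finalizeUpgradeTest fills a junit.TestCase.
func runCase(name string, test func()) (report string) {
	report = name + ": PASSED" // assume success; overwritten on panic
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		if fp, ok := r.(failurePanic); ok {
			report = fmt.Sprintf("%s: FAILED: %s", name, fp.Message)
			return
		}
		panic(r) // not our panic; re-raise it
	}()
	test()
	return
}

func main() {
	fmt.Println(runCase("good-case", func() {}))
	fmt.Println(runCase("bad-case", func() { fail("boom") }))
}
```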

I suggest reviewing commit by commit. 

Sample report: https://storage.googleapis.com/krouseytestreports/logs/results/1/artifacts/junit_upgrades.xml

Fixes: #47371
Kubernetes Submit Queue 2017-06-14 12:52:27 -07:00 committed by GitHub
commit 16640d892f
19 changed files with 469 additions and 90 deletions


@@ -457,6 +457,7 @@ staging/src/k8s.io/metrics/pkg/apis/custom_metrics/install
staging/src/k8s.io/metrics/pkg/apis/metrics/install
staging/src/k8s.io/sample-apiserver
staging/src/k8s.io/sample-apiserver/pkg/apis/wardle/install
test/e2e/framework/ginkgowrapper
test/e2e/perftype
test/e2e_node/perftype
test/e2e_node/runner/local
@@ -495,3 +496,4 @@ test/integration/thirdparty
test/integration/ttlcontroller
test/soak/cauldron
test/soak/serve_hostnames
test/utils/junit


@@ -164,6 +164,7 @@ go_library(
"//test/e2e/chaosmonkey:go_default_library",
"//test/e2e/common:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/ginkgowrapper:go_default_library",
"//test/e2e/generated:go_default_library",
"//test/e2e/perf:go_default_library",
"//test/e2e/scheduling:go_default_library",
@@ -171,6 +172,7 @@ go_library(
"//test/e2e_federation:go_default_library",
"//test/images/net/nat:go_default_library",
"//test/utils:go_default_library",
"//test/utils/junit:go_default_library",
"//vendor/github.com/davecgh/go-spew/spew:go_default_library",
"//vendor/github.com/elazarl/goproxy:go_default_library",
"//vendor/github.com/ghodss/yaml:go_default_library",


@@ -17,11 +17,20 @@ limitations under the License.
package e2e
import (
"encoding/xml"
"fmt"
"os"
"path/filepath"
"sync"
"time"
"k8s.io/client-go/discovery"
"k8s.io/kubernetes/pkg/util/version"
"k8s.io/kubernetes/test/e2e/chaosmonkey"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/ginkgowrapper"
"k8s.io/kubernetes/test/e2e/upgrades"
"k8s.io/kubernetes/test/utils/junit"
. "github.com/onsi/ginkgo"
)
@@ -45,32 +54,24 @@ var _ = framework.KubeDescribe("Upgrade [Feature:Upgrade]", func() {
// Create the frameworks here because we can only create them
// in a "Describe".
testFrameworks := map[string]*framework.Framework{}
for _, t := range upgradeTests {
testFrameworks[t.Name()] = framework.NewDefaultFramework(t.Name())
}
testFrameworks := createUpgradeFrameworks()
framework.KubeDescribe("master upgrade", func() {
It("should maintain a functioning cluster [Feature:MasterUpgrade]", func() {
upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
framework.ExpectNoError(err)
cm := chaosmonkey.New(func() {
testSuite := &junit.TestSuite{Name: "Master upgrade"}
masterUpgradeTest := &junit.TestCase{Name: "master-upgrade", Classname: "upgrade_tests"}
testSuite.TestCases = append(testSuite.TestCases, masterUpgradeTest)
upgradeFunc := func() {
start := time.Now()
defer finalizeUpgradeTest(start, masterUpgradeTest)
target := upgCtx.Versions[1].Version.String()
framework.ExpectNoError(framework.MasterUpgrade(target))
framework.ExpectNoError(framework.CheckMasterVersion(f.ClientSet, target))
})
for _, t := range upgradeTests {
cma := chaosMonkeyAdapter{
test: t,
framework: testFrameworks[t.Name()],
upgradeType: upgrades.MasterUpgrade,
upgCtx: *upgCtx,
}
cm.Register(cma.Test)
}
cm.Do()
runUpgradeSuite(f, testFrameworks, testSuite, upgCtx, upgrades.MasterUpgrade, upgradeFunc)
})
})
@@ -79,21 +80,17 @@ var _ = framework.KubeDescribe("Upgrade [Feature:Upgrade]", func() {
upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
framework.ExpectNoError(err)
cm := chaosmonkey.New(func() {
testSuite := &junit.TestSuite{Name: "Node upgrade"}
nodeUpgradeTest := &junit.TestCase{Name: "node-upgrade", Classname: "upgrade_tests"}
upgradeFunc := func() {
start := time.Now()
defer finalizeUpgradeTest(start, nodeUpgradeTest)
target := upgCtx.Versions[1].Version.String()
framework.ExpectNoError(framework.NodeUpgrade(f, target, framework.TestContext.UpgradeImage))
framework.ExpectNoError(framework.CheckNodesVersions(f.ClientSet, target))
})
for _, t := range upgradeTests {
cma := chaosMonkeyAdapter{
test: t,
framework: testFrameworks[t.Name()],
upgradeType: upgrades.NodeUpgrade,
upgCtx: *upgCtx,
}
cm.Register(cma.Test)
}
cm.Do()
runUpgradeSuite(f, testFrameworks, testSuite, upgCtx, upgrades.NodeUpgrade, upgradeFunc)
})
})
@@ -102,23 +99,19 @@ var _ = framework.KubeDescribe("Upgrade [Feature:Upgrade]", func() {
upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
framework.ExpectNoError(err)
cm := chaosmonkey.New(func() {
testSuite := &junit.TestSuite{Name: "Cluster upgrade"}
clusterUpgradeTest := &junit.TestCase{Name: "cluster-upgrade", Classname: "upgrade_tests"}
testSuite.TestCases = append(testSuite.TestCases, clusterUpgradeTest)
upgradeFunc := func() {
start := time.Now()
defer finalizeUpgradeTest(start, clusterUpgradeTest)
target := upgCtx.Versions[1].Version.String()
framework.ExpectNoError(framework.MasterUpgrade(target))
framework.ExpectNoError(framework.CheckMasterVersion(f.ClientSet, target))
framework.ExpectNoError(framework.NodeUpgrade(f, target, framework.TestContext.UpgradeImage))
framework.ExpectNoError(framework.CheckNodesVersions(f.ClientSet, target))
})
for _, t := range upgradeTests {
cma := chaosMonkeyAdapter{
test: t,
framework: testFrameworks[t.Name()],
upgradeType: upgrades.ClusterUpgrade,
upgCtx: *upgCtx,
}
cm.Register(cma.Test)
}
cm.Do()
runUpgradeSuite(f, testFrameworks, testSuite, upgCtx, upgrades.ClusterUpgrade, upgradeFunc)
})
})
})
@@ -138,24 +131,21 @@ var _ = framework.KubeDescribe("Downgrade [Feature:Downgrade]", func() {
upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
framework.ExpectNoError(err)
cm := chaosmonkey.New(func() {
testSuite := &junit.TestSuite{Name: "Cluster downgrade"}
clusterDowngradeTest := &junit.TestCase{Name: "cluster-downgrade", Classname: "upgrade_tests"}
testSuite.TestCases = append(testSuite.TestCases, clusterDowngradeTest)
upgradeFunc := func() {
start := time.Now()
defer finalizeUpgradeTest(start, clusterDowngradeTest)
// Yes this really is a downgrade. And nodes must downgrade first.
target := upgCtx.Versions[1].Version.String()
framework.ExpectNoError(framework.NodeUpgrade(f, target, framework.TestContext.UpgradeImage))
framework.ExpectNoError(framework.CheckNodesVersions(f.ClientSet, target))
framework.ExpectNoError(framework.MasterUpgrade(target))
framework.ExpectNoError(framework.CheckMasterVersion(f.ClientSet, target))
})
for _, t := range upgradeTests {
cma := chaosMonkeyAdapter{
test: t,
framework: testFrameworks[t.Name()],
upgradeType: upgrades.ClusterUpgrade,
upgCtx: *upgCtx,
}
cm.Register(cma.Test)
}
cm.Do()
runUpgradeSuite(f, testFrameworks, testSuite, upgCtx, upgrades.ClusterUpgrade, upgradeFunc)
})
})
})
@@ -165,55 +155,138 @@ var _ = framework.KubeDescribe("etcd Upgrade [Feature:EtcdUpgrade]", func() {
// Create the frameworks here because we can only create them
// in a "Describe".
testFrameworks := map[string]*framework.Framework{}
for _, t := range upgradeTests {
testFrameworks[t.Name()] = framework.NewDefaultFramework(t.Name())
}
testFrameworks := createUpgradeFrameworks()
framework.KubeDescribe("etcd upgrade", func() {
It("should maintain a functioning cluster", func() {
upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), "")
framework.ExpectNoError(err)
cm := chaosmonkey.New(func() {
framework.ExpectNoError(framework.EtcdUpgrade(framework.TestContext.EtcdUpgradeStorage, framework.TestContext.EtcdUpgradeVersion))
// TODO(mml): verify the etcd version
})
for _, t := range upgradeTests {
cma := chaosMonkeyAdapter{
test: t,
framework: testFrameworks[t.Name()],
upgradeType: upgrades.EtcdUpgrade,
upgCtx: *upgCtx,
}
cm.Register(cma.Test)
}
testSuite := &junit.TestSuite{Name: "Etcd upgrade"}
etcdTest := &junit.TestCase{Name: "etcd-upgrade", Classname: "upgrade_tests"}
testSuite.TestCases = append(testSuite.TestCases, etcdTest)
cm.Do()
upgradeFunc := func() {
start := time.Now()
defer finalizeUpgradeTest(start, etcdTest)
framework.ExpectNoError(framework.EtcdUpgrade(framework.TestContext.EtcdUpgradeStorage, framework.TestContext.EtcdUpgradeVersion))
}
runUpgradeSuite(f, testFrameworks, testSuite, upgCtx, upgrades.EtcdUpgrade, upgradeFunc)
})
})
})
type chaosMonkeyAdapter struct {
test upgrades.Test
testReport *junit.TestCase
framework *framework.Framework
upgradeType upgrades.UpgradeType
upgCtx upgrades.UpgradeContext
}
func (cma *chaosMonkeyAdapter) Test(sem *chaosmonkey.Semaphore) {
start := time.Now()
var once sync.Once
ready := func() {
once.Do(func() {
sem.Ready()
})
}
defer finalizeUpgradeTest(start, cma.testReport)
defer ready()
if skippable, ok := cma.test.(upgrades.Skippable); ok && skippable.Skip(cma.upgCtx) {
By("skipping test " + cma.test.Name())
sem.Ready()
cma.testReport.Skipped = "skipping test " + cma.test.Name()
return
}
defer cma.test.Teardown(cma.framework)
cma.test.Setup(cma.framework)
sem.Ready()
ready()
cma.test.Test(cma.framework, sem.StopCh, cma.upgradeType)
}
func finalizeUpgradeTest(start time.Time, tc *junit.TestCase) {
tc.Time = time.Since(start).Seconds()
r := recover()
if r == nil {
return
}
switch r := r.(type) {
case ginkgowrapper.FailurePanic:
tc.Failures = []*junit.Failure{
{
Message: r.Message,
Type: "Failure",
Value: fmt.Sprintf("%s\n\n%s", r.Message, r.FullStackTrace),
},
}
case ginkgowrapper.SkipPanic:
tc.Skipped = fmt.Sprintf("%s:%d %q", r.Filename, r.Line, r.Message)
default:
tc.Errors = []*junit.Error{
{
Message: fmt.Sprintf("%v", r),
Type: "Panic",
Value: fmt.Sprintf("%v", r),
},
}
}
}
func createUpgradeFrameworks() map[string]*framework.Framework {
testFrameworks := map[string]*framework.Framework{}
for _, t := range upgradeTests {
testFrameworks[t.Name()] = framework.NewDefaultFramework(t.Name())
}
return testFrameworks
}
func runUpgradeSuite(
f *framework.Framework,
testFrameworks map[string]*framework.Framework,
testSuite *junit.TestSuite,
upgCtx *upgrades.UpgradeContext,
upgradeType upgrades.UpgradeType,
upgradeFunc func(),
) {
upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
framework.ExpectNoError(err)
cm := chaosmonkey.New(upgradeFunc)
for _, t := range upgradeTests {
testCase := &junit.TestCase{
Name: t.Name(),
Classname: "upgrade_tests",
}
testSuite.TestCases = append(testSuite.TestCases, testCase)
cma := chaosMonkeyAdapter{
test: t,
testReport: testCase,
framework: testFrameworks[t.Name()],
upgradeType: upgradeType,
upgCtx: *upgCtx,
}
cm.Register(cma.Test)
}
start := time.Now()
defer func() {
testSuite.Update()
testSuite.Time = time.Since(start).Seconds()
if framework.TestContext.ReportDir != "" {
fname := filepath.Join(framework.TestContext.ReportDir, fmt.Sprintf("junit_%supgrades.xml", framework.TestContext.ReportPrefix))
f, err := os.Create(fname)
if err != nil {
return
}
defer f.Close()
xml.NewEncoder(f).Encode(testSuite)
}
}()
cm.Do()
}
func getUpgradeContext(c discovery.DiscoveryInterface, upgradeTarget string) (*upgrades.UpgradeContext, error) {
current, err := c.ServerVersion()
if err != nil {


@@ -237,7 +237,7 @@ var _ = framework.KubeDescribe("Probing container", func() {
It("should be restarted with a docker exec liveness probe with timeout [Conformance]", func() {
// TODO: enable this test once the default exec handler supports timeout.
Skip("The default exec handler, dockertools.NativeExecHandler, does not support timeouts due to a limitation in the Docker Remote API")
framework.Skipf("The default exec handler, dockertools.NativeExecHandler, does not support timeouts due to a limitation in the Docker Remote API")
runLivenessTest(f, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "liveness-exec",


@@ -196,7 +196,7 @@ var _ = framework.KubeDescribe("Pods", func() {
framework.Failf("Failed to observe pod creation: %v", event)
}
case <-time.After(framework.PodStartTimeout):
Fail("Timeout while waiting for pod creation")
framework.Failf("Timeout while waiting for pod creation")
}
// We need to wait for the pod to be running, otherwise the deletion
@@ -245,14 +245,14 @@ var _ = framework.KubeDescribe("Pods", func() {
deleted = true
case watch.Error:
framework.Logf("received a watch error: %v", event.Object)
Fail("watch closed with error")
framework.Failf("watch closed with error")
}
case <-timer:
Fail("timed out waiting for pod deletion")
framework.Failf("timed out waiting for pod deletion")
}
}
if !deleted {
Fail("Failed to observe pod deletion")
framework.Failf("Failed to observe pod deletion")
}
Expect(lastPod.DeletionTimestamp).ToNot(BeNil())


@@ -44,6 +44,7 @@ import (
"k8s.io/kubernetes/pkg/util/logs"
commontest "k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/ginkgowrapper"
"k8s.io/kubernetes/test/e2e/generated"
federationtest "k8s.io/kubernetes/test/e2e_federation"
testutils "k8s.io/kubernetes/test/utils"
@@ -318,7 +319,7 @@ func RunE2ETests(t *testing.T) {
logs.InitLogs()
defer logs.FlushLogs()
gomega.RegisterFailHandler(ginkgo.Fail)
gomega.RegisterFailHandler(ginkgowrapper.Fail)
// Disable skipped tests unless they are explicitly requested.
if config.GinkgoConfig.FocusString == "" && config.GinkgoConfig.SkipString == "" {
config.GinkgoConfig.SkipString = `\[Flaky\]|\[Feature:.+\]`


@@ -86,6 +86,7 @@ go_library(
"//pkg/volume/util/volumehelper:go_default_library",
"//plugin/pkg/scheduler/algorithm/predicates:go_default_library",
"//plugin/pkg/scheduler/schedulercache:go_default_library",
"//test/e2e/framework/ginkgowrapper:go_default_library",
"//test/e2e/generated:go_default_library",
"//test/e2e/perftype:go_default_library",
"//test/utils:go_default_library",
@@ -145,6 +146,9 @@ filegroup(
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
srcs = [
":package-srcs",
"//test/e2e/framework/ginkgowrapper:all-srcs",
],
tags = ["automanaged"],
)


@@ -0,0 +1,28 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = ["wrapper.go"],
tags = ["automanaged"],
deps = ["//vendor/github.com/onsi/ginkgo:go_default_library"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)


@@ -0,0 +1,134 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package ginkgowrapper wraps Ginkgo Fail and Skip functions to panic
// with structured data instead of a constant string.
package ginkgowrapper
import (
"bufio"
"bytes"
"regexp"
"runtime"
"runtime/debug"
"strings"
"github.com/onsi/ginkgo"
)
// FailurePanic is the value that will be panicked from Fail.
type FailurePanic struct {
Message string // The failure message passed to Fail
Filename string // The filename that is the source of the failure
Line int // The line number of the filename that is the source of the failure
FullStackTrace string // A full stack trace starting at the source of the failure
}
// String makes FailurePanic look like the old Ginkgo panic when printed.
func (FailurePanic) String() string { return ginkgo.GINKGO_PANIC }
// Fail wraps ginkgo.Fail so that it panics with more useful
// information about the failure. This function will panic with a
// FailurePanic.
func Fail(message string, callerSkip ...int) {
skip := 1
if len(callerSkip) > 0 {
skip += callerSkip[0]
}
_, file, line, _ := runtime.Caller(skip)
fp := FailurePanic{
Message: message,
Filename: file,
Line: line,
FullStackTrace: pruneStack(skip),
}
defer func() {
e := recover()
if e != nil {
panic(fp)
}
}()
ginkgo.Fail(message, skip)
}
// SkipPanic is the value that will be panicked from Skip.
type SkipPanic struct {
Message string // The message passed to Skip
Filename string // The filename that is the source of the failure
Line int // The line number of the filename that is the source of the failure
FullStackTrace string // A full stack trace starting at the source of the failure
}
// String makes SkipPanic look like the old Ginkgo panic when printed.
func (SkipPanic) String() string { return ginkgo.GINKGO_PANIC }
// Skip wraps ginkgo.Skip so that it panics with more useful
// information about why the test is being skipped. This function will
// panic with a SkipPanic.
func Skip(message string, callerSkip ...int) {
skip := 1
if len(callerSkip) > 0 {
skip += callerSkip[0]
}
_, file, line, _ := runtime.Caller(skip)
sp := SkipPanic{
Message: message,
Filename: file,
Line: line,
FullStackTrace: pruneStack(skip),
}
defer func() {
e := recover()
if e != nil {
panic(sp)
}
}()
ginkgo.Skip(message, skip)
}
// ginkgo adds a lot of test running infrastructure to the stack, so
// we filter those out
var stackSkipPattern = regexp.MustCompile(`onsi/ginkgo`)
func pruneStack(skip int) string {
skip += 2 // one for pruneStack and one for debug.Stack
stack := debug.Stack()
scanner := bufio.NewScanner(bytes.NewBuffer(stack))
var prunedStack []string
// skip the top of the stack
for i := 0; i < 2*skip+1; i++ {
scanner.Scan()
}
for scanner.Scan() {
if stackSkipPattern.Match(scanner.Bytes()) {
scanner.Scan() // these come in pairs
} else {
prunedStack = append(prunedStack, scanner.Text())
scanner.Scan() // these come in pairs
prunedStack = append(prunedStack, scanner.Text())
}
}
return strings.Join(prunedStack, "\n")
}


@@ -98,6 +98,7 @@ import (
utilversion "k8s.io/kubernetes/pkg/util/version"
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates"
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
"k8s.io/kubernetes/test/e2e/framework/ginkgowrapper"
testutil "k8s.io/kubernetes/test/utils"
)
@@ -293,13 +294,13 @@ func Failf(format string, args ...interface{}) {
func FailfWithOffset(offset int, format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
log("INFO", msg)
Fail(nowStamp()+": "+msg, 1+offset)
ginkgowrapper.Fail(nowStamp()+": "+msg, 1+offset)
}
func Skipf(format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
log("INFO", msg)
Skip(nowStamp() + ": " + msg)
ginkgowrapper.Skip(nowStamp() + ": " + msg)
}
func SkipUnlessNodeCountIsAtLeast(minNodeCount int) {


@@ -958,7 +958,7 @@ metadata:
return false, nil
}
if len(uidToPort) > 1 {
Fail("Too many endpoints found")
framework.Failf("Too many endpoints found")
}
for _, port := range uidToPort {
if port[0] != redisPort {


@@ -127,7 +127,7 @@ var _ = framework.KubeDescribe("PodPreset", func() {
framework.Failf("Failed to observe pod creation: %v", event)
}
case <-time.After(framework.PodStartTimeout):
Fail("Timeout while waiting for pod creation")
framework.Failf("Timeout while waiting for pod creation")
}
// We need to wait for the pod to be running, otherwise the deletion
@@ -233,7 +233,7 @@ var _ = framework.KubeDescribe("PodPreset", func() {
framework.Failf("Failed to observe pod creation: %v", event)
}
case <-time.After(framework.PodStartTimeout):
Fail("Timeout while waiting for pod creation")
framework.Failf("Timeout while waiting for pod creation")
}
// We need to wait for the pod to be running, otherwise the deletion


@@ -98,7 +98,7 @@ var _ = framework.KubeDescribe("Pods Extended", func() {
framework.Failf("Failed to observe pod creation: %v", event)
}
case <-time.After(framework.PodStartTimeout):
Fail("Timeout while waiting for pod creation")
framework.Failf("Timeout while waiting for pod creation")
}
// We need to wait for the pod to be running, otherwise the deletion
@@ -178,7 +178,7 @@ }
}
}
if !deleted {
Fail("Failed to observe pod deletion")
framework.Failf("Failed to observe pod deletion")
}
Expect(lastPod.DeletionTimestamp).ToNot(BeNil())


@@ -269,7 +269,7 @@ var _ = framework.KubeDescribe("Proxy", func() {
framework.Logf("Pod %s has the following error logs: %s", pods[0].Name, body)
}
Fail(strings.Join(errs, "\n"))
framework.Failf(strings.Join(errs, "\n"))
}
})
})


@@ -50,7 +50,7 @@ var _ = framework.KubeDescribe("Opaque resources [Feature:OpaqueResources]", fun
}
}
if node == nil {
Fail("unable to select a non-master node")
framework.Failf("unable to select a non-master node")
}
}


@@ -85,7 +85,7 @@ var _ = framework.KubeDescribe("Service endpoints latency", func() {
}
if n < 2 {
failing.Insert("Less than two runs succeeded; aborting.")
Fail(strings.Join(failing.List(), "\n"))
framework.Failf(strings.Join(failing.List(), "\n"))
}
percentile := func(p int) time.Duration {
est := n * p / 100
@@ -112,7 +112,7 @@
if failing.Len() > 0 {
errList := strings.Join(failing.List(), "\n")
helpfulInfo := fmt.Sprintf("\n50, 90, 99 percentiles: %v %v %v", p50, p90, p99)
Fail(errList + helpfulInfo)
framework.Failf(errList + helpfulInfo)
}
})
})


@@ -59,6 +59,9 @@ filegroup(
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
srcs = [
":package-srcs",
"//test/utils/junit:all-srcs",
],
tags = ["automanaged"],
)

test/utils/junit/BUILD (new file)

@@ -0,0 +1,27 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = ["junit.go"],
tags = ["automanaged"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

test/utils/junit/junit.go (new file)

@@ -0,0 +1,104 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package junit provides data structures to allow easy XML encoding
// and decoding of JUnit test results.
package junit
import (
"encoding/xml"
"time"
)
// TestSuite is a top-level test suite containing test cases.
type TestSuite struct {
XMLName xml.Name `xml:"testsuite"`
Name string `xml:"name,attr"`
Tests int `xml:"tests,attr"`
Disabled int `xml:"disabled,attr,omitempty"`
Errors int `xml:"errors,attr"`
Failures int `xml:"failures,attr"`
Skipped int `xml:"skipped,attr,omitempty"`
Time float64 `xml:"time,attr"`
Timestamp time.Time `xml:"timestamp,attr"`
ID int `xml:"id,attr,omitempty"`
Package string `xml:"package,attr,omitempty"`
Hostname string `xml:"hostname,attr"`
Properties []*Property `xml:"properties,omitempty"`
TestCases []*TestCase `xml:"testcase"`
SystemOut string `xml:"system-out,omitempty"`
SystemErr string `xml:"system-err,omitempty"`
}
// Update iterates through the TestCases and updates Tests, Errors,
// Failures, and Skipped top level attributes.
func (t *TestSuite) Update() {
t.Tests = len(t.TestCases)
for _, tc := range t.TestCases {
t.Errors += len(tc.Errors)
t.Failures += len(tc.Failures)
if len(tc.Skipped) > 0 {
t.Skipped++
}
}
}
// Property is a simple key-value property that can be attached to a TestSuite.
type Property struct {
XMLName xml.Name `xml:"property"`
Name string `xml:"name,attr"`
Value string `xml:"value,attr"`
}
// Error represents the errors in a test case.
type Error struct {
XMLName xml.Name `xml:"error"`
Message string `xml:"message,attr,omitempty"`
Type string `xml:"type,attr"`
Value string `xml:",cdata"`
}
// Failure represents the failures in a test case.
type Failure struct {
XMLName xml.Name `xml:"failure"`
Message string `xml:"message,attr,omitempty"`
Type string `xml:"type,attr"`
Value string `xml:",cdata"`
}
// TestCase represents a single test case within a suite.
type TestCase struct {
XMLName xml.Name `xml:"testcase"`
Name string `xml:"name,attr"`
Classname string `xml:"classname,attr"`
Status string `xml:"status,attr,omitempty"`
Assertions int `xml:"assertions,attr,omitempty"`
Time float64 `xml:"time,attr"`
Skipped string `xml:"skipped,omitempty"`
Errors []*Error `xml:"error,omitempty"`
Failures []*Failure `xml:"failure,omitempty"`
}
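
For reference, a short hypothetical use of the junit types above, mirroring what `runUpgradeSuite` does when it writes `junit_<prefix>upgrades.xml` (import path assumed from the BUILD file; output goes to stdout here rather than a report file):

```go
package main

import (
	"encoding/xml"
	"os"

	"k8s.io/kubernetes/test/utils/junit"
)

func main() {
	suite := &junit.TestSuite{Name: "Master upgrade"}
	suite.TestCases = append(suite.TestCases, &junit.TestCase{
		Name:      "master-upgrade",
		Classname: "upgrade_tests",
		Time:      1.5, // seconds, as recorded by finalizeUpgradeTest
	})
	suite.Update() // roll Tests/Errors/Failures/Skipped up from the cases
	// runUpgradeSuite encodes into a file under ReportDir; stdout here.
	if err := xml.NewEncoder(os.Stdout).Encode(suite); err != nil {
		panic(err)
	}
}
```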