Merge pull request #69105 from pohly/e2e-settings

E2E settings
k8s-ci-robot 2018-10-10 12:01:07 -07:00 committed by GitHub
commit 9c94363ef9
15 changed files with 802 additions and 133 deletions

View File

@ -19,6 +19,7 @@ go_test(
"//test/e2e/common:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/testfiles:go_default_library",
"//test/e2e/framework/viperconfig:go_default_library",
"//test/e2e/generated:go_default_library",
"//test/e2e/instrumentation:go_default_library",
"//test/e2e/kubectl:go_default_library",

View File

@ -17,10 +17,14 @@ limitations under the License.
package e2e
import (
"flag"
"fmt"
"os"
"testing"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/testfiles"
"k8s.io/kubernetes/test/e2e/framework/viperconfig"
"k8s.io/kubernetes/test/e2e/generated"
// test sources
@ -42,8 +46,16 @@ import (
_ "k8s.io/kubernetes/test/e2e/ui"
)
var viperConfig = flag.String("viper-config", "", "The name of a viper config file (https://github.com/spf13/viper#what-is-viper). All e2e command line parameters can also be configured in such a file. May contain a path and may or may not contain the file suffix. The default is to look for an optional file with `e2e` as base name. If a file is specified explicitly, it must be present.")
func init() {
framework.ViperizeFlags()
// Register framework flags, then handle flags and Viper config.
framework.HandleFlags()
if err := viperconfig.ViperizeFlags(*viperConfig, "e2e"); err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
framework.AfterReadingAllFlags(&framework.TestContext)
// TODO: Deprecating repo-root over time... instead just use gobindata_util.go , see #23987.
// Right now it is still needed, for example by

View File

@ -153,7 +153,6 @@ go_library(
"//vendor/github.com/onsi/gomega/types:go_default_library",
"//vendor/github.com/prometheus/common/expfmt:go_default_library",
"//vendor/github.com/prometheus/common/model:go_default_library",
"//vendor/github.com/spf13/viper:go_default_library",
"//vendor/golang.org/x/crypto/ssh:go_default_library",
"//vendor/golang.org/x/net/websocket:go_default_library",
"//vendor/google.golang.org/api/compute/v1:go_default_library",
@ -173,10 +172,12 @@ filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//test/e2e/framework/config:all-srcs",
"//test/e2e/framework/ginkgowrapper:all-srcs",
"//test/e2e/framework/metrics:all-srcs",
"//test/e2e/framework/testfiles:all-srcs",
"//test/e2e/framework/timer:all-srcs",
"//test/e2e/framework/viperconfig:all-srcs",
],
tags = ["automanaged"],
)

View File

@ -0,0 +1,32 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = ["config.go"],
importpath = "k8s.io/kubernetes/test/e2e/framework/config",
visibility = ["//visibility:public"],
)
go_test(
name = "go_default_test",
srcs = ["config_test.go"],
embed = [":go_default_library"],
deps = [
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/github.com/stretchr/testify/require:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@ -0,0 +1,234 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package config simplifies the declaration of configuration options.
// Right now the implementation maps them directly to command line
// flags. When combined with test/e2e/framework/viperconfig in a test
// suite, those flags can then also be read from a config file.
//
// Instead of defining flags one-by-one, developers annotate a
// structure with tags and then call a single function. This is the
// same approach as in https://godoc.org/github.com/jessevdk/go-flags,
// but implemented so that a test suite can continue to use the normal
// "flag" package.
//
// For example, a file storage/csi.go might define:
//
// var scaling struct {
// NumNodes int `default:"1" usage:"number of nodes to run on"`
// Master string
// }
// _ = config.AddOptions(&scaling, "storage.csi.scaling")
//
// This defines the following command line flags:
//
// -storage.csi.scaling.numNodes=<int> - number of nodes to run on (default: 1)
// -storage.csi.scaling.master=<string>
//
// All fields in the structure must be exported and have one of the following
// types (same as in the `flag` package):
// - bool
// - time.Duration
// - float64
// - string
// - int
// - int64
// - uint
// - uint64
// - and/or nested or embedded structures containing those basic types.
//
// Each basic entry may have a tag with these optional keys:
//
// usage: additional explanation of the option
// default: the default value, in the same format as it would
// be given on the command line and true/false for
// a boolean
//
// The names of the final configuration options are a combination of an
// optional common prefix for all options in the structure and the
// name of the fields, concatenated with a dot. To get names that are
// consistent with the command line flags defined by `ginkgo`, the
// initial character of each field name is converted to lower case.
//
// There is currently no support for aliases, so renaming the fields
// or the common prefix will be visible to users of the test suite and
// may break scripts which use the old names.
//
// The variable will be filled with the actual values by the test
// suite before running tests. Beware that the code which registers
// Ginkgo tests cannot use those config options, because registering
// tests and options both run before the E2E test suite handles
// parameters.
package config
import (
"flag"
"fmt"
"reflect"
"strconv"
"time"
"unicode"
"unicode/utf8"
)
// CommandLine is the flag set that AddOptions adds to. Usually this
// is the same as the default in the flag package, but can also be
// something else (for example during testing).
var CommandLine = flag.CommandLine
// AddOptions analyzes the options value and creates the necessary
// flags to populate it.
//
// The prefix can be used to root the options deeper in the overall
// set of options, with a dot separating different levels.
//
// The function always returns true, to enable this simplified
// registration of options:
// _ = AddOptions(...)
//
// It panics when it encounters an error, like unsupported types
// or option name conflicts.
func AddOptions(options interface{}, prefix string) bool {
optionsType := reflect.TypeOf(options)
if optionsType == nil {
panic("options parameter without a type - nil?!")
}
if optionsType.Kind() != reflect.Ptr || optionsType.Elem().Kind() != reflect.Struct {
panic(fmt.Sprintf("need a pointer to a struct, got instead: %T", options))
}
addStructFields(optionsType.Elem(), reflect.Indirect(reflect.ValueOf(options)), prefix)
return true
}
func addStructFields(structType reflect.Type, structValue reflect.Value, prefix string) {
for i := 0; i < structValue.NumField(); i++ {
entry := structValue.Field(i)
addr := entry.Addr()
structField := structType.Field(i)
name := structField.Name
r, n := utf8.DecodeRuneInString(name)
name = string(unicode.ToLower(r)) + name[n:]
usage := structField.Tag.Get("usage")
def := structField.Tag.Get("default")
if prefix != "" {
name = prefix + "." + name
}
if structField.PkgPath != "" {
panic(fmt.Sprintf("struct entry %q not exported", name))
}
ptr := addr.Interface()
if structField.Anonymous {
// Entries in embedded fields are treated like
// entries in the struct itself, i.e. we add
// them with the same prefix.
addStructFields(structField.Type, entry, prefix)
continue
}
if structField.Type.Kind() == reflect.Struct {
// Add nested options.
addStructFields(structField.Type, entry, name)
continue
}
// We could switch based on structField.Type. Doing a
// switch after getting an interface holding the
// pointer to the entry has the advantage that we
// immediately have something that we can add as a flag
// variable.
//
// Perhaps generics will make this entire switch redundant someday...
switch ptr := ptr.(type) {
case *bool:
var defValue bool
parseDefault(&defValue, name, def)
CommandLine.BoolVar(ptr, name, defValue, usage)
case *time.Duration:
var defValue time.Duration
parseDefault(&defValue, name, def)
CommandLine.DurationVar(ptr, name, defValue, usage)
case *float64:
var defValue float64
parseDefault(&defValue, name, def)
CommandLine.Float64Var(ptr, name, defValue, usage)
case *string:
CommandLine.StringVar(ptr, name, def, usage)
case *int:
var defValue int
parseDefault(&defValue, name, def)
CommandLine.IntVar(ptr, name, defValue, usage)
case *int64:
var defValue int64
parseDefault(&defValue, name, def)
CommandLine.Int64Var(ptr, name, defValue, usage)
case *uint:
var defValue uint
parseDefault(&defValue, name, def)
CommandLine.UintVar(ptr, name, defValue, usage)
case *uint64:
var defValue uint64
parseDefault(&defValue, name, def)
CommandLine.Uint64Var(ptr, name, defValue, usage)
default:
panic(fmt.Sprintf("unsupported struct entry type %q: %T", name, entry.Interface()))
}
}
}
// parseDefault is necessary because "flag" wants the default in the
// actual type and cannot take a string. It would be nice to reuse the
// existing code for parsing from the "flag" package, but it isn't
// exported.
func parseDefault(value interface{}, name, def string) {
if def == "" {
return
}
checkErr := func(err error, value interface{}) {
if err != nil {
panic(fmt.Sprintf("invalid default %q for %T entry %s: %s", def, value, name, err))
}
}
switch value := value.(type) {
case *bool:
v, err := strconv.ParseBool(def)
checkErr(err, *value)
*value = v
case *time.Duration:
v, err := time.ParseDuration(def)
checkErr(err, *value)
*value = v
case *float64:
v, err := strconv.ParseFloat(def, 64)
checkErr(err, *value)
*value = v
case *int:
v, err := strconv.Atoi(def)
checkErr(err, *value)
*value = v
case *int64:
v, err := strconv.ParseInt(def, 0, 64)
checkErr(err, *value)
*value = v
case *uint:
v, err := strconv.ParseUint(def, 0, strconv.IntSize)
checkErr(err, *value)
*value = uint(v)
case *uint64:
v, err := strconv.ParseUint(def, 0, 64)
checkErr(err, *value)
*value = v
default:
panic(fmt.Sprintf("%q: setting defaults not supported for type %T", name, value))
}
}
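To make the intended usage concrete, here is a minimal sketch of a hypothetical test file that registers its own options via this package; the file name, prefix and field names below are invented for illustration and are not part of this change:

package storage

import (
	"time"

	"k8s.io/kubernetes/test/e2e/framework/config"
)

// example is populated from -storage.example.numPods and
// -storage.example.timeout once the test suite parses its flags.
var example struct {
	NumPods int           `default:"3" usage:"number of pods to create"`
	Timeout time.Duration `default:"30s" usage:"how long to wait for the pods"`
}

// AddOptions always returns true (and panics on registration errors),
// so the blank assignment merely allows calling it at package scope.
var _ = config.AddOptions(&example, "storage.example")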

View File

@ -0,0 +1,258 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"flag"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestInt(t *testing.T) {
CommandLine = flag.NewFlagSet("test", 0)
var context struct {
Number int `default:"5" usage:"some number"`
}
require.NotPanics(t, func() {
AddOptions(&context, "")
})
require.Equal(t, []simpleFlag{
{
name: "number",
usage: "some number",
defValue: "5",
}},
allFlags(CommandLine))
assert.Equal(t, 5, context.Number)
}
func TestLower(t *testing.T) {
CommandLine = flag.NewFlagSet("test", 0)
var context struct {
Ähem string
MixedCase string
}
require.NotPanics(t, func() {
AddOptions(&context, "")
})
require.Equal(t, []simpleFlag{
{
name: "mixedCase",
},
{
name: "ähem",
},
},
allFlags(CommandLine))
}
func TestPrefix(t *testing.T) {
CommandLine = flag.NewFlagSet("test", 0)
var context struct {
Number int `usage:"some number"`
}
require.NotPanics(t, func() {
AddOptions(&context, "some.prefix")
})
require.Equal(t, []simpleFlag{
{
name: "some.prefix.number",
usage: "some number",
defValue: "0",
}},
allFlags(CommandLine))
}
func TestRecursion(t *testing.T) {
CommandLine = flag.NewFlagSet("test", 0)
type Nested struct {
Number1 int `usage:"embedded number"`
}
var context struct {
Nested
A struct {
B struct {
C struct {
Number2 int `usage:"some number"`
}
}
}
}
require.NotPanics(t, func() {
AddOptions(&context, "")
})
require.Equal(t, []simpleFlag{
{
name: "a.b.c.number2",
usage: "some number",
defValue: "0",
},
{
name: "number1",
usage: "embedded number",
defValue: "0",
},
},
allFlags(CommandLine))
}
func TestPanics(t *testing.T) {
assert.PanicsWithValue(t, `invalid default "a" for int entry prefix.number: strconv.Atoi: parsing "a": invalid syntax`, func() {
var context struct {
Number int `default:"a"`
}
AddOptions(&context, "prefix")
})
assert.PanicsWithValue(t, `invalid default "10000000000000000000" for int entry prefix.number: strconv.Atoi: parsing "10000000000000000000": value out of range`, func() {
var context struct {
Number int `default:"10000000000000000000"`
}
AddOptions(&context, "prefix")
})
assert.PanicsWithValue(t, `options parameter without a type - nil?!`, func() {
AddOptions(nil, "")
})
assert.PanicsWithValue(t, `need a pointer to a struct, got instead: *int`, func() {
number := 0
AddOptions(&number, "")
})
assert.PanicsWithValue(t, `struct entry "prefix.number" not exported`, func() {
var context struct {
number int
}
AddOptions(&context, "prefix")
})
assert.PanicsWithValue(t, `unsupported struct entry type "prefix.someNumber": config.MyInt`, func() {
type MyInt int
var context struct {
SomeNumber MyInt
}
AddOptions(&context, "prefix")
})
}
func TestTypes(t *testing.T) {
CommandLine = flag.NewFlagSet("test", 0)
type Context struct {
Bool bool `default:"true"`
Duration time.Duration `default:"1ms"`
Float64 float64 `default:"1.23456789"`
String string `default:"hello world"`
Int int `default:"-1" usage:"some number"`
Int64 int64 `default:"-1234567890123456789"`
Uint uint `default:"1"`
Uint64 uint64 `default:"1234567890123456789"`
}
var context Context
require.NotPanics(t, func() {
AddOptions(&context, "")
})
require.Equal(t, []simpleFlag{
{
name: "bool",
defValue: "true",
isBool: true,
},
{
name: "duration",
defValue: "1ms",
},
{
name: "float64",
defValue: "1.23456789",
},
{
name: "int",
usage: "some number",
defValue: "-1",
},
{
name: "int64",
defValue: "-1234567890123456789",
},
{
name: "string",
defValue: "hello world",
},
{
name: "uint",
defValue: "1",
},
{
name: "uint64",
defValue: "1234567890123456789",
},
},
allFlags(CommandLine))
assert.Equal(t,
Context{true, time.Millisecond, 1.23456789, "hello world",
-1, -1234567890123456789, 1, 1234567890123456789,
},
context,
"default values must match")
require.NoError(t, CommandLine.Parse([]string{
"-int", "-2",
"-int64", "-9123456789012345678",
"-uint", "2",
"-uint64", "9123456789012345678",
"-string", "pong",
"-float64", "-1.23456789",
"-bool=false",
"-duration=1s",
}))
assert.Equal(t,
Context{false, time.Second, -1.23456789, "pong",
-2, -9123456789012345678, 2, 9123456789012345678,
},
context,
"parsed values must match")
}
func allFlags(fs *flag.FlagSet) []simpleFlag {
var flags []simpleFlag
fs.VisitAll(func(f *flag.Flag) {
s := simpleFlag{
name: f.Name,
usage: f.Usage,
defValue: f.DefValue,
}
type boolFlag interface {
flag.Value
IsBoolFlag() bool
}
if fv, ok := f.Value.(boolFlag); ok && fv.IsBoolFlag() {
s.isBool = true
}
flags = append(flags, s)
})
return flags
}
type simpleFlag struct {
name string
usage string
defValue string
isBool bool
}

View File

@ -25,7 +25,6 @@ import (
"github.com/golang/glog"
"github.com/onsi/ginkgo/config"
"github.com/spf13/viper"
utilflag "k8s.io/apiserver/pkg/util/flag"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
@ -37,6 +36,34 @@ import (
const defaultHost = "http://127.0.0.1:8080"
// TestContextType contains test settings and global state. For historical
// reasons, it is a mixture of items managed by the test
// framework itself, cloud providers and individual tests.
// The goal is to move anything not required by the framework
// into the code which uses the settings.
//
// The recommendation for those settings is:
// - They are stored in their own context structure or local
// variables.
// - The standard `flag` package is used to register them.
// The flag name should follow the pattern <part1>.<part2>....<partn>
// where the prefix is unlikely to conflict with other tests or
// standard packages and each part is in lower camel case. For
// example, test/e2e/storage/csi/context.go could define
// storage.csi.numIterations.
// - framework/config can be used to simplify the registration of
// multiple options with a single function call:
// var storageCSI struct {
// NumIterations int `default:"1" usage:"number of iterations"`
// }
// _ = config.AddOptions(&storageCSI, "storage.csi")
// - The direct use of Viper in tests is possible, but discouraged because
// it only works in test suites which use Viper (which is not
// required) and the supported options cannot be
// discovered by a test suite user.
//
// Test suite authors can use framework/viperconfig to make all command line
// parameters also configurable via a configuration file.
type TestContextType struct {
KubeConfig string
KubemarkExternalKubeConfig string
@ -64,11 +91,9 @@ type TestContextType struct {
MinStartupPods int
// Timeout for waiting for system pods to be running
SystemPodsStartupTimeout time.Duration
UpgradeTarget string
EtcdUpgradeStorage string
EtcdUpgradeVersion string
IngressUpgradeImage string
UpgradeImage string
GCEUpgradeScript string
ContainerRuntime string
ContainerRuntimeEndpoint string
@ -115,8 +140,6 @@ type TestContextType struct {
FeatureGates map[string]bool
// Node e2e specific test context
NodeTestContextType
// Storage e2e specific test context
StorageTestContextType
// Monitoring solution that is used in current cluster.
ClusterMonitoringMode string
// Separate Prometheus monitoring deployed in cluster
@ -124,24 +147,6 @@ type TestContextType struct {
// Indicates what path the kubernetes-anywhere is installed on
KubernetesAnywherePath string
// Viper-only parameters. These will in time replace all flags.
// Example: Create a file 'e2e.json' with the following:
// "Cadvisor":{
// "MaxRetries":"6"
// }
Viper string
Cadvisor struct {
MaxRetries int
SleepDurationMS int
}
LoggingSoak struct {
Scale int
MilliSecondsBetweenWaves int
}
}
// NodeTestContextType is part of TestContextType, it is shared by all node e2e test.
@ -164,14 +169,6 @@ type NodeTestContextType struct {
SystemSpecName string
}
// StorageConfig contains the shared settings for storage 2e2 tests.
type StorageTestContextType struct {
// CSIImageVersion overrides the builtin stable version numbers if set.
CSIImageVersion string
// CSIImageRegistry defines the image registry hosting the CSI container images.
CSIImageRegistry string
}
type CloudConfig struct {
ApiEndpoint string
ProjectID string
@ -225,7 +222,6 @@ func RegisterCommonFlags() {
flag.StringVar(&TestContext.ReportPrefix, "report-prefix", "", "Optional prefix for JUnit XML reports. Default is empty, which doesn't prepend anything to the default name.")
flag.StringVar(&TestContext.ReportDir, "report-dir", "", "Path to the directory where the JUnit XML reports should be saved. Default is empty, which doesn't generate these reports.")
flag.Var(utilflag.NewMapStringBool(&TestContext.FeatureGates), "feature-gates", "A set of key=value pairs that describe feature gates for alpha/experimental features.")
flag.StringVar(&TestContext.Viper, "viper-config", "e2e", "The name of the viper config i.e. 'e2e' will read values from 'e2e.json' locally. All e2e parameters are meant to be configurable by viper.")
flag.StringVar(&TestContext.ContainerRuntime, "container-runtime", "docker", "The container runtime of cluster VM instances (docker/remote).")
flag.StringVar(&TestContext.ContainerRuntimeEndpoint, "container-runtime-endpoint", "unix:///var/run/dockershim.sock", "The container runtime endpoint of cluster VM instances.")
flag.StringVar(&TestContext.ContainerRuntimeProcessName, "container-runtime-process-name", "dockerd", "The name of the container runtime process.")
@ -280,10 +276,8 @@ func RegisterClusterFlags() {
flag.DurationVar(&TestContext.SystemPodsStartupTimeout, "system-pods-startup-timeout", 10*time.Minute, "Timeout for waiting for all system pods to be running before starting tests.")
flag.DurationVar(&TestContext.NodeSchedulableTimeout, "node-schedulable-timeout", 30*time.Minute, "Timeout for waiting for all nodes to be schedulable.")
flag.DurationVar(&TestContext.SystemDaemonsetStartupTimeout, "system-daemonsets-startup-timeout", 5*time.Minute, "Timeout for waiting for all system daemonsets to be ready.")
flag.StringVar(&TestContext.UpgradeTarget, "upgrade-target", "ci/latest", "Version to upgrade to (e.g. 'release/stable', 'release/latest', 'ci/latest', '0.19.1', '0.19.1-669-gabac8c8') if doing an upgrade test.")
flag.StringVar(&TestContext.EtcdUpgradeStorage, "etcd-upgrade-storage", "", "The storage version to upgrade to (either 'etcdv2' or 'etcdv3') if doing an etcd upgrade test.")
flag.StringVar(&TestContext.EtcdUpgradeVersion, "etcd-upgrade-version", "", "The etcd binary version to upgrade to (e.g., '3.0.14', '2.3.7') if doing an etcd upgrade test.")
flag.StringVar(&TestContext.UpgradeImage, "upgrade-image", "", "Image to upgrade to (e.g. 'container_vm' or 'gci') if doing an upgrade test.")
flag.StringVar(&TestContext.IngressUpgradeImage, "ingress-upgrade-image", "", "Image to upgrade to if doing an upgrade test for ingress.")
flag.StringVar(&TestContext.GCEUpgradeScript, "gce-upgrade-script", "", "Script to use to upgrade a GCE cluster.")
flag.BoolVar(&TestContext.CleanStart, "clean-start", false, "If true, purge all namespaces except default and system before running tests. This serves to Cleanup test namespaces from failed/interrupted e2e runs in a long-lived cluster.")
@ -306,32 +300,11 @@ func RegisterNodeFlags() {
flag.StringVar(&TestContext.SystemSpecName, "system-spec-name", "", "The name of the system spec (e.g., gke) that's used in the node e2e test. The system specs are in test/e2e_node/system/specs/. This is used by the test framework to determine which tests to run for validating the system requirements.")
}
func RegisterStorageFlags() {
flag.StringVar(&TestContext.CSIImageVersion, "csiImageVersion", "", "overrides the default tag used for hostpathplugin/csi-attacher/csi-provisioner/driver-registrar images")
flag.StringVar(&TestContext.CSIImageRegistry, "csiImageRegistry", "quay.io/k8scsi", "overrides the default repository used for hostpathplugin/csi-attacher/csi-provisioner/driver-registrar images")
}
// ViperizeFlags sets up all flag and config processing. Future configuration info should be added to viper, not to flags.
func ViperizeFlags() {
// Part 1: Set regular flags.
// TODO: Future, lets eliminate e2e 'flag' deps entirely in favor of viper only,
// since go test 'flag's are sort of incompatible w/ flag, glog, etc.
// HandleFlags sets up all flags and parses the command line.
func HandleFlags() {
RegisterCommonFlags()
RegisterClusterFlags()
RegisterStorageFlags()
flag.Parse()
// Part 2: Set Viper provided flags.
// This must be done after common flags are registered, since Viper is a flag option.
viper.SetConfigName(TestContext.Viper)
viper.AddConfigPath(".")
viper.ReadInConfig()
// TODO Consider whether or not we want to use overwriteFlagsWithViperConfig().
viper.Unmarshal(&TestContext)
AfterReadingAllFlags(&TestContext)
}
func createKubeConfig(clientCfg *restclient.Config) *clientcmdapi.Config {

View File

@ -0,0 +1,26 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["viperconfig.go"],
importpath = "k8s.io/kubernetes/test/e2e/framework/viperconfig",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/pkg/errors:go_default_library",
"//vendor/github.com/spf13/viper:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@ -0,0 +1,142 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package viperconfig
import (
"flag"
"fmt"
"github.com/pkg/errors"
"path/filepath"
"github.com/spf13/viper"
)
const (
viperFileNotFound = "Unsupported Config Type \"\""
)
// ViperizeFlags checks whether a configuration file was specified, reads it, and updates
// the configuration variables accordingly. Must be called after framework.HandleFlags()
// and before framework.AfterReadingAllFlags().
//
// The logic is that a required configuration file must be present. If the
// required name is empty, the optional configuration file is used instead,
// unless it is also empty.
//
// Files can be specified with just a base name ("e2e", matches "e2e.json/yaml/..." in
// the current directory) or with path and suffix.
func ViperizeFlags(requiredConfig, optionalConfig string) error {
viperConfig := optionalConfig
required := false
if requiredConfig != "" {
viperConfig = requiredConfig
required = true
}
if viperConfig == "" {
return nil
}
viper.SetConfigName(filepath.Base(viperConfig))
viper.AddConfigPath(filepath.Dir(viperConfig))
wrapError := func(err error) error {
if err == nil {
return nil
}
errorPrefix := fmt.Sprintf("viper config %q", viperConfig)
actualFile := viper.ConfigFileUsed()
if actualFile != "" && actualFile != viperConfig {
errorPrefix = fmt.Sprintf("%s = %q", errorPrefix, actualFile)
}
return errors.Wrap(err, errorPrefix)
}
if err := viper.ReadInConfig(); err != nil {
// If the user specified a file suffix, Viper won't
// find the file because it always appends its known set
// of file suffixes. Therefore try once more without the
// suffix.
ext := filepath.Ext(viperConfig)
if ext != "" && err.Error() == viperFileNotFound {
viper.SetConfigName(filepath.Base(viperConfig[0 : len(viperConfig)-len(ext)]))
err = viper.ReadInConfig()
}
if err != nil {
// If a config was required, then parsing must
// succeed. This catches syntax errors and
// "file not found". Unfortunately error
// messages are sometimes hard to understand,
// so try to help the user a bit.
switch err.Error() {
case viperFileNotFound:
if required {
return wrapError(errors.New("not found or not using a supported file format"))
}
// Proceed without config.
return nil
default:
// Something isn't right in the file.
return wrapError(err)
}
}
}
// Update all flag values not already set with values found
// via Viper. We do this ourselves instead of calling
// something like viper.Unmarshal(&TestContext) because we
// want to support all values, regardless of where they are
// stored.
return wrapError(viperUnmarshal())
}
// viperUnmarshal updates all command line flags with the corresponding values found
// via Viper, regardless of whether the flag value is stored in TestContext, some other
// context or a local variable.
func viperUnmarshal() error {
var result error
set := make(map[string]bool)
// Determine which values were already set explicitly via
// flags. Those we don't overwrite because command line
// flags have a higher priority.
flag.Visit(func(f *flag.Flag) {
set[f.Name] = true
})
flag.VisitAll(func(f *flag.Flag) {
if result != nil ||
set[f.Name] ||
!viper.IsSet(f.Name) {
return
}
// In contrast to viper.Unmarshal(), values
// that have the wrong type (for example, a
// list instead of a plain string) will not
// trigger an error here. This could be fixed
// by checking the type ourselves, but
// probably isn't worth the effort.
//
// "%v" correctly turns bool, int, strings into
// the representation expected by flag, so those
// can be used in config files. Plain strings
// always work there, just as on the command line.
str := fmt.Sprintf("%v", viper.Get(f.Name))
if err := f.Value.Set(str); err != nil {
result = fmt.Errorf("setting option %q from config file value: %s", f.Name, err)
}
})
return result
}
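To sketch how this is meant to be used end to end (the file name and values below are hypothetical, not part of this change): a suite wired up like test/e2e above can be started with -viper-config=my-e2e, and a my-e2e.yaml in the current directory can then supply a value for any registered flag, with the dots in a flag name becoming nested keys. Using the logging soak options added later in this PR as an example:

# my-e2e.yaml - hypothetical example. Values set explicitly on the
# command line still win; anything not listed keeps its flag default.
report-dir: /tmp/e2e-reports
instrumentation:
  logging:
    soak:
      scale: 2
      timeBetweenWaves: 1s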

View File

@ -15,6 +15,7 @@ go_library(
deps = [
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/config:go_default_library",
"//test/e2e/instrumentation/common:go_default_library",
"//test/e2e/instrumentation/logging/elasticsearch:go_default_library",
"//test/e2e/instrumentation/logging/stackdriver:go_default_library",

View File

@ -27,10 +27,17 @@ import (
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/config"
instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"
imageutils "k8s.io/kubernetes/test/utils/image"
)
var loggingSoak struct {
Scale int `default:"1" usage:"number of waves of pods"`
TimeBetweenWaves time.Duration `default:"5000ms" usage:"time to wait before dumping the next wave of pods"`
}
var _ = config.AddOptions(&loggingSoak, "instrumentation.logging.soak")
var _ = instrumentation.SIGDescribe("Logging soak [Performance] [Slow] [Disruptive]", func() {
f := framework.NewDefaultFramework("logging-soak")
@ -44,31 +51,12 @@ var _ = instrumentation.SIGDescribe("Logging soak [Performance] [Slow] [Disrupti
// This can expose problems in your docker configuration (logging), log searching infrastructure, to tune deployments to match high load
// scenarios. TODO jayunit100 add this to the kube CI in a follow on infra patch.
// Returns scale (how many waves of pods).
// Returns wave interval (how many seconds to wait before dumping the next wave of pods).
readConfig := func() (int, time.Duration) {
// Read in configuration settings, reasonable defaults.
scale := framework.TestContext.LoggingSoak.Scale
if framework.TestContext.LoggingSoak.Scale == 0 {
scale = 1
framework.Logf("Overriding default scale value of zero to %d", scale)
}
milliSecondsBetweenWaves := framework.TestContext.LoggingSoak.MilliSecondsBetweenWaves
if milliSecondsBetweenWaves == 0 {
milliSecondsBetweenWaves = 5000
framework.Logf("Overriding default milliseconds value of zero to %d", milliSecondsBetweenWaves)
}
return scale, time.Duration(milliSecondsBetweenWaves) * time.Millisecond
}
scale, millisecondsBetweenWaves := readConfig()
It(fmt.Sprintf("should survive logging 1KB every %v seconds, for a duration of %v, scaling up to %v pods per node", kbRateInSeconds, totalLogTime, scale), func() {
It(fmt.Sprintf("should survive logging 1KB every %v seconds, for a duration of %v", kbRateInSeconds, totalLogTime), func() {
By(fmt.Sprintf("scaling up to %v pods per node", loggingSoak.Scale))
defer GinkgoRecover()
var wg sync.WaitGroup
wg.Add(scale)
for i := 0; i < scale; i++ {
wg.Add(loggingSoak.Scale)
for i := 0; i < loggingSoak.Scale; i++ {
go func() {
defer wg.Done()
defer GinkgoRecover()
@ -78,9 +66,9 @@ var _ = instrumentation.SIGDescribe("Logging soak [Performance] [Slow] [Disrupti
framework.Logf("Completed logging soak, wave %v", i)
}()
// Niceness.
time.Sleep(millisecondsBetweenWaves)
time.Sleep(loggingSoak.TimeBetweenWaves)
}
framework.Logf("Waiting on all %v logging soak waves to complete", scale)
framework.Logf("Waiting on all %v logging soak waves to complete", loggingSoak.Scale)
wg.Wait()
})
})
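The upshot of the change above is that the former TestContext.LoggingSoak fields become ordinary flags named after their prefix and fields, so a run can override them directly on the command line (invocation and values hypothetical):

e2e.test -instrumentation.logging.soak.scale=3 -instrumentation.logging.soak.timeBetweenWaves=1s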

View File

@ -36,6 +36,7 @@ go_library(
"//staging/src/k8s.io/metrics/pkg/client/external_metrics:go_default_library",
"//test/e2e/common:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/config:go_default_library",
"//test/e2e/framework/metrics:go_default_library",
"//test/e2e/instrumentation/common:go_default_library",
"//test/e2e/scheduling:go_default_library",

View File

@ -23,11 +23,18 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/config"
instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"
. "github.com/onsi/ginkgo"
)
var cadvisor struct {
MaxRetries int `default:"6"`
SleepDuration time.Duration `default:"10000ms"`
}
var _ = config.AddOptions(&cadvisor, "instrumentation.monitoring.cadvisor")
var _ = instrumentation.SIGDescribe("Cadvisor", func() {
f := framework.NewDefaultFramework("cadvisor")
@ -44,25 +51,7 @@ func CheckCadvisorHealthOnAllNodes(c clientset.Interface, timeout time.Duration)
framework.ExpectNoError(err)
var errors []error
// returns maxRetries, sleepDuration
readConfig := func() (int, time.Duration) {
// Read in configuration settings, reasonable defaults.
retry := framework.TestContext.Cadvisor.MaxRetries
if framework.TestContext.Cadvisor.MaxRetries == 0 {
retry = 6
framework.Logf("Overriding default retry value of zero to %d", retry)
}
sleepDurationMS := framework.TestContext.Cadvisor.SleepDurationMS
if sleepDurationMS == 0 {
sleepDurationMS = 10000
framework.Logf("Overriding default milliseconds value of zero to %d", sleepDurationMS)
}
return retry, time.Duration(sleepDurationMS) * time.Millisecond
}
maxRetries, sleepDuration := readConfig()
maxRetries := cadvisor.MaxRetries
for {
errors = []error{}
for _, node := range nodeList.Items {
@ -82,7 +71,7 @@ func CheckCadvisorHealthOnAllNodes(c clientset.Interface, timeout time.Duration)
break
}
framework.Logf("failed to retrieve kubelet stats -\n %v", errors)
time.Sleep(sleepDuration)
time.Sleep(cadvisor.SleepDuration)
}
framework.Failf("Failed after retrying %d times for cadvisor to be healthy on all nodes. Errors:\n%v", maxRetries, errors)
}

View File

@ -18,6 +18,7 @@ package lifecycle
import (
"encoding/xml"
"flag"
"fmt"
"os"
"path/filepath"
@ -39,6 +40,11 @@ import (
. "github.com/onsi/ginkgo"
)
var (
upgradeTarget = flag.String("upgrade-target", "ci/latest", "Version to upgrade to (e.g. 'release/stable', 'release/latest', 'ci/latest', '0.19.1', '0.19.1-669-gabac8c8') if doing an upgrade test.")
upgradeImage = flag.String("upgrade-image", "", "Image to upgrade to (e.g. 'container_vm' or 'gci') if doing an upgrade test.")
)
var upgradeTests = []upgrades.Test{
&upgrades.ServiceUpgradeTest{},
&upgrades.SecretUpgradeTest{},
@ -89,7 +95,7 @@ var _ = SIGDescribe("Upgrade [Feature:Upgrade]", func() {
testFrameworks := createUpgradeFrameworks(upgradeTests)
Describe("master upgrade", func() {
It("should maintain a functioning cluster [Feature:MasterUpgrade]", func() {
upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), *upgradeTarget)
framework.ExpectNoError(err)
testSuite := &junit.TestSuite{Name: "Master upgrade"}
@ -112,7 +118,7 @@ var _ = SIGDescribe("Upgrade [Feature:Upgrade]", func() {
Describe("node upgrade", func() {
It("should maintain a functioning cluster [Feature:NodeUpgrade]", func() {
upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), *upgradeTarget)
framework.ExpectNoError(err)
testSuite := &junit.TestSuite{Name: "Node upgrade"}
@ -125,7 +131,7 @@ var _ = SIGDescribe("Upgrade [Feature:Upgrade]", func() {
start := time.Now()
defer finalizeUpgradeTest(start, nodeUpgradeTest)
target := upgCtx.Versions[1].Version.String()
framework.ExpectNoError(framework.NodeUpgrade(f, target, framework.TestContext.UpgradeImage))
framework.ExpectNoError(framework.NodeUpgrade(f, target, *upgradeImage))
framework.ExpectNoError(framework.CheckNodesVersions(f.ClientSet, target))
}
runUpgradeSuite(f, upgradeTests, testFrameworks, testSuite, upgCtx, upgrades.NodeUpgrade, upgradeFunc)
@ -134,7 +140,7 @@ var _ = SIGDescribe("Upgrade [Feature:Upgrade]", func() {
Describe("cluster upgrade", func() {
It("should maintain a functioning cluster [Feature:ClusterUpgrade]", func() {
upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), *upgradeTarget)
framework.ExpectNoError(err)
testSuite := &junit.TestSuite{Name: "Cluster upgrade"}
@ -146,7 +152,7 @@ var _ = SIGDescribe("Upgrade [Feature:Upgrade]", func() {
target := upgCtx.Versions[1].Version.String()
framework.ExpectNoError(framework.MasterUpgrade(target))
framework.ExpectNoError(framework.CheckMasterVersion(f.ClientSet, target))
framework.ExpectNoError(framework.NodeUpgrade(f, target, framework.TestContext.UpgradeImage))
framework.ExpectNoError(framework.NodeUpgrade(f, target, *upgradeImage))
framework.ExpectNoError(framework.CheckNodesVersions(f.ClientSet, target))
}
runUpgradeSuite(f, upgradeTests, testFrameworks, testSuite, upgCtx, upgrades.ClusterUpgrade, upgradeFunc)
@ -163,7 +169,7 @@ var _ = SIGDescribe("Downgrade [Feature:Downgrade]", func() {
Describe("cluster downgrade", func() {
It("should maintain a functioning cluster [Feature:ClusterDowngrade]", func() {
upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), *upgradeTarget)
framework.ExpectNoError(err)
testSuite := &junit.TestSuite{Name: "Cluster downgrade"}
@ -175,7 +181,7 @@ var _ = SIGDescribe("Downgrade [Feature:Downgrade]", func() {
defer finalizeUpgradeTest(start, clusterDowngradeTest)
// Yes this really is a downgrade. And nodes must downgrade first.
target := upgCtx.Versions[1].Version.String()
framework.ExpectNoError(framework.NodeUpgrade(f, target, framework.TestContext.UpgradeImage))
framework.ExpectNoError(framework.NodeUpgrade(f, target, *upgradeImage))
framework.ExpectNoError(framework.CheckNodesVersions(f.ClientSet, target))
framework.ExpectNoError(framework.MasterUpgrade(target))
framework.ExpectNoError(framework.CheckMasterVersion(f.ClientSet, target))
@ -268,7 +274,7 @@ var _ = SIGDescribe("gpu Upgrade [Feature:GPUUpgrade]", func() {
testFrameworks := createUpgradeFrameworks(gpuUpgradeTests)
Describe("master upgrade", func() {
It("should NOT disrupt gpu pod [Feature:GPUMasterUpgrade]", func() {
upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), *upgradeTarget)
framework.ExpectNoError(err)
testSuite := &junit.TestSuite{Name: "GPU master upgrade"}
@ -286,7 +292,7 @@ var _ = SIGDescribe("gpu Upgrade [Feature:GPUUpgrade]", func() {
})
Describe("cluster upgrade", func() {
It("should be able to run gpu pod after upgrade [Feature:GPUClusterUpgrade]", func() {
upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), *upgradeTarget)
framework.ExpectNoError(err)
testSuite := &junit.TestSuite{Name: "GPU cluster upgrade"}
@ -298,7 +304,7 @@ var _ = SIGDescribe("gpu Upgrade [Feature:GPUUpgrade]", func() {
target := upgCtx.Versions[1].Version.String()
framework.ExpectNoError(framework.MasterUpgrade(target))
framework.ExpectNoError(framework.CheckMasterVersion(f.ClientSet, target))
framework.ExpectNoError(framework.NodeUpgrade(f, target, framework.TestContext.UpgradeImage))
framework.ExpectNoError(framework.NodeUpgrade(f, target, *upgradeImage))
framework.ExpectNoError(framework.CheckNodesVersions(f.ClientSet, target))
}
runUpgradeSuite(f, gpuUpgradeTests, testFrameworks, testSuite, upgCtx, upgrades.ClusterUpgrade, upgradeFunc)
@ -306,7 +312,7 @@ var _ = SIGDescribe("gpu Upgrade [Feature:GPUUpgrade]", func() {
})
Describe("cluster downgrade", func() {
It("should be able to run gpu pod after downgrade [Feature:GPUClusterDowngrade]", func() {
upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), *upgradeTarget)
framework.ExpectNoError(err)
testSuite := &junit.TestSuite{Name: "GPU cluster downgrade"}
@ -316,7 +322,7 @@ var _ = SIGDescribe("gpu Upgrade [Feature:GPUUpgrade]", func() {
start := time.Now()
defer finalizeUpgradeTest(start, gpuDowngradeTest)
target := upgCtx.Versions[1].Version.String()
framework.ExpectNoError(framework.NodeUpgrade(f, target, framework.TestContext.UpgradeImage))
framework.ExpectNoError(framework.NodeUpgrade(f, target, *upgradeImage))
framework.ExpectNoError(framework.CheckNodesVersions(f.ClientSet, target))
framework.ExpectNoError(framework.MasterUpgrade(target))
framework.ExpectNoError(framework.CheckMasterVersion(f.ClientSet, target))
@ -334,7 +340,7 @@ var _ = Describe("[sig-apps] stateful Upgrade [Feature:StatefulUpgrade]", func()
testFrameworks := createUpgradeFrameworks(statefulsetUpgradeTests)
framework.KubeDescribe("stateful upgrade", func() {
It("should maintain a functioning cluster", func() {
upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), *upgradeTarget)
framework.ExpectNoError(err)
testSuite := &junit.TestSuite{Name: "Stateful upgrade"}
@ -346,7 +352,7 @@ var _ = Describe("[sig-apps] stateful Upgrade [Feature:StatefulUpgrade]", func()
target := upgCtx.Versions[1].Version.String()
framework.ExpectNoError(framework.MasterUpgrade(target))
framework.ExpectNoError(framework.CheckMasterVersion(f.ClientSet, target))
framework.ExpectNoError(framework.NodeUpgrade(f, target, framework.TestContext.UpgradeImage))
framework.ExpectNoError(framework.NodeUpgrade(f, target, *upgradeImage))
framework.ExpectNoError(framework.CheckNodesVersions(f.ClientSet, target))
}
runUpgradeSuite(f, statefulsetUpgradeTests, testFrameworks, testSuite, upgCtx, upgrades.ClusterUpgrade, upgradeFunc)
@ -365,7 +371,7 @@ var _ = SIGDescribe("kube-proxy migration [Feature:KubeProxyDaemonSetMigration]"
testFrameworks := createUpgradeFrameworks(kubeProxyUpgradeTests)
It("should maintain a functioning cluster [Feature:KubeProxyDaemonSetUpgrade]", func() {
upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), *upgradeTarget)
framework.ExpectNoError(err)
testSuite := &junit.TestSuite{Name: "kube-proxy upgrade"}
@ -381,7 +387,7 @@ var _ = SIGDescribe("kube-proxy migration [Feature:KubeProxyDaemonSetMigration]"
target := upgCtx.Versions[1].Version.String()
framework.ExpectNoError(framework.MasterUpgradeGCEWithKubeProxyDaemonSet(target, true))
framework.ExpectNoError(framework.CheckMasterVersion(f.ClientSet, target))
framework.ExpectNoError(framework.NodeUpgradeGCEWithKubeProxyDaemonSet(f, target, framework.TestContext.UpgradeImage, true))
framework.ExpectNoError(framework.NodeUpgradeGCEWithKubeProxyDaemonSet(f, target, *upgradeImage, true))
framework.ExpectNoError(framework.CheckNodesVersions(f.ClientSet, target))
}
runUpgradeSuite(f, kubeProxyUpgradeTests, testFrameworks, testSuite, upgCtx, upgrades.ClusterUpgrade, upgradeFunc)
@ -392,7 +398,7 @@ var _ = SIGDescribe("kube-proxy migration [Feature:KubeProxyDaemonSetMigration]"
testFrameworks := createUpgradeFrameworks(kubeProxyDowngradeTests)
It("should maintain a functioning cluster [Feature:KubeProxyDaemonSetDowngrade]", func() {
upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), *upgradeTarget)
framework.ExpectNoError(err)
testSuite := &junit.TestSuite{Name: "kube-proxy downgrade"}
@ -407,7 +413,7 @@ var _ = SIGDescribe("kube-proxy migration [Feature:KubeProxyDaemonSetMigration]"
defer finalizeUpgradeTest(start, kubeProxyDowngradeTest)
// Yes this really is a downgrade. And nodes must downgrade first.
target := upgCtx.Versions[1].Version.String()
framework.ExpectNoError(framework.NodeUpgradeGCEWithKubeProxyDaemonSet(f, target, framework.TestContext.UpgradeImage, false))
framework.ExpectNoError(framework.NodeUpgradeGCEWithKubeProxyDaemonSet(f, target, *upgradeImage, false))
framework.ExpectNoError(framework.CheckNodesVersions(f.ClientSet, target))
framework.ExpectNoError(framework.MasterUpgradeGCEWithKubeProxyDaemonSet(target, false))
framework.ExpectNoError(framework.CheckMasterVersion(f.ClientSet, target))
@ -496,7 +502,7 @@ func runUpgradeSuite(
upgradeType upgrades.UpgradeType,
upgradeFunc func(),
) {
upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), *upgradeTarget)
framework.ExpectNoError(err)
cm := chaosmonkey.New(upgradeFunc)
@ -569,7 +575,7 @@ func getUpgradeContext(c discovery.DiscoveryInterface, upgradeTarget string) (*u
upgCtx.Versions = append(upgCtx.Versions, upgrades.VersionContext{
Version: *nextVer,
NodeImage: framework.TestContext.UpgradeImage,
NodeImage: *upgradeImage,
})
return upgCtx, nil

View File

@ -20,6 +20,7 @@ limitations under the License.
package storage
import (
"flag"
"fmt"
"io/ioutil"
"os"
@ -46,18 +47,22 @@ import (
csicrd "k8s.io/csi-api/pkg/crd"
)
var csiImageVersions = map[string]string{
"hostpathplugin": "v0.4.0",
"csi-attacher": "v0.4.0",
"csi-provisioner": "v0.4.0",
"driver-registrar": "v0.4.0",
}
var (
csiImageVersion = flag.String("storage.csi.image.version", "", "overrides the default tag used for hostpathplugin/csi-attacher/csi-provisioner/driver-registrar images")
csiImageRegistry = flag.String("storage.csi.image.registry", "quay.io/k8scsi", "overrides the default repository used for hostpathplugin/csi-attacher/csi-provisioner/driver-registrar images")
csiImageVersions = map[string]string{
"hostpathplugin": "v0.4.0",
"csi-attacher": "v0.4.0",
"csi-provisioner": "v0.4.0",
"driver-registrar": "v0.4.0",
}
)
func csiContainerImage(image string) string {
var fullName string
fullName += framework.TestContext.CSIImageRegistry + "/" + image + ":"
if framework.TestContext.CSIImageVersion != "" {
fullName += framework.TestContext.CSIImageVersion
fullName += *csiImageRegistry + "/" + image + ":"
if *csiImageVersion != "" {
fullName += *csiImageVersion
} else {
fullName += csiImageVersions[image]
}