mirror of https://github.com/k3s-io/k3s
Remove test/integration/* from hack/.golint_failures
parent 9fd7a4c2dc
commit 75df4dc04c
@@ -669,12 +669,4 @@ test/e2e_node/environment
 test/e2e_node/remote
 test/e2e_node/runner/remote
 test/e2e_node/services
-test/integration
-test/integration/auth
-test/integration/evictions
-test/integration/framework
-test/integration/master
-test/integration/replicaset
-test/integration/replicationcontroller
-test/integration/scheduler
 test/utils

@@ -228,7 +228,7 @@ var aBinding string = `
 }
 `
 
-var emptyEndpoints string = `
+var emptyEndpoints = `
 {
   "kind": "Endpoints",
   "apiVersion": "v1",

@@ -493,10 +493,10 @@ func parseResourceVersion(response []byte) (string, float64, error) {
 }
 
 func getPreviousResourceVersionKey(url, id string) string {
-	baseUrl := strings.Split(url, "?")[0]
-	key := baseUrl
+	baseURL := strings.Split(url, "?")[0]
+	key := baseURL
 	if id != "" {
-		key = fmt.Sprintf("%s/%v", baseUrl, id)
+		key = fmt.Sprintf("%s/%v", baseURL, id)
 	}
 	return key
 }

@@ -1075,8 +1075,8 @@ func TestKindAuthorization(t *testing.T) {
 		if r.verb == "PUT" && r.body != "" {
 			// For update operations, insert previous resource version
 			if resVersion := previousResourceVersion[getPreviousResourceVersionKey(r.URL, "")]; resVersion != 0 {
-				resourceVersionJson := fmt.Sprintf(",\r\n\"resourceVersion\": \"%v\"", resVersion)
-				bodyStr = fmt.Sprintf(r.body, resourceVersionJson)
+				resourceVersionJSON := fmt.Sprintf(",\r\n\"resourceVersion\": \"%v\"", resVersion)
+				bodyStr = fmt.Sprintf(r.body, resourceVersionJSON)
 			}
 		}
 	}

@@ -47,7 +47,7 @@ func (b bootstrapSecrets) Get(name string) (*corev1.Secret, error) {
 
 // TestBootstrapTokenAuth tests the bootstrap token auth provider
 func TestBootstrapTokenAuth(t *testing.T) {
-	tokenId, err := bootstraputil.GenerateTokenId()
+	tokenID, err := bootstraputil.GenerateTokenId()
 	if err != nil {
 		t.Fatalf("unexpected error: %v", err)
 	}

@@ -62,7 +62,7 @@ func TestBootstrapTokenAuth(t *testing.T) {
 		},
 		Type: corev1.SecretTypeBootstrapToken,
 		Data: map[string][]byte{
-			bootstrapapi.BootstrapTokenIDKey: []byte(tokenId),
+			bootstrapapi.BootstrapTokenIDKey: []byte(tokenID),
 			bootstrapapi.BootstrapTokenSecretKey: []byte(secret),
 			bootstrapapi.BootstrapTokenUsageAuthentication: []byte("true"),
 		},

@@ -74,7 +74,7 @@ func TestBootstrapTokenAuth(t *testing.T) {
 		},
 		Type: corev1.SecretTypeBootstrapToken,
 		Data: map[string][]byte{
-			bootstrapapi.BootstrapTokenIDKey: []byte(tokenId),
+			bootstrapapi.BootstrapTokenIDKey: []byte(tokenID),
 			bootstrapapi.BootstrapTokenSecretKey: []byte("invalid"),
 			bootstrapapi.BootstrapTokenUsageAuthentication: []byte("true"),
 		},

@@ -86,7 +86,7 @@ func TestBootstrapTokenAuth(t *testing.T) {
 		},
 		Type: corev1.SecretTypeBootstrapToken,
 		Data: map[string][]byte{
-			bootstrapapi.BootstrapTokenIDKey: []byte(tokenId),
+			bootstrapapi.BootstrapTokenIDKey: []byte(tokenID),
 			bootstrapapi.BootstrapTokenSecretKey: []byte("invalid"),
 			bootstrapapi.BootstrapTokenUsageAuthentication: []byte("true"),
 			bootstrapapi.BootstrapTokenExpirationKey: []byte(bootstraputil.TimeStringFromNow(-time.Hour)),

@@ -134,7 +134,7 @@ func TestBootstrapTokenAuth(t *testing.T) {
 			previousResourceVersion := make(map[string]float64)
 			transport := http.DefaultTransport
 
-			token := tokenId + "." + secret
+			token := tokenID + "." + secret
 			var bodyStr string
 			if test.request.body != "" {
 				sub := ""

@@ -95,7 +95,7 @@ func TestConcurrentEvictionRequests(t *testing.T) {
 
 	waitPDBStable(t, clientSet, numOfEvictions, ns.Name, pdb.Name)
 
-	var numberPodsEvicted uint32 = 0
+	var numberPodsEvicted uint32
 	errCh := make(chan error, 3*numOfEvictions)
 	var wg sync.WaitGroup
 	// spawn numOfEvictions goroutines to concurrently evict the pods

@@ -103,11 +103,13 @@ type MasterHolder struct {
 	M *master.Master
 }
 
+// SetMaster assigns the current master.
 func (h *MasterHolder) SetMaster(m *master.Master) {
 	h.M = m
 	close(h.Initialized)
 }
 
+// DefaultOpenAPIConfig returns an openapicommon.Config initialized to default values.
 func DefaultOpenAPIConfig() *openapicommon.Config {
 	openAPIConfig := genericapiserver.DefaultOpenAPIConfig(openapi.GetOpenAPIDefinitions, openapinamer.NewDefinitionNamer(legacyscheme.Scheme))
 	openAPIConfig.Info = &spec.Info{

@@ -237,7 +239,7 @@ func startMasterOrDie(masterConfig *master.Config, incomingServer *httptest.Serv
 	return m, s, closeFn
 }
 
-// Returns the master config appropriate for most integration tests.
+// NewIntegrationTestMasterConfig returns the master config appropriate for most integration tests.
 func NewIntegrationTestMasterConfig() *master.Config {
 	masterConfig := NewMasterConfig()
 	masterConfig.GenericConfig.PublicAddress = net.ParseIP("192.168.10.4")

@@ -249,7 +251,7 @@ func NewIntegrationTestMasterConfig() *master.Config {
 	return masterConfig
 }
 
-// Returns a basic master config.
+// NewMasterConfig returns a basic master config.
 func NewMasterConfig() *master.Config {
 	// This causes the integration tests to exercise the etcd
 	// prefix code, so please don't change without ensuring

@@ -336,6 +338,7 @@ func NewMasterConfig() *master.Config {
 // CloseFunc can be called to cleanup the master
 type CloseFunc func()
 
+// RunAMaster starts a master with the provided config.
 func RunAMaster(masterConfig *master.Config) (*master.Master, *httptest.Server, CloseFunc) {
 	if masterConfig == nil {
 		masterConfig = NewMasterConfig()

@@ -344,6 +347,7 @@ func RunAMaster(masterConfig *master.Config) (*master.Master, *httptest.Server,
 	return startMasterOrDie(masterConfig, nil, nil)
 }
 
+// RunAMasterUsingServer starts up a master using the provided config on the specified server.
 func RunAMasterUsingServer(masterConfig *master.Config, s *httptest.Server, masterReceiver MasterReceiver) (*master.Master, *httptest.Server, CloseFunc) {
 	return startMasterOrDie(masterConfig, s, masterReceiver)
 }

@@ -31,12 +31,14 @@ const (
 	retries = 5
 )
 
+// IntegrationTestNodePreparer holds configuration information for the test node preparer.
 type IntegrationTestNodePreparer struct {
 	client clientset.Interface
 	countToStrategy []testutils.CountToStrategy
 	nodeNamePrefix string
 }
 
+// NewIntegrationTestNodePreparer creates an IntegrationTestNodePreparer configured with defaults.
 func NewIntegrationTestNodePreparer(client clientset.Interface, countToStrategy []testutils.CountToStrategy, nodeNamePrefix string) testutils.TestNodePreparer {
 	return &IntegrationTestNodePreparer{
 		client: client,

@@ -45,6 +47,7 @@ func NewIntegrationTestNodePreparer(client clientset.Interface, countToStrategy
 	}
 }
 
+// PrepareNodes prepares countToStrategy test nodes.
 func (p *IntegrationTestNodePreparer) PrepareNodes() error {
 	numNodes := 0
 	for _, v := range p.countToStrategy {

@@ -96,6 +99,7 @@ func (p *IntegrationTestNodePreparer) PrepareNodes() error {
 	return nil
 }
 
+// CleanupNodes deletes existing test nodes.
 func (p *IntegrationTestNodePreparer) CleanupNodes() error {
 	nodes := e2eframework.GetReadySchedulableNodesOrDie(p.client)
 	for i := range nodes.Items {

@@ -39,6 +39,7 @@ import (
 	"k8s.io/kubernetes/pkg/master"
 )
 
+// TestServerSetup holds configuration information for a kube-apiserver test server.
 type TestServerSetup struct {
 	ModifyServerRunOptions func(*options.ServerRunOptions)
 	ModifyServerConfig func(*master.Config)

@@ -33,6 +33,7 @@ const (
 	currentPodInfraContainerImageVersion = "3.1"
 )
 
+// CreateTestingNamespace creates a namespace for testing.
 func CreateTestingNamespace(baseName string, apiserver *httptest.Server, t *testing.T) *v1.Namespace {
 	// TODO: Create a namespace with a given basename.
 	// Currently we neither create the namespace nor delete all of its contents at the end.

@@ -45,6 +46,7 @@ func CreateTestingNamespace(baseName string, apiserver *httptest.Server, t *test
 	}
 }
 
+// DeleteTestingNamespace is currently a no-op function.
 func DeleteTestingNamespace(ns *v1.Namespace, apiserver *httptest.Server, t *testing.T) {
 	// TODO: Remove all resources from a given namespace once we implement CreateTestingNamespace.
 }

@@ -46,7 +46,7 @@ type base64Plugin struct {
 	encryptRequest chan *kmsapi.EncryptRequest
 }
 
-func NewBase64Plugin() (*base64Plugin, error) {
+func newBase64Plugin() (*base64Plugin, error) {
 	listener, err := net.Listen(unixProtocol, sockFile)
 	if err != nil {
 		return nil, fmt.Errorf("failed to listen on the unix socket, error: %v", err)

@@ -80,7 +80,7 @@ func (r rawDEKKEKSecret) getPayload() []byte {
 // 4. The payload (ex. Secret) should be encrypted via AES CBC transform
 // 5. Prefix-EncryptedDEK-EncryptedPayload structure should be deposited to ETCD
 func TestKMSProvider(t *testing.T) {
-	pluginMock, err := NewBase64Plugin()
+	pluginMock, err := newBase64Plugin()
 	if err != nil {
 		t.Fatalf("failed to create mock of KMS Plugin: %v", err)
 	}

@@ -353,7 +353,7 @@ func TestWatchSucceedsWithoutArgs(t *testing.T) {
 	resp.Body.Close()
 }
 
-var hpaV1 string = `
+var hpaV1 = `
 {
   "apiVersion": "autoscaling/v1",
   "kind": "HorizontalPodAutoscaler",

@@ -374,7 +374,7 @@ var hpaV1 string = `
 }
 `
 
-var deploymentExtensions string = `
+var deploymentExtensions = `
 {
   "apiVersion": "extensions/v1beta1",
   "kind": "Deployment",

@@ -401,7 +401,7 @@ var deploymentExtensions string = `
 }
 `
 
-var deploymentApps string = `
+var deploymentApps = `
 {
   "apiVersion": "apps/v1",
   "kind": "Deployment",

@@ -448,12 +448,14 @@ func TestAdoption(t *testing.T) {
 			if err != nil {
 				return false, err
 			}
-			if e, a := tc.expectedOwnerReferences(rs), updatedPod.OwnerReferences; reflect.DeepEqual(e, a) {
+
+			e, a := tc.expectedOwnerReferences(rs), updatedPod.OwnerReferences
+			if reflect.DeepEqual(e, a) {
 				return true, nil
-			} else {
+			}
+
 			t.Logf("ownerReferences don't match, expect %v, got %v", e, a)
 			return false, nil
-			}
 		}); err != nil {
 			t.Fatalf("test %q failed: %v", tc.name, err)
 		}

@@ -448,12 +448,14 @@ func TestAdoption(t *testing.T) {
 			if err != nil {
 				return false, err
 			}
-			if e, a := tc.expectedOwnerReferences(rc), updatedPod.OwnerReferences; reflect.DeepEqual(e, a) {
+
+			e, a := tc.expectedOwnerReferences(rc), updatedPod.OwnerReferences
+			if reflect.DeepEqual(e, a) {
 				return true, nil
-			} else {
+			}
+
 			t.Logf("ownerReferences don't match, expect %v, got %v", e, a)
 			return false, nil
-			}
 		}); err != nil {
 			t.Fatalf("test %q failed: %v", tc.name, err)
 		}

@@ -156,7 +156,8 @@ func (e *Extender) Filter(args *schedulerapi.ExtenderArgs) (*schedulerapi.Extend
 
 	if e.nodeCacheCapable {
 		return e.filterUsingNodeCache(args)
-	} else {
+	}
+
 	for _, node := range args.Nodes.Items {
 		fits := true
 		for _, predicate := range e.predicates {

@@ -187,7 +188,6 @@ func (e *Extender) Filter(args *schedulerapi.ExtenderArgs) (*schedulerapi.Extend
 		FailedNodes: failedNodesMap,
 	}, nil
-	}
 }
 
 func (e *Extender) Prioritize(args *schedulerapi.ExtenderArgs) (*schedulerapi.HostPriorityList, error) {
 	result := schedulerapi.HostPriorityList{}

@@ -234,21 +234,21 @@ func (e *Extender) Bind(binding *schedulerapi.ExtenderBindingArgs) error {
 	return e.Client.CoreV1().Pods(b.Namespace).Bind(b)
 }
 
-func machine_1_2_3_Predicate(pod *v1.Pod, node *v1.Node) (bool, error) {
+func machine1_2_3Predicate(pod *v1.Pod, node *v1.Node) (bool, error) {
 	if node.Name == "machine1" || node.Name == "machine2" || node.Name == "machine3" {
 		return true, nil
 	}
 	return false, nil
 }
 
-func machine_2_3_5_Predicate(pod *v1.Pod, node *v1.Node) (bool, error) {
+func machine2_3_5Predicate(pod *v1.Pod, node *v1.Node) (bool, error) {
 	if node.Name == "machine2" || node.Name == "machine3" || node.Name == "machine5" {
 		return true, nil
 	}
 	return false, nil
 }
 
-func machine_2_Prioritizer(pod *v1.Pod, nodes *v1.NodeList) (*schedulerapi.HostPriorityList, error) {
+func machine2Prioritizer(pod *v1.Pod, nodes *v1.NodeList) (*schedulerapi.HostPriorityList, error) {
 	result := schedulerapi.HostPriorityList{}
 	for _, node := range nodes.Items {
 		score := 1

@@ -263,7 +263,7 @@ func machine_2_Prioritizer(pod *v1.Pod, nodes *v1.NodeList) (*schedulerapi.HostP
 	return &result, nil
 }
 
-func machine_3_Prioritizer(pod *v1.Pod, nodes *v1.NodeList) (*schedulerapi.HostPriorityList, error) {
+func machine3Prioritizer(pod *v1.Pod, nodes *v1.NodeList) (*schedulerapi.HostPriorityList, error) {
 	result := schedulerapi.HostPriorityList{}
 	for _, node := range nodes.Items {
 		score := 1

@@ -284,8 +284,8 @@ func TestSchedulerExtender(t *testing.T) {
 
 	extender1 := &Extender{
 		name: "extender1",
-		predicates: []fitPredicate{machine_1_2_3_Predicate},
-		prioritizers: []priorityConfig{{machine_2_Prioritizer, 1}},
+		predicates: []fitPredicate{machine1_2_3Predicate},
+		prioritizers: []priorityConfig{{machine2Prioritizer, 1}},
 	}
 	es1 := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
 		extender1.serveHTTP(t, w, req)

@@ -294,8 +294,8 @@ func TestSchedulerExtender(t *testing.T) {
 
 	extender2 := &Extender{
 		name: "extender2",
-		predicates: []fitPredicate{machine_2_3_5_Predicate},
-		prioritizers: []priorityConfig{{machine_3_Prioritizer, 1}},
+		predicates: []fitPredicate{machine2_3_5Predicate},
+		prioritizers: []priorityConfig{{machine3Prioritizer, 1}},
 		Client: clientSet,
 	}
 	es2 := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {

@@ -305,8 +305,8 @@ func TestSchedulerExtender(t *testing.T) {
 
 	extender3 := &Extender{
 		name: "extender3",
-		predicates: []fitPredicate{machine_1_2_3_Predicate},
-		prioritizers: []priorityConfig{{machine_2_Prioritizer, 5}},
+		predicates: []fitPredicate{machine1_2_3Predicate},
+		prioritizers: []priorityConfig{{machine2Prioritizer, 5}},
 		nodeCacheCapable: true,
 	}
 	es3 := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {

@@ -57,7 +57,7 @@ func (tp *TesterPlugin) Reserve(ps plugins.PluginSet, pod *v1.Pod, nodeName stri
 
 // Prebind is a test function that returns (true, nil) or errors for testing.
 func (tp *TesterPlugin) Prebind(ps plugins.PluginSet, pod *v1.Pod, nodeName string) (bool, error) {
-	var err error = nil
+	var err error
 	tp.numPrebindCalled++
 	if tp.failPrebind {
 		err = fmt.Errorf("injecting failure for pod %v", pod.Name)

@@ -373,7 +373,7 @@ func TestDisablePreemption(t *testing.T) {
 	}
 }
 
-func mkPriorityPodWithGrace(tc *TestContext, name string, priority int32, grace int64) *v1.Pod {
+func mkPriorityPodWithGrace(tc *testContext, name string, priority int32, grace int64) *v1.Pod {
 	defaultPodRes := &v1.ResourceRequirements{Requests: v1.ResourceList{
 		v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
 		v1.ResourceMemory: *resource.NewQuantity(100, resource.DecimalSI)},

@@ -45,6 +45,7 @@ import (
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/controller/disruption"
 	"k8s.io/kubernetes/pkg/scheduler"
+	// Register defaults in pkg/scheduler/algorithmprovider.
 	_ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
 	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 	"k8s.io/kubernetes/pkg/scheduler/factory"

@@ -54,7 +55,7 @@ import (
 	imageutils "k8s.io/kubernetes/test/utils/image"
 )
 
-type TestContext struct {
+type testContext struct {
 	closeFn framework.CloseFunc
 	httpServer *httptest.Server
 	ns *v1.Namespace

@@ -97,8 +98,8 @@ func createConfiguratorWithPodInformer(
 
 // initTestMasterAndScheduler initializes a test environment and creates a master with default
 // configuration.
-func initTestMaster(t *testing.T, nsPrefix string, admission admission.Interface) *TestContext {
-	context := TestContext{
+func initTestMaster(t *testing.T, nsPrefix string, admission admission.Interface) *testContext {
+	context := testContext{
 		stopCh: make(chan struct{}),
 	}
 

@@ -139,10 +140,10 @@ func initTestMaster(t *testing.T, nsPrefix string, admission admission.Interface
 // configuration.
 func initTestScheduler(
 	t *testing.T,
-	context *TestContext,
+	context *testContext,
 	setPodInformer bool,
 	policy *schedulerapi.Policy,
-) *TestContext {
+) *testContext {
 	// Pod preemption is enabled by default scheduler configuration, but preemption only happens when PodPriority
 	// feature gate is enabled at the same time.
 	return initTestSchedulerWithOptions(t, context, setPodInformer, policy, nil, false, time.Second)

@@ -152,13 +153,13 @@ func initTestScheduler(
 // configuration and other options.
 func initTestSchedulerWithOptions(
 	t *testing.T,
-	context *TestContext,
+	context *testContext,
 	setPodInformer bool,
 	policy *schedulerapi.Policy,
 	pluginSet plugins.PluginSet,
 	disablePreemption bool,
 	resyncPeriod time.Duration,
-) *TestContext {
+) *testContext {
 	// 1. Create scheduler
 	context.informerFactory = informers.NewSharedInformerFactory(context.clientSet, resyncPeriod)
 

@@ -234,7 +235,7 @@ func initTestSchedulerWithOptions(
 
 // initDisruptionController initializes and runs a Disruption Controller to properly
 // update PodDisuptionBudget objects.
-func initDisruptionController(context *TestContext) *disruption.DisruptionController {
+func initDisruptionController(context *testContext) *disruption.DisruptionController {
 	informers := informers.NewSharedInformerFactory(context.clientSet, 12*time.Hour)
 
 	dc := disruption.NewDisruptionController(

@@ -254,20 +255,20 @@ func initDisruptionController(context *TestContext) *disruption.DisruptionContro
 
 // initTest initializes a test environment and creates master and scheduler with default
 // configuration.
-func initTest(t *testing.T, nsPrefix string) *TestContext {
+func initTest(t *testing.T, nsPrefix string) *testContext {
 	return initTestScheduler(t, initTestMaster(t, nsPrefix, nil), true, nil)
 }
 
 // initTestDisablePreemption initializes a test environment and creates master and scheduler with default
 // configuration but with pod preemption disabled.
-func initTestDisablePreemption(t *testing.T, nsPrefix string) *TestContext {
+func initTestDisablePreemption(t *testing.T, nsPrefix string) *testContext {
 	return initTestSchedulerWithOptions(
 		t, initTestMaster(t, nsPrefix, nil), true, nil, nil, true, time.Second)
 }
 
 // cleanupTest deletes the scheduler and the test namespace. It should be called
 // at the end of a test.
-func cleanupTest(t *testing.T, context *TestContext) {
+func cleanupTest(t *testing.T, context *testContext) {
 	// Kill the scheduler.
 	close(context.stopCh)
 	// Cleanup nodes.

@@ -666,7 +667,7 @@ func waitForPodUnschedulable(cs clientset.Interface, pod *v1.Pod) error {
 
 // waitForPDBsStable waits for PDBs to have "CurrentHealthy" status equal to
 // the expected values.
-func waitForPDBsStable(context *TestContext, pdbs []*policy.PodDisruptionBudget, pdbPodNum []int32) error {
+func waitForPDBsStable(context *testContext, pdbs []*policy.PodDisruptionBudget, pdbPodNum []int32) error {
 	return wait.Poll(time.Second, 60*time.Second, func() (bool, error) {
 		pdbList, err := context.clientSet.PolicyV1beta1().PodDisruptionBudgets(context.ns.Name).List(metav1.ListOptions{})
 		if err != nil {

@@ -694,7 +695,7 @@ func waitForPDBsStable(context *TestContext, pdbs []*policy.PodDisruptionBudget,
 }
 
 // waitCachedPodsStable waits until scheduler cache has the given pods.
-func waitCachedPodsStable(context *TestContext, pods []*v1.Pod) error {
+func waitCachedPodsStable(context *testContext, pods []*v1.Pod) error {
 	return wait.Poll(time.Second, 30*time.Second, func() (bool, error) {
 		cachedPods, err := context.scheduler.Config().SchedulerCache.List(labels.Everything())
 		if err != nil {

@@ -922,7 +922,7 @@ func setupCluster(t *testing.T, nsName string, numberOfNodes int, resyncPeriod t
 	}
 }
 
-func initPVController(context *TestContext, provisionDelaySeconds int) (*persistentvolume.PersistentVolumeController, informers.SharedInformerFactory, error) {
+func initPVController(context *testContext, provisionDelaySeconds int) (*persistentvolume.PersistentVolumeController, informers.SharedInformerFactory, error) {
 	clientset := context.clientSet
 	// Informers factory for controllers, we disable resync period for testing.
 	informerFactory := informers.NewSharedInformerFactory(clientset, 0)

@@ -31,6 +31,7 @@ import (
 	"github.com/coreos/etcd/pkg/transport"
 )
 
+// DeletePodOrErrorf deletes a pod or fails with a call to t.Errorf.
 func DeletePodOrErrorf(t *testing.T, c clientset.Interface, ns, name string) {
 	if err := c.CoreV1().Pods(ns).Delete(name, nil); err != nil {
 		t.Errorf("unable to delete pod %v: %v", name, err)

@@ -39,17 +40,19 @@ func DeletePodOrErrorf(t *testing.T, c clientset.Interface, ns, name string) {
 
 // Requests to try. Each one should be forbidden or not forbidden
 // depending on the authentication and authorization setup of the master.
-var Code200 = map[int]bool{200: true}
-var Code201 = map[int]bool{201: true}
-var Code400 = map[int]bool{400: true}
-var Code401 = map[int]bool{401: true}
-var Code403 = map[int]bool{403: true}
-var Code404 = map[int]bool{404: true}
-var Code405 = map[int]bool{405: true}
-var Code409 = map[int]bool{409: true}
-var Code422 = map[int]bool{422: true}
-var Code500 = map[int]bool{500: true}
-var Code503 = map[int]bool{503: true}
+var (
+	Code200 = map[int]bool{200: true}
+	Code201 = map[int]bool{201: true}
+	Code400 = map[int]bool{400: true}
+	Code401 = map[int]bool{401: true}
+	Code403 = map[int]bool{403: true}
+	Code404 = map[int]bool{404: true}
+	Code405 = map[int]bool{405: true}
+	Code409 = map[int]bool{409: true}
+	Code422 = map[int]bool{422: true}
+	Code500 = map[int]bool{500: true}
+	Code503 = map[int]bool{503: true}
+)
 
 // WaitForPodToDisappear polls the API server if the pod has been deleted.
 func WaitForPodToDisappear(podClient coreclient.PodInterface, podName string, interval, timeout time.Duration) error {

@@ -57,16 +60,15 @@ func WaitForPodToDisappear(podClient coreclient.PodInterface, podName string, in
 		_, err := podClient.Get(podName, metav1.GetOptions{})
 		if err == nil {
 			return false, nil
-		} else {
+		}
 		if errors.IsNotFound(err) {
 			return true, nil
-		} else {
+		}
 		return false, err
-		}
-		}
 	})
 }
 
+// GetEtcdClients returns an initialized clientv3.Client and clientv3.KV.
 func GetEtcdClients(config storagebackend.TransportConfig) (*clientv3.Client, clientv3.KV, error) {
 	tlsInfo := transport.TLSInfo{
 		CertFile: config.CertFile,