mirror of https://github.com/k3s-io/k3s

Remove test/integration/* from hack/.golint_failures

parent 9fd7a4c2dc
commit 75df4dc04c
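The hunks below are the golint fixes that let these packages come off the exception list: initialisms capitalized in local names (baseUrl becomes baseURL, tokenId becomes tokenID, resourceVersionJson becomes resourceVersionJSON), redundant types and zero-value initializers dropped from declarations, else branches removed when the if block ends in a return, doc comments added to exported test helpers, a grouped var block, and a test-only constructor unexported. A minimal, hypothetical sketch of the before/after pattern follows; the function and variable names are illustrative and not taken from the repository:

package main

import (
	"fmt"
	"strings"
)

// buildKeyBefore mirrors the style golint flags in these hunks: a Url
// initialism, an explicit type where it can be inferred, and an else
// branch after a return.
func buildKeyBefore(url, id string) string {
	var baseUrl string = strings.Split(url, "?")[0]
	if id == "" {
		return baseUrl
	} else {
		return fmt.Sprintf("%s/%v", baseUrl, id)
	}
}

// buildKeyAfter is the cleaned-up form: baseURL, inferred type, no else.
func buildKeyAfter(url, id string) string {
	baseURL := strings.Split(url, "?")[0]
	if id == "" {
		return baseURL
	}
	return fmt.Sprintf("%s/%v", baseURL, id)
}

func main() {
	// Both forms behave identically; only the lint findings differ.
	fmt.Println(buildKeyBefore("/api/v1/pods?watch=true", "nginx"))
	fmt.Println(buildKeyAfter("/api/v1/pods?watch=true", "nginx"))
}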
@@ -669,12 +669,4 @@ test/e2e_node/environment
 test/e2e_node/remote
 test/e2e_node/runner/remote
 test/e2e_node/services
-test/integration
-test/integration/auth
-test/integration/evictions
-test/integration/framework
-test/integration/master
-test/integration/replicaset
-test/integration/replicationcontroller
-test/integration/scheduler
 test/utils
@@ -228,7 +228,7 @@ var aBinding string = `
 }
 `
 
-var emptyEndpoints string = `
+var emptyEndpoints = `
 {
   "kind": "Endpoints",
   "apiVersion": "v1",
@@ -493,10 +493,10 @@ func parseResourceVersion(response []byte) (string, float64, error) {
 }
 
 func getPreviousResourceVersionKey(url, id string) string {
-	baseUrl := strings.Split(url, "?")[0]
-	key := baseUrl
+	baseURL := strings.Split(url, "?")[0]
+	key := baseURL
 	if id != "" {
-		key = fmt.Sprintf("%s/%v", baseUrl, id)
+		key = fmt.Sprintf("%s/%v", baseURL, id)
 	}
 	return key
 }
@@ -1075,8 +1075,8 @@ func TestKindAuthorization(t *testing.T) {
 		if r.verb == "PUT" && r.body != "" {
 			// For update operations, insert previous resource version
 			if resVersion := previousResourceVersion[getPreviousResourceVersionKey(r.URL, "")]; resVersion != 0 {
-				resourceVersionJson := fmt.Sprintf(",\r\n\"resourceVersion\": \"%v\"", resVersion)
-				bodyStr = fmt.Sprintf(r.body, resourceVersionJson)
+				resourceVersionJSON := fmt.Sprintf(",\r\n\"resourceVersion\": \"%v\"", resVersion)
+				bodyStr = fmt.Sprintf(r.body, resourceVersionJSON)
 			}
 		}
 	}
@@ -47,7 +47,7 @@ func (b bootstrapSecrets) Get(name string) (*corev1.Secret, error) {
 
 // TestBootstrapTokenAuth tests the bootstrap token auth provider
 func TestBootstrapTokenAuth(t *testing.T) {
-	tokenId, err := bootstraputil.GenerateTokenId()
+	tokenID, err := bootstraputil.GenerateTokenId()
 	if err != nil {
 		t.Fatalf("unexpected error: %v", err)
 	}
@@ -62,7 +62,7 @@ func TestBootstrapTokenAuth(t *testing.T) {
 			},
 			Type: corev1.SecretTypeBootstrapToken,
 			Data: map[string][]byte{
-				bootstrapapi.BootstrapTokenIDKey: []byte(tokenId),
+				bootstrapapi.BootstrapTokenIDKey: []byte(tokenID),
 				bootstrapapi.BootstrapTokenSecretKey: []byte(secret),
 				bootstrapapi.BootstrapTokenUsageAuthentication: []byte("true"),
 			},
@@ -74,7 +74,7 @@ func TestBootstrapTokenAuth(t *testing.T) {
 			},
 			Type: corev1.SecretTypeBootstrapToken,
 			Data: map[string][]byte{
-				bootstrapapi.BootstrapTokenIDKey: []byte(tokenId),
+				bootstrapapi.BootstrapTokenIDKey: []byte(tokenID),
 				bootstrapapi.BootstrapTokenSecretKey: []byte("invalid"),
 				bootstrapapi.BootstrapTokenUsageAuthentication: []byte("true"),
 			},
@@ -86,7 +86,7 @@ func TestBootstrapTokenAuth(t *testing.T) {
 			},
 			Type: corev1.SecretTypeBootstrapToken,
 			Data: map[string][]byte{
-				bootstrapapi.BootstrapTokenIDKey: []byte(tokenId),
+				bootstrapapi.BootstrapTokenIDKey: []byte(tokenID),
 				bootstrapapi.BootstrapTokenSecretKey: []byte("invalid"),
 				bootstrapapi.BootstrapTokenUsageAuthentication: []byte("true"),
 				bootstrapapi.BootstrapTokenExpirationKey: []byte(bootstraputil.TimeStringFromNow(-time.Hour)),
@@ -134,7 +134,7 @@ func TestBootstrapTokenAuth(t *testing.T) {
 		previousResourceVersion := make(map[string]float64)
 		transport := http.DefaultTransport
 
-		token := tokenId + "." + secret
+		token := tokenID + "." + secret
 		var bodyStr string
 		if test.request.body != "" {
 			sub := ""
@@ -95,7 +95,7 @@ func TestConcurrentEvictionRequests(t *testing.T) {
 
 	waitPDBStable(t, clientSet, numOfEvictions, ns.Name, pdb.Name)
 
-	var numberPodsEvicted uint32 = 0
+	var numberPodsEvicted uint32
 	errCh := make(chan error, 3*numOfEvictions)
 	var wg sync.WaitGroup
 	// spawn numOfEvictions goroutines to concurrently evict the pods
@@ -103,11 +103,13 @@ type MasterHolder struct {
 	M *master.Master
 }
 
+// SetMaster assigns the current master.
 func (h *MasterHolder) SetMaster(m *master.Master) {
 	h.M = m
 	close(h.Initialized)
 }
 
+// DefaultOpenAPIConfig returns an openapicommon.Config initialized to default values.
 func DefaultOpenAPIConfig() *openapicommon.Config {
 	openAPIConfig := genericapiserver.DefaultOpenAPIConfig(openapi.GetOpenAPIDefinitions, openapinamer.NewDefinitionNamer(legacyscheme.Scheme))
 	openAPIConfig.Info = &spec.Info{
@@ -237,7 +239,7 @@ func startMasterOrDie(masterConfig *master.Config, incomingServer *httptest.Serv
 	return m, s, closeFn
 }
 
-// Returns the master config appropriate for most integration tests.
+// NewIntegrationTestMasterConfig returns the master config appropriate for most integration tests.
 func NewIntegrationTestMasterConfig() *master.Config {
 	masterConfig := NewMasterConfig()
 	masterConfig.GenericConfig.PublicAddress = net.ParseIP("192.168.10.4")
@@ -249,7 +251,7 @@ func NewIntegrationTestMasterConfig() *master.Config {
 	return masterConfig
 }
 
-// Returns a basic master config.
+// NewMasterConfig returns a basic master config.
 func NewMasterConfig() *master.Config {
 	// This causes the integration tests to exercise the etcd
 	// prefix code, so please don't change without ensuring
@@ -336,6 +338,7 @@ func NewMasterConfig() *master.Config {
 // CloseFunc can be called to cleanup the master
 type CloseFunc func()
 
+// RunAMaster starts a master with the provided config.
 func RunAMaster(masterConfig *master.Config) (*master.Master, *httptest.Server, CloseFunc) {
 	if masterConfig == nil {
 		masterConfig = NewMasterConfig()
@@ -344,6 +347,7 @@ func RunAMaster(masterConfig *master.Config) (*master.Master, *httptest.Server,
 	return startMasterOrDie(masterConfig, nil, nil)
 }
 
+// RunAMasterUsingServer starts up a master using the provided config on the specified server.
 func RunAMasterUsingServer(masterConfig *master.Config, s *httptest.Server, masterReceiver MasterReceiver) (*master.Master, *httptest.Server, CloseFunc) {
 	return startMasterOrDie(masterConfig, s, masterReceiver)
 }
@@ -31,12 +31,14 @@ const (
 	retries = 5
 )
 
+// IntegrationTestNodePreparer holds configuration information for the test node preparer.
 type IntegrationTestNodePreparer struct {
 	client clientset.Interface
 	countToStrategy []testutils.CountToStrategy
 	nodeNamePrefix string
 }
 
+// NewIntegrationTestNodePreparer creates an IntegrationTestNodePreparer configured with defaults.
 func NewIntegrationTestNodePreparer(client clientset.Interface, countToStrategy []testutils.CountToStrategy, nodeNamePrefix string) testutils.TestNodePreparer {
 	return &IntegrationTestNodePreparer{
 		client: client,
@@ -45,6 +47,7 @@ func NewIntegrationTestNodePreparer(client clientset.Interface, countToStrategy
 	}
 }
 
+// PrepareNodes prepares countToStrategy test nodes.
 func (p *IntegrationTestNodePreparer) PrepareNodes() error {
 	numNodes := 0
 	for _, v := range p.countToStrategy {
@@ -96,6 +99,7 @@ func (p *IntegrationTestNodePreparer) PrepareNodes() error {
 	return nil
 }
 
+// CleanupNodes deletes existing test nodes.
 func (p *IntegrationTestNodePreparer) CleanupNodes() error {
 	nodes := e2eframework.GetReadySchedulableNodesOrDie(p.client)
 	for i := range nodes.Items {
@@ -39,6 +39,7 @@ import (
 	"k8s.io/kubernetes/pkg/master"
 )
 
+// TestServerSetup holds configuration information for a kube-apiserver test server.
 type TestServerSetup struct {
 	ModifyServerRunOptions func(*options.ServerRunOptions)
 	ModifyServerConfig func(*master.Config)
@@ -33,6 +33,7 @@ const (
 	currentPodInfraContainerImageVersion = "3.1"
 )
 
+// CreateTestingNamespace creates a namespace for testing.
 func CreateTestingNamespace(baseName string, apiserver *httptest.Server, t *testing.T) *v1.Namespace {
 	// TODO: Create a namespace with a given basename.
 	// Currently we neither create the namespace nor delete all of its contents at the end.
@@ -45,6 +46,7 @@ func CreateTestingNamespace(baseName string, apiserver *httptest.Server, t *test
 	}
 }
 
+// DeleteTestingNamespace is currently a no-op function.
 func DeleteTestingNamespace(ns *v1.Namespace, apiserver *httptest.Server, t *testing.T) {
 	// TODO: Remove all resources from a given namespace once we implement CreateTestingNamespace.
 }
@@ -46,7 +46,7 @@ type base64Plugin struct {
 	encryptRequest chan *kmsapi.EncryptRequest
 }
 
-func NewBase64Plugin() (*base64Plugin, error) {
+func newBase64Plugin() (*base64Plugin, error) {
 	listener, err := net.Listen(unixProtocol, sockFile)
 	if err != nil {
 		return nil, fmt.Errorf("failed to listen on the unix socket, error: %v", err)
@@ -80,7 +80,7 @@ func (r rawDEKKEKSecret) getPayload() []byte {
 // 4. The payload (ex. Secret) should be encrypted via AES CBC transform
 // 5. Prefix-EncryptedDEK-EncryptedPayload structure should be deposited to ETCD
 func TestKMSProvider(t *testing.T) {
-	pluginMock, err := NewBase64Plugin()
+	pluginMock, err := newBase64Plugin()
 	if err != nil {
 		t.Fatalf("failed to create mock of KMS Plugin: %v", err)
 	}
@@ -353,7 +353,7 @@ func TestWatchSucceedsWithoutArgs(t *testing.T) {
 	resp.Body.Close()
 }
 
-var hpaV1 string = `
+var hpaV1 = `
 {
   "apiVersion": "autoscaling/v1",
   "kind": "HorizontalPodAutoscaler",
@@ -374,7 +374,7 @@ var hpaV1 string = `
 }
 `
 
-var deploymentExtensions string = `
+var deploymentExtensions = `
 {
   "apiVersion": "extensions/v1beta1",
   "kind": "Deployment",
@@ -401,7 +401,7 @@ var deploymentExtensions string = `
 }
 `
 
-var deploymentApps string = `
+var deploymentApps = `
 {
   "apiVersion": "apps/v1",
   "kind": "Deployment",
@@ -448,12 +448,14 @@ func TestAdoption(t *testing.T) {
 			if err != nil {
 				return false, err
 			}
-			if e, a := tc.expectedOwnerReferences(rs), updatedPod.OwnerReferences; reflect.DeepEqual(e, a) {
+
+			e, a := tc.expectedOwnerReferences(rs), updatedPod.OwnerReferences
+			if reflect.DeepEqual(e, a) {
 				return true, nil
-			} else {
-				t.Logf("ownerReferences don't match, expect %v, got %v", e, a)
-				return false, nil
 			}
+
+			t.Logf("ownerReferences don't match, expect %v, got %v", e, a)
+			return false, nil
 		}); err != nil {
 			t.Fatalf("test %q failed: %v", tc.name, err)
 		}
@@ -448,12 +448,14 @@ func TestAdoption(t *testing.T) {
 			if err != nil {
 				return false, err
 			}
-			if e, a := tc.expectedOwnerReferences(rc), updatedPod.OwnerReferences; reflect.DeepEqual(e, a) {
+
+			e, a := tc.expectedOwnerReferences(rc), updatedPod.OwnerReferences
+			if reflect.DeepEqual(e, a) {
 				return true, nil
-			} else {
-				t.Logf("ownerReferences don't match, expect %v, got %v", e, a)
-				return false, nil
 			}
+
+			t.Logf("ownerReferences don't match, expect %v, got %v", e, a)
+			return false, nil
 		}); err != nil {
 			t.Fatalf("test %q failed: %v", tc.name, err)
 		}
@@ -156,37 +156,37 @@ func (e *Extender) Filter(args *schedulerapi.ExtenderArgs) (*schedulerapi.Extend
 
 	if e.nodeCacheCapable {
 		return e.filterUsingNodeCache(args)
-	} else {
-		for _, node := range args.Nodes.Items {
-			fits := true
-			for _, predicate := range e.predicates {
-				fit, err := predicate(args.Pod, &node)
-				if err != nil {
-					return &schedulerapi.ExtenderFilterResult{
-						Nodes: &v1.NodeList{},
-						NodeNames: nil,
-						FailedNodes: schedulerapi.FailedNodesMap{},
-						Error: err.Error(),
-					}, err
-				}
-				if !fit {
-					fits = false
-					break
-				}
-			}
-			if fits {
-				filtered = append(filtered, node)
-			} else {
-				failedNodesMap[node.Name] = fmt.Sprintf("extender failed: %s", e.name)
-			}
-		}
-
-		return &schedulerapi.ExtenderFilterResult{
-			Nodes: &v1.NodeList{Items: filtered},
-			NodeNames: nil,
-			FailedNodes: failedNodesMap,
-		}, nil
-	}
+	}
+
+	for _, node := range args.Nodes.Items {
+		fits := true
+		for _, predicate := range e.predicates {
+			fit, err := predicate(args.Pod, &node)
+			if err != nil {
+				return &schedulerapi.ExtenderFilterResult{
+					Nodes: &v1.NodeList{},
+					NodeNames: nil,
+					FailedNodes: schedulerapi.FailedNodesMap{},
+					Error: err.Error(),
+				}, err
+			}
+			if !fit {
+				fits = false
+				break
+			}
+		}
+		if fits {
+			filtered = append(filtered, node)
+		} else {
+			failedNodesMap[node.Name] = fmt.Sprintf("extender failed: %s", e.name)
+		}
+	}
+
+	return &schedulerapi.ExtenderFilterResult{
+		Nodes: &v1.NodeList{Items: filtered},
+		NodeNames: nil,
+		FailedNodes: failedNodesMap,
+	}, nil
 }
 
 func (e *Extender) Prioritize(args *schedulerapi.ExtenderArgs) (*schedulerapi.HostPriorityList, error) {
@@ -234,21 +234,21 @@ func (e *Extender) Bind(binding *schedulerapi.ExtenderBindingArgs) error {
 	return e.Client.CoreV1().Pods(b.Namespace).Bind(b)
 }
 
-func machine_1_2_3_Predicate(pod *v1.Pod, node *v1.Node) (bool, error) {
+func machine1_2_3Predicate(pod *v1.Pod, node *v1.Node) (bool, error) {
 	if node.Name == "machine1" || node.Name == "machine2" || node.Name == "machine3" {
 		return true, nil
 	}
 	return false, nil
 }
 
-func machine_2_3_5_Predicate(pod *v1.Pod, node *v1.Node) (bool, error) {
+func machine2_3_5Predicate(pod *v1.Pod, node *v1.Node) (bool, error) {
 	if node.Name == "machine2" || node.Name == "machine3" || node.Name == "machine5" {
 		return true, nil
 	}
 	return false, nil
 }
 
-func machine_2_Prioritizer(pod *v1.Pod, nodes *v1.NodeList) (*schedulerapi.HostPriorityList, error) {
+func machine2Prioritizer(pod *v1.Pod, nodes *v1.NodeList) (*schedulerapi.HostPriorityList, error) {
 	result := schedulerapi.HostPriorityList{}
 	for _, node := range nodes.Items {
 		score := 1
@@ -263,7 +263,7 @@ func machine_2_Prioritizer(pod *v1.Pod, nodes *v1.NodeList) (*schedulerapi.HostP
 	return &result, nil
 }
 
-func machine_3_Prioritizer(pod *v1.Pod, nodes *v1.NodeList) (*schedulerapi.HostPriorityList, error) {
+func machine3Prioritizer(pod *v1.Pod, nodes *v1.NodeList) (*schedulerapi.HostPriorityList, error) {
 	result := schedulerapi.HostPriorityList{}
 	for _, node := range nodes.Items {
 		score := 1
@@ -284,8 +284,8 @@ func TestSchedulerExtender(t *testing.T) {
 
 	extender1 := &Extender{
 		name: "extender1",
-		predicates: []fitPredicate{machine_1_2_3_Predicate},
-		prioritizers: []priorityConfig{{machine_2_Prioritizer, 1}},
+		predicates: []fitPredicate{machine1_2_3Predicate},
+		prioritizers: []priorityConfig{{machine2Prioritizer, 1}},
 	}
 	es1 := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
 		extender1.serveHTTP(t, w, req)
@@ -294,8 +294,8 @@ func TestSchedulerExtender(t *testing.T) {
 
 	extender2 := &Extender{
 		name: "extender2",
-		predicates: []fitPredicate{machine_2_3_5_Predicate},
-		prioritizers: []priorityConfig{{machine_3_Prioritizer, 1}},
+		predicates: []fitPredicate{machine2_3_5Predicate},
+		prioritizers: []priorityConfig{{machine3Prioritizer, 1}},
 		Client: clientSet,
 	}
 	es2 := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
@@ -305,8 +305,8 @@ func TestSchedulerExtender(t *testing.T) {
 
 	extender3 := &Extender{
 		name: "extender3",
-		predicates: []fitPredicate{machine_1_2_3_Predicate},
-		prioritizers: []priorityConfig{{machine_2_Prioritizer, 5}},
+		predicates: []fitPredicate{machine1_2_3Predicate},
+		prioritizers: []priorityConfig{{machine2Prioritizer, 5}},
 		nodeCacheCapable: true,
 	}
 	es3 := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
@@ -57,7 +57,7 @@ func (tp *TesterPlugin) Reserve(ps plugins.PluginSet, pod *v1.Pod, nodeName stri
 
 // Prebind is a test function that returns (true, nil) or errors for testing.
 func (tp *TesterPlugin) Prebind(ps plugins.PluginSet, pod *v1.Pod, nodeName string) (bool, error) {
-	var err error = nil
+	var err error
 	tp.numPrebindCalled++
 	if tp.failPrebind {
 		err = fmt.Errorf("injecting failure for pod %v", pod.Name)
@@ -373,7 +373,7 @@ func TestDisablePreemption(t *testing.T) {
 	}
 }
 
-func mkPriorityPodWithGrace(tc *TestContext, name string, priority int32, grace int64) *v1.Pod {
+func mkPriorityPodWithGrace(tc *testContext, name string, priority int32, grace int64) *v1.Pod {
 	defaultPodRes := &v1.ResourceRequirements{Requests: v1.ResourceList{
 		v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
 		v1.ResourceMemory: *resource.NewQuantity(100, resource.DecimalSI)},
@@ -45,6 +45,7 @@ import (
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/controller/disruption"
 	"k8s.io/kubernetes/pkg/scheduler"
+	// Register defaults in pkg/scheduler/algorithmprovider.
 	_ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
 	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 	"k8s.io/kubernetes/pkg/scheduler/factory"
@@ -54,7 +55,7 @@ import (
 	imageutils "k8s.io/kubernetes/test/utils/image"
 )
 
-type TestContext struct {
+type testContext struct {
 	closeFn framework.CloseFunc
 	httpServer *httptest.Server
 	ns *v1.Namespace
@@ -97,8 +98,8 @@ func createConfiguratorWithPodInformer(
 
 // initTestMasterAndScheduler initializes a test environment and creates a master with default
 // configuration.
-func initTestMaster(t *testing.T, nsPrefix string, admission admission.Interface) *TestContext {
-	context := TestContext{
+func initTestMaster(t *testing.T, nsPrefix string, admission admission.Interface) *testContext {
+	context := testContext{
 		stopCh: make(chan struct{}),
 	}
 
@@ -139,10 +140,10 @@ func initTestMaster(t *testing.T, nsPrefix string, admission admission.Interface
 // configuration.
 func initTestScheduler(
 	t *testing.T,
-	context *TestContext,
+	context *testContext,
 	setPodInformer bool,
 	policy *schedulerapi.Policy,
-) *TestContext {
+) *testContext {
 	// Pod preemption is enabled by default scheduler configuration, but preemption only happens when PodPriority
 	// feature gate is enabled at the same time.
 	return initTestSchedulerWithOptions(t, context, setPodInformer, policy, nil, false, time.Second)
@@ -152,13 +153,13 @@ func initTestScheduler(
 // configuration and other options.
 func initTestSchedulerWithOptions(
 	t *testing.T,
-	context *TestContext,
+	context *testContext,
 	setPodInformer bool,
 	policy *schedulerapi.Policy,
 	pluginSet plugins.PluginSet,
 	disablePreemption bool,
 	resyncPeriod time.Duration,
-) *TestContext {
+) *testContext {
 	// 1. Create scheduler
 	context.informerFactory = informers.NewSharedInformerFactory(context.clientSet, resyncPeriod)
 
@@ -234,7 +235,7 @@ func initTestSchedulerWithOptions(
 
 // initDisruptionController initializes and runs a Disruption Controller to properly
 // update PodDisuptionBudget objects.
-func initDisruptionController(context *TestContext) *disruption.DisruptionController {
+func initDisruptionController(context *testContext) *disruption.DisruptionController {
 	informers := informers.NewSharedInformerFactory(context.clientSet, 12*time.Hour)
 
 	dc := disruption.NewDisruptionController(
@@ -254,20 +255,20 @@ func initDisruptionController(context *TestContext) *disruption.DisruptionContro
 
 // initTest initializes a test environment and creates master and scheduler with default
 // configuration.
-func initTest(t *testing.T, nsPrefix string) *TestContext {
+func initTest(t *testing.T, nsPrefix string) *testContext {
 	return initTestScheduler(t, initTestMaster(t, nsPrefix, nil), true, nil)
 }
 
 // initTestDisablePreemption initializes a test environment and creates master and scheduler with default
 // configuration but with pod preemption disabled.
-func initTestDisablePreemption(t *testing.T, nsPrefix string) *TestContext {
+func initTestDisablePreemption(t *testing.T, nsPrefix string) *testContext {
 	return initTestSchedulerWithOptions(
 		t, initTestMaster(t, nsPrefix, nil), true, nil, nil, true, time.Second)
 }
 
 // cleanupTest deletes the scheduler and the test namespace. It should be called
 // at the end of a test.
-func cleanupTest(t *testing.T, context *TestContext) {
+func cleanupTest(t *testing.T, context *testContext) {
 	// Kill the scheduler.
 	close(context.stopCh)
 	// Cleanup nodes.
@@ -666,7 +667,7 @@ func waitForPodUnschedulable(cs clientset.Interface, pod *v1.Pod) error {
 
 // waitForPDBsStable waits for PDBs to have "CurrentHealthy" status equal to
 // the expected values.
-func waitForPDBsStable(context *TestContext, pdbs []*policy.PodDisruptionBudget, pdbPodNum []int32) error {
+func waitForPDBsStable(context *testContext, pdbs []*policy.PodDisruptionBudget, pdbPodNum []int32) error {
 	return wait.Poll(time.Second, 60*time.Second, func() (bool, error) {
 		pdbList, err := context.clientSet.PolicyV1beta1().PodDisruptionBudgets(context.ns.Name).List(metav1.ListOptions{})
 		if err != nil {
@@ -694,7 +695,7 @@ func waitForPDBsStable(context *TestContext, pdbs []*policy.PodDisruptionBudget,
 }
 
 // waitCachedPodsStable waits until scheduler cache has the given pods.
-func waitCachedPodsStable(context *TestContext, pods []*v1.Pod) error {
+func waitCachedPodsStable(context *testContext, pods []*v1.Pod) error {
 	return wait.Poll(time.Second, 30*time.Second, func() (bool, error) {
 		cachedPods, err := context.scheduler.Config().SchedulerCache.List(labels.Everything())
 		if err != nil {
@@ -922,7 +922,7 @@ func setupCluster(t *testing.T, nsName string, numberOfNodes int, resyncPeriod t
 	}
 }
 
-func initPVController(context *TestContext, provisionDelaySeconds int) (*persistentvolume.PersistentVolumeController, informers.SharedInformerFactory, error) {
+func initPVController(context *testContext, provisionDelaySeconds int) (*persistentvolume.PersistentVolumeController, informers.SharedInformerFactory, error) {
 	clientset := context.clientSet
 	// Informers factory for controllers, we disable resync period for testing.
 	informerFactory := informers.NewSharedInformerFactory(clientset, 0)
@@ -31,6 +31,7 @@ import (
 	"github.com/coreos/etcd/pkg/transport"
 )
 
+// DeletePodOrErrorf deletes a pod or fails with a call to t.Errorf.
 func DeletePodOrErrorf(t *testing.T, c clientset.Interface, ns, name string) {
 	if err := c.CoreV1().Pods(ns).Delete(name, nil); err != nil {
 		t.Errorf("unable to delete pod %v: %v", name, err)
@@ -39,17 +40,19 @@ func DeletePodOrErrorf(t *testing.T, c clientset.Interface, ns, name string) {
 
 // Requests to try. Each one should be forbidden or not forbidden
 // depending on the authentication and authorization setup of the master.
-var Code200 = map[int]bool{200: true}
-var Code201 = map[int]bool{201: true}
-var Code400 = map[int]bool{400: true}
-var Code401 = map[int]bool{401: true}
-var Code403 = map[int]bool{403: true}
-var Code404 = map[int]bool{404: true}
-var Code405 = map[int]bool{405: true}
-var Code409 = map[int]bool{409: true}
-var Code422 = map[int]bool{422: true}
-var Code500 = map[int]bool{500: true}
-var Code503 = map[int]bool{503: true}
+var (
+	Code200 = map[int]bool{200: true}
+	Code201 = map[int]bool{201: true}
+	Code400 = map[int]bool{400: true}
+	Code401 = map[int]bool{401: true}
+	Code403 = map[int]bool{403: true}
+	Code404 = map[int]bool{404: true}
+	Code405 = map[int]bool{405: true}
+	Code409 = map[int]bool{409: true}
+	Code422 = map[int]bool{422: true}
+	Code500 = map[int]bool{500: true}
+	Code503 = map[int]bool{503: true}
+)
 
 // WaitForPodToDisappear polls the API server if the pod has been deleted.
 func WaitForPodToDisappear(podClient coreclient.PodInterface, podName string, interval, timeout time.Duration) error {
@@ -57,16 +60,15 @@ func WaitForPodToDisappear(podClient coreclient.PodInterface, podName string, in
 		_, err := podClient.Get(podName, metav1.GetOptions{})
 		if err == nil {
 			return false, nil
-		} else {
-			if errors.IsNotFound(err) {
-				return true, nil
-			} else {
-				return false, err
-			}
 		}
+		if errors.IsNotFound(err) {
+			return true, nil
+		}
+		return false, err
 	})
 }
 
+// GetEtcdClients returns an initialized clientv3.Client and clientv3.KV.
 func GetEtcdClients(config storagebackend.TransportConfig) (*clientv3.Client, clientv3.KV, error) {
 	tlsInfo := transport.TLSInfo{
 		CertFile: config.CertFile,