use subtest for table units

remove duplicate testname from error msg

remove subtest for test setup loop

do not break on test failure

  https://github.com/kubernetes/kubernetes/pull/63665#discussion_r203571355

remove duplicate test.name in output

  https://github.com/kubernetes/kubernetes/pull/63665#discussion_r203574001
  https://github.com/kubernetes/kubernetes/pull/63665#discussion_r203574012
John Calabrese 2018-06-01 10:16:50 -04:00
parent 53ee0c8652
commit ad234e58be
4 changed files with 411 additions and 370 deletions
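
The diffs below apply Go's named-subtest pattern to the scheduler's table-driven tests. As a minimal sketch of that pattern (TestAdd and its fields are illustrative examples, not code from this repository): each table entry gains a name field, the loop body moves into t.Run, and a failure reported with t.Errorf fails only that entry, so the break/continue statements previously used to stop the whole table are unnecessary.

package example_test

import "testing"

func TestAdd(t *testing.T) {
    tests := []struct {
        name string
        a, b int
        want int
    }{
        {name: "both positive", a: 1, b: 2, want: 3},
        {name: "with a negative", a: 1, b: -2, want: -1},
    }
    for _, test := range tests {
        t.Run(test.name, func(t *testing.T) {
            // A failure here marks only this subtest as failed;
            // the remaining table entries still run.
            if got := test.a + test.b; got != test.want {
                t.Errorf("expected %d, got %d", test.want, got)
            }
        })
    }
}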


@@ -343,44 +343,45 @@ func TestUpdateResult(t *testing.T) {
 		},
 	}
 	for _, test := range tests {
-		node := schedulercache.NewNodeInfo()
-		testNode := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: test.nodeName}}
-		node.SetNode(testNode)
-		// Initialize and populate equivalence class cache.
-		ecache := NewCache()
-		nodeCache, _ := ecache.GetNodeCache(testNode.Name)
-		if test.expectPredicateMap {
-			predicateItem := predicateResult{
-				Fit: true,
-			}
-			nodeCache.cache[test.predicateKey] =
-				resultMap{
-					test.equivalenceHash: predicateItem,
-				}
-		}
-		nodeCache.updateResult(
-			test.pod,
-			test.predicateKey,
-			test.fit,
-			test.reasons,
-			test.equivalenceHash,
-			test.cache,
-			node,
-		)
-		cachedMapItem, ok := nodeCache.cache[test.predicateKey]
-		if !ok {
-			t.Errorf("Failed: %s, can't find expected cache item: %v",
-				test.name, test.expectCacheItem)
-		} else {
-			if !reflect.DeepEqual(cachedMapItem[test.equivalenceHash], test.expectCacheItem) {
-				t.Errorf("Failed: %s, expected cached item: %v, but got: %v",
-					test.name, test.expectCacheItem, cachedMapItem[test.equivalenceHash])
-			}
-		}
+		t.Run(test.name, func(t *testing.T) {
+			node := schedulercache.NewNodeInfo()
+			testNode := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: test.nodeName}}
+			node.SetNode(testNode)
+			// Initialize and populate equivalence class cache.
+			ecache := NewCache()
+			nodeCache, _ := ecache.GetNodeCache(testNode.Name)
+			if test.expectPredicateMap {
+				predicateItem := predicateResult{
+					Fit: true,
+				}
+				nodeCache.cache[test.predicateKey] =
+					resultMap{
+						test.equivalenceHash: predicateItem,
+					}
+			}
+			nodeCache.updateResult(
+				test.pod,
+				test.predicateKey,
+				test.fit,
+				test.reasons,
+				test.equivalenceHash,
+				test.cache,
+				node,
+			)
+			cachedMapItem, ok := nodeCache.cache[test.predicateKey]
+			if !ok {
+				t.Errorf("can't find expected cache item: %v", test.expectCacheItem)
+			} else {
+				if !reflect.DeepEqual(cachedMapItem[test.equivalenceHash], test.expectCacheItem) {
+					t.Errorf("expected cached item: %v, but got: %v",
+						test.expectCacheItem, cachedMapItem[test.equivalenceHash])
+				}
+			}
+		})
 	}
 }
@@ -481,61 +482,63 @@ func TestLookupResult(t *testing.T) {
 	}
 	for _, test := range tests {
-		testNode := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: test.nodeName}}
-		// Initialize and populate equivalence class cache.
-		ecache := NewCache()
-		nodeCache, _ := ecache.GetNodeCache(testNode.Name)
-		node := schedulercache.NewNodeInfo()
-		node.SetNode(testNode)
-		// set cached item to equivalence cache
-		nodeCache.updateResult(
-			test.podName,
-			test.predicateKey,
-			test.cachedItem.fit,
-			test.cachedItem.reasons,
-			test.equivalenceHashForUpdatePredicate,
-			test.cache,
-			node,
-		)
-		// if we want to do invalid, invalid the cached item
-		if test.expectedPredicateKeyMiss {
-			predicateKeys := sets.NewString()
-			predicateKeys.Insert(test.predicateKey)
-			ecache.InvalidatePredicatesOnNode(test.nodeName, predicateKeys)
-		}
-		// calculate predicate with equivalence cache
-		result, ok := nodeCache.lookupResult(test.podName,
-			test.nodeName,
-			test.predicateKey,
-			test.equivalenceHashForCalPredicate,
-		)
-		fit, reasons := result.Fit, result.FailReasons
-		// returned invalid should match expectedPredicateKeyMiss or expectedEquivalenceHashMiss
-		if test.equivalenceHashForUpdatePredicate != test.equivalenceHashForCalPredicate {
-			if ok && test.expectedEquivalenceHashMiss {
-				t.Errorf("Failed: %s, expected (equivalence hash) cache miss", test.name)
-			}
-			if !ok && !test.expectedEquivalenceHashMiss {
-				t.Errorf("Failed: %s, expected (equivalence hash) cache hit", test.name)
-			}
-		} else {
-			if ok && test.expectedPredicateKeyMiss {
-				t.Errorf("Failed: %s, expected (predicate key) cache miss", test.name)
-			}
-			if !ok && !test.expectedPredicateKeyMiss {
-				t.Errorf("Failed: %s, expected (predicate key) cache hit", test.name)
-			}
-		}
-		// returned predicate result should match expected predicate item
-		if fit != test.expectedPredicateItem.fit {
-			t.Errorf("Failed: %s, expected fit: %v, but got: %v", test.name, test.cachedItem.fit, fit)
-		}
-		if !slicesEqual(reasons, test.expectedPredicateItem.reasons) {
-			t.Errorf("Failed: %s, expected reasons: %v, but got: %v",
-				test.name, test.expectedPredicateItem.reasons, reasons)
-		}
+		t.Run(test.name, func(t *testing.T) {
+			testNode := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: test.nodeName}}
+			// Initialize and populate equivalence class cache.
+			ecache := NewCache()
+			nodeCache, _ := ecache.GetNodeCache(testNode.Name)
+			node := schedulercache.NewNodeInfo()
+			node.SetNode(testNode)
+			// set cached item to equivalence cache
+			nodeCache.updateResult(
+				test.podName,
+				test.predicateKey,
+				test.cachedItem.fit,
+				test.cachedItem.reasons,
+				test.equivalenceHashForUpdatePredicate,
+				test.cache,
+				node,
+			)
+			// if we want to do invalid, invalid the cached item
+			if test.expectedPredicateKeyMiss {
+				predicateKeys := sets.NewString()
+				predicateKeys.Insert(test.predicateKey)
+				ecache.InvalidatePredicatesOnNode(test.nodeName, predicateKeys)
+			}
+			// calculate predicate with equivalence cache
+			result, ok := nodeCache.lookupResult(test.podName,
+				test.nodeName,
+				test.predicateKey,
+				test.equivalenceHashForCalPredicate,
+			)
+			fit, reasons := result.Fit, result.FailReasons
+			// returned invalid should match expectedPredicateKeyMiss or expectedEquivalenceHashMiss
+			if test.equivalenceHashForUpdatePredicate != test.equivalenceHashForCalPredicate {
+				if ok && test.expectedEquivalenceHashMiss {
+					t.Errorf("Failed: %s, expected (equivalence hash) cache miss", test.name)
+				}
+				if !ok && !test.expectedEquivalenceHashMiss {
+					t.Errorf("Failed: %s, expected (equivalence hash) cache hit", test.name)
+				}
+			} else {
+				if ok && test.expectedPredicateKeyMiss {
+					t.Errorf("Failed: %s, expected (predicate key) cache miss", test.name)
+				}
+				if !ok && !test.expectedPredicateKeyMiss {
+					t.Errorf("Failed: %s, expected (predicate key) cache hit", test.name)
+				}
+			}
+			// returned predicate result should match expected predicate item
+			if fit != test.expectedPredicateItem.fit {
+				t.Errorf("Failed: %s, expected fit: %v, but got: %v", test.name, test.cachedItem.fit, fit)
+			}
+			if !slicesEqual(reasons, test.expectedPredicateItem.reasons) {
+				t.Errorf("Failed: %s, expected reasons: %v, but got: %v",
+					test.name, test.expectedPredicateItem.reasons, reasons)
+			}
+		})
 	}
 }
@@ -658,6 +661,7 @@ func TestInvalidateCachedPredicateItemOfAllNodes(t *testing.T) {
 	testPredicate := "GeneralPredicates"
 	// tests is used to initialize all nodes
 	tests := []struct {
+		name string
 		podName string
 		nodeName string
 		equivalenceHashForUpdatePredicate uint64
@@ -665,6 +669,7 @@ func TestInvalidateCachedPredicateItemOfAllNodes(t *testing.T) {
 		cache schedulercache.Cache
 	}{
 		{
+			name: "hash predicate 123 not fits host ports",
 			podName: "testPod",
 			nodeName: "node1",
 			equivalenceHashForUpdatePredicate: 123,
@@ -677,6 +682,7 @@ func TestInvalidateCachedPredicateItemOfAllNodes(t *testing.T) {
 			cache: &upToDateCache{},
 		},
 		{
+			name: "hash predicate 456 not fits host ports",
 			podName: "testPod",
 			nodeName: "node2",
 			equivalenceHashForUpdatePredicate: 456,
@@ -689,6 +695,7 @@ func TestInvalidateCachedPredicateItemOfAllNodes(t *testing.T) {
 			cache: &upToDateCache{},
 		},
 		{
+			name: "hash predicate 123 fits",
 			podName: "testPod",
 			nodeName: "node3",
 			equivalenceHashForUpdatePredicate: 123,
@@ -723,13 +730,14 @@ func TestInvalidateCachedPredicateItemOfAllNodes(t *testing.T) {
 	// there should be no cached predicate any more
 	for _, test := range tests {
-		if nodeCache, exist := ecache.nodeToCache[test.nodeName]; exist {
-			if _, exist := nodeCache.cache[testPredicate]; exist {
-				t.Errorf("Failed: cached item for predicate key: %v on node: %v should be invalidated",
-					testPredicate, test.nodeName)
-				break
-			}
-		}
+		t.Run(test.name, func(t *testing.T) {
+			if nodeCache, exist := ecache.nodeToCache[test.nodeName]; exist {
+				if _, exist := nodeCache.cache[testPredicate]; exist {
+					t.Errorf("Failed: cached item for predicate key: %v on node: %v should be invalidated",
+						testPredicate, test.nodeName)
+				}
+			}
+		})
 	}
 }
@@ -737,6 +745,7 @@ func TestInvalidateAllCachedPredicateItemOfNode(t *testing.T) {
 	testPredicate := "GeneralPredicates"
 	// tests is used to initialize all nodes
 	tests := []struct {
+		name string
 		podName string
 		nodeName string
 		equivalenceHashForUpdatePredicate uint64
@@ -744,6 +753,7 @@ func TestInvalidateAllCachedPredicateItemOfNode(t *testing.T) {
 		cache schedulercache.Cache
 	}{
 		{
+			name: "hash predicate 123 not fits host ports",
 			podName: "testPod",
 			nodeName: "node1",
 			equivalenceHashForUpdatePredicate: 123,
@@ -754,6 +764,7 @@ func TestInvalidateAllCachedPredicateItemOfNode(t *testing.T) {
 			cache: &upToDateCache{},
 		},
 		{
+			name: "hash predicate 456 not fits host ports",
 			podName: "testPod",
 			nodeName: "node2",
 			equivalenceHashForUpdatePredicate: 456,
@@ -764,6 +775,7 @@ func TestInvalidateAllCachedPredicateItemOfNode(t *testing.T) {
 			cache: &upToDateCache{},
 		},
 		{
+			name: "hash predicate 123 fits host ports",
 			podName: "testPod",
 			nodeName: "node3",
 			equivalenceHashForUpdatePredicate: 123,
@@ -794,12 +806,13 @@ func TestInvalidateAllCachedPredicateItemOfNode(t *testing.T) {
 	}
 	for _, test := range tests {
-		// invalidate all cached predicate for node
-		ecache.InvalidateAllPredicatesOnNode(test.nodeName)
-		if _, ok := ecache.GetNodeCache(test.nodeName); ok {
-			t.Errorf("Failed: node: %v should not be found in internal cache", test.nodeName)
-			break
-		}
+		t.Run(test.name, func(t *testing.T) {
+			// invalidate cached predicate for all nodes
+			ecache.InvalidateAllPredicatesOnNode(test.nodeName)
+			if _, ok := ecache.GetNodeCache(test.nodeName); ok {
+				t.Errorf("Failed: cached item for node: %v should be invalidated", test.nodeName)
+			}
+		})
 	}
 }


@@ -492,43 +492,45 @@ func TestGenericSchedulerWithExtenders(t *testing.T) {
 	}
 	for _, test := range tests {
-		extenders := []algorithm.SchedulerExtender{}
-		for ii := range test.extenders {
-			extenders = append(extenders, &test.extenders[ii])
-		}
-		cache := schedulercache.New(time.Duration(0), wait.NeverStop)
-		for _, name := range test.nodes {
-			cache.AddNode(createNode(name))
-		}
-		queue := NewSchedulingQueue()
-		scheduler := NewGenericScheduler(
-			cache,
-			nil,
-			queue,
-			test.predicates,
-			algorithm.EmptyPredicateMetadataProducer,
-			test.prioritizers,
-			algorithm.EmptyPriorityMetadataProducer,
-			extenders,
-			nil,
-			schedulertesting.FakePersistentVolumeClaimLister{},
-			false,
-			false)
-		podIgnored := &v1.Pod{}
-		machine, err := scheduler.Schedule(podIgnored, schedulertesting.FakeNodeLister(makeNodeList(test.nodes)))
-		if test.expectsErr {
-			if err == nil {
-				t.Errorf("Unexpected non-error for %s, machine %s", test.name, machine)
-			}
-		} else {
-			if err != nil {
-				t.Errorf("Unexpected error: %v", err)
-				continue
-			}
-			if test.expectedHost != machine {
-				t.Errorf("Failed : %s, Expected: %s, Saw: %s", test.name, test.expectedHost, machine)
-			}
-		}
+		t.Run(test.name, func(t *testing.T) {
+			extenders := []algorithm.SchedulerExtender{}
+			for ii := range test.extenders {
+				extenders = append(extenders, &test.extenders[ii])
+			}
+			cache := schedulercache.New(time.Duration(0), wait.NeverStop)
+			for _, name := range test.nodes {
+				cache.AddNode(createNode(name))
+			}
+			queue := NewSchedulingQueue()
+			scheduler := NewGenericScheduler(
+				cache,
+				nil,
+				queue,
+				test.predicates,
+				algorithm.EmptyPredicateMetadataProducer,
+				test.prioritizers,
+				algorithm.EmptyPriorityMetadataProducer,
+				extenders,
+				nil,
+				schedulertesting.FakePersistentVolumeClaimLister{},
+				false,
+				false)
+			podIgnored := &v1.Pod{}
+			machine, err := scheduler.Schedule(podIgnored, schedulertesting.FakeNodeLister(makeNodeList(test.nodes)))
+			if test.expectsErr {
+				if err == nil {
+					t.Errorf("Unexpected non-error, machine %s", machine)
+				}
+			} else {
+				if err != nil {
+					t.Errorf("Unexpected error: %v", err)
+					return
+				}
+				if test.expectedHost != machine {
+					t.Errorf("Expected: %s, Saw: %s", test.expectedHost, machine)
+				}
+			}
+		})
 	}
 }


@@ -123,11 +123,13 @@ func makeNodeList(nodeNames []string) []*v1.Node {
 func TestSelectHost(t *testing.T) {
 	scheduler := genericScheduler{}
 	tests := []struct {
+		name string
 		list schedulerapi.HostPriorityList
 		possibleHosts sets.String
 		expectsErr bool
 	}{
 		{
+			name: "unique properly ordered scores",
 			list: []schedulerapi.HostPriority{
 				{Host: "machine1.1", Score: 1},
 				{Host: "machine2.1", Score: 2},
@@ -135,8 +137,8 @@ func TestSelectHost(t *testing.T) {
 			possibleHosts: sets.NewString("machine2.1"),
 			expectsErr: false,
 		},
-		// equal scores
 		{
+			name: "equal scores",
 			list: []schedulerapi.HostPriority{
 				{Host: "machine1.1", Score: 1},
 				{Host: "machine1.2", Score: 2},
@@ -146,8 +148,8 @@ func TestSelectHost(t *testing.T) {
 			possibleHosts: sets.NewString("machine1.2", "machine1.3", "machine2.1"),
 			expectsErr: false,
 		},
-		// out of order scores
 		{
+			name: "out of order scores",
 			list: []schedulerapi.HostPriority{
 				{Host: "machine1.1", Score: 3},
 				{Host: "machine1.2", Score: 3},
@@ -158,8 +160,8 @@ func TestSelectHost(t *testing.T) {
 			possibleHosts: sets.NewString("machine1.1", "machine1.2", "machine1.3"),
 			expectsErr: false,
 		},
-		// empty priorityList
 		{
+			name: "empty priority list",
 			list: []schedulerapi.HostPriority{},
 			possibleHosts: sets.NewString(),
 			expectsErr: true,
@@ -167,22 +169,24 @@ func TestSelectHost(t *testing.T) {
 	}
 	for _, test := range tests {
-		// increase the randomness
-		for i := 0; i < 10; i++ {
-			got, err := scheduler.selectHost(test.list)
-			if test.expectsErr {
-				if err == nil {
-					t.Error("Unexpected non-error")
-				}
-			} else {
-				if err != nil {
-					t.Errorf("Unexpected error: %v", err)
-				}
-				if !test.possibleHosts.Has(got) {
-					t.Errorf("got %s is not in the possible map %v", got, test.possibleHosts)
-				}
-			}
-		}
+		t.Run(test.name, func(t *testing.T) {
+			// increase the randomness
+			for i := 0; i < 10; i++ {
+				got, err := scheduler.selectHost(test.list)
+				if test.expectsErr {
+					if err == nil {
+						t.Error("Unexpected non-error")
+					}
+				} else {
+					if err != nil {
+						t.Errorf("Unexpected error: %v", err)
+					}
+					if !test.possibleHosts.Has(got) {
+						t.Errorf("got %s is not in the possible map %v", got, test.possibleHosts)
+					}
+				}
+			}
+		})
 	}
 }
@@ -398,39 +402,41 @@ func TestGenericScheduler(t *testing.T) {
 		},
 	}
 	for _, test := range tests {
-		cache := schedulercache.New(time.Duration(0), wait.NeverStop)
-		for _, pod := range test.pods {
-			cache.AddPod(pod)
-		}
-		for _, name := range test.nodes {
-			cache.AddNode(&v1.Node{ObjectMeta: metav1.ObjectMeta{Name: name}})
-		}
-		pvcs := []*v1.PersistentVolumeClaim{}
-		pvcs = append(pvcs, test.pvcs...)
-		pvcLister := schedulertesting.FakePersistentVolumeClaimLister(pvcs)
-		scheduler := NewGenericScheduler(
-			cache,
-			nil,
-			NewSchedulingQueue(),
-			test.predicates,
-			algorithm.EmptyPredicateMetadataProducer,
-			test.prioritizers,
-			algorithm.EmptyPriorityMetadataProducer,
-			[]algorithm.SchedulerExtender{},
-			nil,
-			pvcLister,
-			test.alwaysCheckAllPredicates,
-			false)
-		machine, err := scheduler.Schedule(test.pod, schedulertesting.FakeNodeLister(makeNodeList(test.nodes)))
-		if !reflect.DeepEqual(err, test.wErr) {
-			t.Errorf("Failed : %s, Unexpected error: %v, expected: %v", test.name, err, test.wErr)
-		}
-		if test.expectedHosts != nil && !test.expectedHosts.Has(machine) {
-			t.Errorf("Failed : %s, Expected: %s, got: %s", test.name, test.expectedHosts, machine)
-		}
+		t.Run(test.name, func(t *testing.T) {
+			cache := schedulercache.New(time.Duration(0), wait.NeverStop)
+			for _, pod := range test.pods {
+				cache.AddPod(pod)
+			}
+			for _, name := range test.nodes {
+				cache.AddNode(&v1.Node{ObjectMeta: metav1.ObjectMeta{Name: name}})
+			}
+			pvcs := []*v1.PersistentVolumeClaim{}
+			pvcs = append(pvcs, test.pvcs...)
+			pvcLister := schedulertesting.FakePersistentVolumeClaimLister(pvcs)
+			scheduler := NewGenericScheduler(
+				cache,
+				nil,
+				NewSchedulingQueue(),
+				test.predicates,
+				algorithm.EmptyPredicateMetadataProducer,
+				test.prioritizers,
+				algorithm.EmptyPriorityMetadataProducer,
+				[]algorithm.SchedulerExtender{},
+				nil,
+				pvcLister,
+				test.alwaysCheckAllPredicates,
+				false)
+			machine, err := scheduler.Schedule(test.pod, schedulertesting.FakeNodeLister(makeNodeList(test.nodes)))
+			if !reflect.DeepEqual(err, test.wErr) {
+				t.Errorf("Unexpected error: %v, expected: %v", err, test.wErr)
+			}
+			if test.expectedHosts != nil && !test.expectedHosts.Has(machine) {
+				t.Errorf("Expected: %s, got: %s", test.expectedHosts, machine)
+			}
+		})
 	}
 }
@@ -473,13 +479,15 @@ func TestFindFitAllError(t *testing.T) {
 	}
 	for _, node := range nodes {
-		failures, found := predicateMap[node.Name]
-		if !found {
-			t.Errorf("failed to find node: %s in %v", node.Name, predicateMap)
-		}
-		if len(failures) != 1 || failures[0] != algorithmpredicates.ErrFakePredicate {
-			t.Errorf("unexpected failures: %v", failures)
-		}
+		t.Run(node.Name, func(t *testing.T) {
+			failures, found := predicateMap[node.Name]
+			if !found {
+				t.Errorf("failed to find node in %v", predicateMap)
+			}
+			if len(failures) != 1 || failures[0] != algorithmpredicates.ErrFakePredicate {
+				t.Errorf("unexpected failures: %v", failures)
+			}
+		})
 	}
 }
@@ -503,13 +511,15 @@ func TestFindFitSomeError(t *testing.T) {
 		if node.Name == pod.Name {
 			continue
 		}
-		failures, found := predicateMap[node.Name]
-		if !found {
-			t.Errorf("failed to find node: %s in %v", node.Name, predicateMap)
-		}
-		if len(failures) != 1 || failures[0] != algorithmpredicates.ErrFakePredicate {
-			t.Errorf("unexpected failures: %v", failures)
-		}
+		t.Run(node.Name, func(t *testing.T) {
+			failures, found := predicateMap[node.Name]
+			if !found {
+				t.Errorf("failed to find node in %v", predicateMap)
+			}
+			if len(failures) != 1 || failures[0] != algorithmpredicates.ErrFakePredicate {
+				t.Errorf("unexpected failures: %v", failures)
+			}
+		})
 	}
 }
@@ -604,7 +614,7 @@ func TestZeroRequest(t *testing.T) {
 		pod *v1.Pod
 		pods []*v1.Pod
 		nodes []*v1.Node
-		test string
+		name string
 	}{
 		// The point of these next two tests is to show you get the same priority for a zero-request pod
 		// as for a pod with the defaults requests, both when the zero-request pod is already on the machine
@@ -612,7 +622,7 @@ func TestZeroRequest(t *testing.T) {
 		{
 			pod: &v1.Pod{Spec: noResources},
 			nodes: []*v1.Node{makeNode("machine1", 1000, priorityutil.DefaultMemoryRequest*10), makeNode("machine2", 1000, priorityutil.DefaultMemoryRequest*10)},
-			test: "test priority of zero-request pod with machine with zero-request pod",
+			name: "test priority of zero-request pod with machine with zero-request pod",
 			pods: []*v1.Pod{
 				{Spec: large1}, {Spec: noResources1},
 				{Spec: large2}, {Spec: small2},
@@ -621,7 +631,7 @@ func TestZeroRequest(t *testing.T) {
 		{
 			pod: &v1.Pod{Spec: small},
 			nodes: []*v1.Node{makeNode("machine1", 1000, priorityutil.DefaultMemoryRequest*10), makeNode("machine2", 1000, priorityutil.DefaultMemoryRequest*10)},
-			test: "test priority of nonzero-request pod with machine with zero-request pod",
+			name: "test priority of nonzero-request pod with machine with zero-request pod",
 			pods: []*v1.Pod{
 				{Spec: large1}, {Spec: noResources1},
 				{Spec: large2}, {Spec: small2},
@@ -631,7 +641,7 @@ func TestZeroRequest(t *testing.T) {
 		{
 			pod: &v1.Pod{Spec: large},
 			nodes: []*v1.Node{makeNode("machine1", 1000, priorityutil.DefaultMemoryRequest*10), makeNode("machine2", 1000, priorityutil.DefaultMemoryRequest*10)},
-			test: "test priority of larger pod with machine with zero-request pod",
+			name: "test priority of larger pod with machine with zero-request pod",
 			pods: []*v1.Pod{
 				{Spec: large1}, {Spec: noResources1},
 				{Spec: large2}, {Spec: small2},
@@ -641,47 +651,49 @@ func TestZeroRequest(t *testing.T) {
 	const expectedPriority int = 25
 	for _, test := range tests {
-		// This should match the configuration in defaultPriorities() in
-		// pkg/scheduler/algorithmprovider/defaults/defaults.go if you want
-		// to test what's actually in production.
-		priorityConfigs := []algorithm.PriorityConfig{
-			{Map: algorithmpriorities.LeastRequestedPriorityMap, Weight: 1},
-			{Map: algorithmpriorities.BalancedResourceAllocationMap, Weight: 1},
-		}
-		selectorSpreadPriorityMap, selectorSpreadPriorityReduce := algorithmpriorities.NewSelectorSpreadPriority(
-			schedulertesting.FakeServiceLister([]*v1.Service{}),
-			schedulertesting.FakeControllerLister([]*v1.ReplicationController{}),
-			schedulertesting.FakeReplicaSetLister([]*extensions.ReplicaSet{}),
-			schedulertesting.FakeStatefulSetLister([]*apps.StatefulSet{}))
-		pc := algorithm.PriorityConfig{Map: selectorSpreadPriorityMap, Reduce: selectorSpreadPriorityReduce, Weight: 1}
-		priorityConfigs = append(priorityConfigs, pc)
-		nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, test.nodes)
-		mataDataProducer := algorithmpriorities.NewPriorityMetadataFactory(
-			schedulertesting.FakeServiceLister([]*v1.Service{}),
-			schedulertesting.FakeControllerLister([]*v1.ReplicationController{}),
-			schedulertesting.FakeReplicaSetLister([]*extensions.ReplicaSet{}),
-			schedulertesting.FakeStatefulSetLister([]*apps.StatefulSet{}))
-		mataData := mataDataProducer(test.pod, nodeNameToInfo)
-		list, err := PrioritizeNodes(
-			test.pod, nodeNameToInfo, mataData, priorityConfigs,
-			schedulertesting.FakeNodeLister(test.nodes), []algorithm.SchedulerExtender{})
-		if err != nil {
-			t.Errorf("unexpected error: %v", err)
-		}
-		for _, hp := range list {
-			if test.test == "test priority of larger pod with machine with zero-request pod" {
-				if hp.Score == expectedPriority {
-					t.Errorf("%s: expected non-%d for all priorities, got list %#v", test.test, expectedPriority, list)
-				}
-			} else {
-				if hp.Score != expectedPriority {
-					t.Errorf("%s: expected %d for all priorities, got list %#v", test.test, expectedPriority, list)
-				}
-			}
-		}
+		t.Run(test.name, func(t *testing.T) {
+			// This should match the configuration in defaultPriorities() in
+			// pkg/scheduler/algorithmprovider/defaults/defaults.go if you want
+			// to test what's actually in production.
+			priorityConfigs := []algorithm.PriorityConfig{
+				{Map: algorithmpriorities.LeastRequestedPriorityMap, Weight: 1},
+				{Map: algorithmpriorities.BalancedResourceAllocationMap, Weight: 1},
+			}
+			selectorSpreadPriorityMap, selectorSpreadPriorityReduce := algorithmpriorities.NewSelectorSpreadPriority(
+				schedulertesting.FakeServiceLister([]*v1.Service{}),
+				schedulertesting.FakeControllerLister([]*v1.ReplicationController{}),
+				schedulertesting.FakeReplicaSetLister([]*extensions.ReplicaSet{}),
+				schedulertesting.FakeStatefulSetLister([]*apps.StatefulSet{}))
+			pc := algorithm.PriorityConfig{Map: selectorSpreadPriorityMap, Reduce: selectorSpreadPriorityReduce, Weight: 1}
+			priorityConfigs = append(priorityConfigs, pc)
+			nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, test.nodes)
+			mataDataProducer := algorithmpriorities.NewPriorityMetadataFactory(
+				schedulertesting.FakeServiceLister([]*v1.Service{}),
+				schedulertesting.FakeControllerLister([]*v1.ReplicationController{}),
+				schedulertesting.FakeReplicaSetLister([]*extensions.ReplicaSet{}),
+				schedulertesting.FakeStatefulSetLister([]*apps.StatefulSet{}))
+			mataData := mataDataProducer(test.pod, nodeNameToInfo)
+			list, err := PrioritizeNodes(
+				test.pod, nodeNameToInfo, mataData, priorityConfigs,
+				schedulertesting.FakeNodeLister(test.nodes), []algorithm.SchedulerExtender{})
+			if err != nil {
+				t.Errorf("unexpected error: %v", err)
+			}
+			for _, hp := range list {
+				if test.name == "test priority of larger pod with machine with zero-request pod" {
+					if hp.Score == expectedPriority {
+						t.Errorf("expected non-%d for all priorities, got list %#v", expectedPriority, list)
+					}
+				} else {
+					if hp.Score != expectedPriority {
+						t.Errorf("expected %d for all priorities, got list %#v", expectedPriority, list)
					}
+				}
+			}
+		})
 	}
 }
@@ -697,30 +709,30 @@ func printNodeToVictims(nodeToVictims map[*v1.Node]*schedulerapi.Victims) string {
 	return output
 }
-func checkPreemptionVictims(testName string, expected map[string]map[string]bool, nodeToPods map[*v1.Node]*schedulerapi.Victims) error {
+func checkPreemptionVictims(expected map[string]map[string]bool, nodeToPods map[*v1.Node]*schedulerapi.Victims) error {
 	if len(expected) == len(nodeToPods) {
 		for k, victims := range nodeToPods {
 			if expPods, ok := expected[k.Name]; ok {
 				if len(victims.Pods) != len(expPods) {
-					return fmt.Errorf("test [%v]: unexpected number of pods. expected: %v, got: %v", testName, expected, printNodeToVictims(nodeToPods))
+					return fmt.Errorf("unexpected number of pods. expected: %v, got: %v", expected, printNodeToVictims(nodeToPods))
 				}
 				prevPriority := int32(math.MaxInt32)
 				for _, p := range victims.Pods {
 					// Check that pods are sorted by their priority.
 					if *p.Spec.Priority > prevPriority {
-						return fmt.Errorf("test [%v]: pod %v of node %v was not sorted by priority", testName, p.Name, k)
+						return fmt.Errorf("pod %v of node %v was not sorted by priority", p.Name, k)
 					}
 					prevPriority = *p.Spec.Priority
 					if _, ok := expPods[p.Name]; !ok {
-						return fmt.Errorf("test [%v]: pod %v was not expected. Expected: %v", testName, p.Name, expPods)
+						return fmt.Errorf("pod %v was not expected. Expected: %v", p.Name, expPods)
 					}
 				}
 			} else {
-				return fmt.Errorf("test [%v]: unexpected machines. expected: %v, got: %v", testName, expected, printNodeToVictims(nodeToPods))
+				return fmt.Errorf("unexpected machines. expected: %v, got: %v", expected, printNodeToVictims(nodeToPods))
 			}
 		}
 	} else {
-		return fmt.Errorf("test [%v]: unexpected number of machines. expected: %v, got: %v", testName, expected, printNodeToVictims(nodeToPods))
+		return fmt.Errorf("unexpected number of machines. expected: %v, got: %v", expected, printNodeToVictims(nodeToPods))
 	}
 	return nil
 }
@@ -906,23 +918,25 @@ func TestSelectNodesForPreemption(t *testing.T) {
 		},
 	}
 	for _, test := range tests {
-		nodes := []*v1.Node{}
-		for _, n := range test.nodes {
-			node := makeNode(n, 1000*5, priorityutil.DefaultMemoryRequest*5)
-			node.ObjectMeta.Labels = map[string]string{"hostname": node.Name}
-			nodes = append(nodes, node)
-		}
-		if test.addAffinityPredicate {
-			test.predicates[algorithmpredicates.MatchInterPodAffinityPred] = algorithmpredicates.NewPodAffinityPredicate(FakeNodeInfo(*nodes[0]), schedulertesting.FakePodLister(test.pods))
-		}
-		nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, nodes)
-		nodeToPods, err := selectNodesForPreemption(test.pod, nodeNameToInfo, nodes, test.predicates, PredicateMetadata, nil, nil)
-		if err != nil {
-			t.Error(err)
-		}
-		if err := checkPreemptionVictims(test.name, test.expected, nodeToPods); err != nil {
-			t.Error(err)
-		}
+		t.Run(test.name, func(t *testing.T) {
+			nodes := []*v1.Node{}
+			for _, n := range test.nodes {
+				node := makeNode(n, 1000*5, priorityutil.DefaultMemoryRequest*5)
+				node.ObjectMeta.Labels = map[string]string{"hostname": node.Name}
+				nodes = append(nodes, node)
+			}
+			if test.addAffinityPredicate {
+				test.predicates[algorithmpredicates.MatchInterPodAffinityPred] = algorithmpredicates.NewPodAffinityPredicate(FakeNodeInfo(*nodes[0]), schedulertesting.FakePodLister(test.pods))
+			}
+			nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, nodes)
+			nodeToPods, err := selectNodesForPreemption(test.pod, nodeNameToInfo, nodes, test.predicates, PredicateMetadata, nil, nil)
+			if err != nil {
+				t.Error(err)
+			}
+			if err := checkPreemptionVictims(test.expected, nodeToPods); err != nil {
+				t.Error(err)
+			}
+		})
 	}
 }
@@ -1069,23 +1083,25 @@ func TestPickOneNodeForPreemption(t *testing.T) {
 		},
 	}
 	for _, test := range tests {
-		nodes := []*v1.Node{}
-		for _, n := range test.nodes {
-			nodes = append(nodes, makeNode(n, priorityutil.DefaultMilliCPURequest*5, priorityutil.DefaultMemoryRequest*5))
-		}
-		nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, nodes)
-		candidateNodes, _ := selectNodesForPreemption(test.pod, nodeNameToInfo, nodes, test.predicates, PredicateMetadata, nil, nil)
-		node := pickOneNodeForPreemption(candidateNodes)
-		found := false
-		for _, nodeName := range test.expected {
-			if node.Name == nodeName {
-				found = true
-				break
-			}
-		}
-		if !found {
-			t.Errorf("test [%v]: unexpected node: %v", test.name, node)
-		}
+		t.Run(test.name, func(t *testing.T) {
+			nodes := []*v1.Node{}
+			for _, n := range test.nodes {
+				nodes = append(nodes, makeNode(n, priorityutil.DefaultMilliCPURequest*5, priorityutil.DefaultMemoryRequest*5))
+			}
+			nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, nodes)
+			candidateNodes, _ := selectNodesForPreemption(test.pod, nodeNameToInfo, nodes, test.predicates, PredicateMetadata, nil, nil)
+			node := pickOneNodeForPreemption(candidateNodes)
+			found := false
+			for _, nodeName := range test.expected {
+				if node.Name == nodeName {
+					found = true
+					break
+				}
+			}
+			if !found {
+				t.Errorf("unexpected node: %v", node)
+			}
+		})
 	}
 }
@@ -1159,15 +1175,17 @@ func TestNodesWherePreemptionMightHelp(t *testing.T) {
 	}
 	for _, test := range tests {
-		nodes := nodesWherePreemptionMightHelp(makeNodeList(nodeNames), test.failedPredMap)
-		if len(test.expected) != len(nodes) {
-			t.Errorf("test [%v]:number of nodes is not the same as expected. exptectd: %d, got: %d. Nodes: %v", test.name, len(test.expected), len(nodes), nodes)
-		}
-		for _, node := range nodes {
-			if _, found := test.expected[node.Name]; !found {
-				t.Errorf("test [%v]: node %v is not expected.", test.name, node.Name)
-			}
-		}
+		t.Run(test.name, func(t *testing.T) {
+			nodes := nodesWherePreemptionMightHelp(makeNodeList(nodeNames), test.failedPredMap)
+			if len(test.expected) != len(nodes) {
+				t.Errorf("number of nodes is not the same as expected. exptectd: %d, got: %d. Nodes: %v", len(test.expected), len(nodes), nodes)
+			}
+			for _, node := range nodes {
+				if _, found := test.expected[node.Name]; !found {
+					t.Errorf("node %v is not expected.", node.Name)
+				}
+			}
+		})
 	}
 }
@@ -1290,76 +1308,78 @@ func TestPreempt(t *testing.T) {
 	}
 	for _, test := range tests {
-		stop := make(chan struct{})
-		cache := schedulercache.New(time.Duration(0), stop)
-		for _, pod := range test.pods {
-			cache.AddPod(pod)
-		}
-		cachedNodeInfoMap := map[string]*schedulercache.NodeInfo{}
-		for _, name := range nodeNames {
-			node := makeNode(name, 1000*5, priorityutil.DefaultMemoryRequest*5)
-			cache.AddNode(node)
-			// Set nodeInfo to extenders to mock extenders' cache for preemption.
-			cachedNodeInfo := schedulercache.NewNodeInfo()
-			cachedNodeInfo.SetNode(node)
-			cachedNodeInfoMap[name] = cachedNodeInfo
-		}
-		extenders := []algorithm.SchedulerExtender{}
-		for _, extender := range test.extenders {
-			// Set nodeInfoMap as extenders cached node information.
-			extender.cachedNodeNameToInfo = cachedNodeInfoMap
-			extenders = append(extenders, extender)
-		}
-		scheduler := NewGenericScheduler(
-			cache,
-			nil,
-			NewSchedulingQueue(),
-			map[string]algorithm.FitPredicate{"matches": algorithmpredicates.PodFitsResources},
-			algorithm.EmptyPredicateMetadataProducer,
-			[]algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}},
-			algorithm.EmptyPriorityMetadataProducer,
-			extenders,
-			nil,
-			schedulertesting.FakePersistentVolumeClaimLister{},
-			false,
-			false)
-		// Call Preempt and check the expected results.
-		node, victims, _, err := scheduler.Preempt(test.pod, schedulertesting.FakeNodeLister(makeNodeList(nodeNames)), error(&FitError{Pod: test.pod, FailedPredicates: failedPredMap}))
-		if err != nil {
-			t.Errorf("test [%v]: unexpected error in preemption: %v", test.name, err)
-		}
-		if (node != nil && node.Name != test.expectedNode) || (node == nil && len(test.expectedNode) != 0) {
-			t.Errorf("test [%v]: expected node: %v, got: %v", test.name, test.expectedNode, node)
-		}
-		if len(victims) != len(test.expectedPods) {
-			t.Errorf("test [%v]: expected %v pods, got %v.", test.name, len(test.expectedPods), len(victims))
-		}
-		for _, victim := range victims {
-			found := false
-			for _, expPod := range test.expectedPods {
-				if expPod == victim.Name {
-					found = true
-					break
-				}
-			}
-			if !found {
-				t.Errorf("test [%v]: pod %v is not expected to be a victim.", test.name, victim.Name)
-			}
-			// Mark the victims for deletion and record the preemptor's nominated node name.
-			now := metav1.Now()
-			victim.DeletionTimestamp = &now
-			test.pod.Status.NominatedNodeName = node.Name
-		}
-		// Call preempt again and make sure it doesn't preempt any more pods.
-		node, victims, _, err = scheduler.Preempt(test.pod, schedulertesting.FakeNodeLister(makeNodeList(nodeNames)), error(&FitError{Pod: test.pod, FailedPredicates: failedPredMap}))
-		if err != nil {
-			t.Errorf("test [%v]: unexpected error in preemption: %v", test.name, err)
-		}
-		if node != nil && len(victims) > 0 {
-			t.Errorf("test [%v]: didn't expect any more preemption. Node %v is selected for preemption.", test.name, node)
-		}
-		close(stop)
+		t.Run(test.name, func(t *testing.T) {
+			stop := make(chan struct{})
+			cache := schedulercache.New(time.Duration(0), stop)
+			for _, pod := range test.pods {
+				cache.AddPod(pod)
+			}
+			cachedNodeInfoMap := map[string]*schedulercache.NodeInfo{}
+			for _, name := range nodeNames {
+				node := makeNode(name, 1000*5, priorityutil.DefaultMemoryRequest*5)
+				cache.AddNode(node)
+				// Set nodeInfo to extenders to mock extenders' cache for preemption.
+				cachedNodeInfo := schedulercache.NewNodeInfo()
+				cachedNodeInfo.SetNode(node)
+				cachedNodeInfoMap[name] = cachedNodeInfo
+			}
+			extenders := []algorithm.SchedulerExtender{}
+			for _, extender := range test.extenders {
+				// Set nodeInfoMap as extenders cached node information.
+				extender.cachedNodeNameToInfo = cachedNodeInfoMap
+				extenders = append(extenders, extender)
+			}
+			scheduler := NewGenericScheduler(
+				cache,
+				nil,
+				NewSchedulingQueue(),
+				map[string]algorithm.FitPredicate{"matches": algorithmpredicates.PodFitsResources},
+				algorithm.EmptyPredicateMetadataProducer,
+				[]algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}},
+				algorithm.EmptyPriorityMetadataProducer,
+				extenders,
+				nil,
+				schedulertesting.FakePersistentVolumeClaimLister{},
+				false,
+				false)
+			// Call Preempt and check the expected results.
+			node, victims, _, err := scheduler.Preempt(test.pod, schedulertesting.FakeNodeLister(makeNodeList(nodeNames)), error(&FitError{Pod: test.pod, FailedPredicates: failedPredMap}))
+			if err != nil {
+				t.Errorf("unexpected error in preemption: %v", err)
+			}
+			if (node != nil && node.Name != test.expectedNode) || (node == nil && len(test.expectedNode) != 0) {
+				t.Errorf("expected node: %v, got: %v", test.expectedNode, node)
+			}
+			if len(victims) != len(test.expectedPods) {
+				t.Errorf("expected %v pods, got %v.", len(test.expectedPods), len(victims))
+			}
+			for _, victim := range victims {
+				found := false
+				for _, expPod := range test.expectedPods {
+					if expPod == victim.Name {
+						found = true
+						break
+					}
+				}
+				if !found {
+					t.Errorf("pod %v is not expected to be a victim.", victim.Name)
+				}
+				// Mark the victims for deletion and record the preemptor's nominated node name.
+				now := metav1.Now()
+				victim.DeletionTimestamp = &now
+				test.pod.Status.NominatedNodeName = node.Name
+			}
+			// Call preempt again and make sure it doesn't preempt any more pods.
+			node, victims, _, err = scheduler.Preempt(test.pod, schedulertesting.FakeNodeLister(makeNodeList(nodeNames)), error(&FitError{Pod: test.pod, FailedPredicates: failedPredMap}))
+			if err != nil {
+				t.Errorf("unexpected error in preemption: %v", err)
+			}
+			if node != nil && len(victims) > 0 {
+				t.Errorf("didn't expect any more preemption. Node %v is selected for preemption.", node)
+			}
+			close(stop)
+		})
 	}
 }


@@ -375,6 +375,7 @@ func TestUnschedulablePodsMap(t *testing.T) {
 	updatedPods[3] = pods[3].DeepCopy()
 	tests := []struct {
+		name string
 		podsToAdd []*v1.Pod
 		expectedMapAfterAdd map[string]*v1.Pod
 		podsToUpdate []*v1.Pod
@@ -383,6 +384,7 @@ func TestUnschedulablePodsMap(t *testing.T) {
 		expectedMapAfterDelete map[string]*v1.Pod
 	}{
 		{
+			name: "create, update, delete subset of pods",
 			podsToAdd: []*v1.Pod{pods[0], pods[1], pods[2], pods[3]},
 			expectedMapAfterAdd: map[string]*v1.Pod{
 				util.GetPodFullName(pods[0]): pods[0],
@@ -404,6 +406,7 @@ func TestUnschedulablePodsMap(t *testing.T) {
 			},
 		},
 		{
+			name: "create, update, delete all",
 			podsToAdd: []*v1.Pod{pods[0], pods[3]},
 			expectedMapAfterAdd: map[string]*v1.Pod{
 				util.GetPodFullName(pods[0]): pods[0],
@@ -418,6 +421,7 @@ func TestUnschedulablePodsMap(t *testing.T) {
 			expectedMapAfterDelete: map[string]*v1.Pod{},
 		},
 		{
+			name: "delete non-existing and existing pods",
 			podsToAdd: []*v1.Pod{pods[1], pods[2]},
 			expectedMapAfterAdd: map[string]*v1.Pod{
 				util.GetPodFullName(pods[1]): pods[1],
@@ -435,35 +439,37 @@ func TestUnschedulablePodsMap(t *testing.T) {
 		},
 	}
-	for i, test := range tests {
-		upm := newUnschedulablePodsMap()
-		for _, p := range test.podsToAdd {
-			upm.addOrUpdate(p)
-		}
-		if !reflect.DeepEqual(upm.pods, test.expectedMapAfterAdd) {
-			t.Errorf("#%d: Unexpected map after adding pods. Expected: %v, got: %v",
-				i, test.expectedMapAfterAdd, upm.pods)
-		}
-		if len(test.podsToUpdate) > 0 {
-			for _, p := range test.podsToUpdate {
-				upm.addOrUpdate(p)
-			}
-			if !reflect.DeepEqual(upm.pods, test.expectedMapAfterUpdate) {
-				t.Errorf("#%d: Unexpected map after updating pods. Expected: %v, got: %v",
-					i, test.expectedMapAfterUpdate, upm.pods)
-			}
-		}
-		for _, p := range test.podsToDelete {
-			upm.delete(p)
-		}
-		if !reflect.DeepEqual(upm.pods, test.expectedMapAfterDelete) {
-			t.Errorf("#%d: Unexpected map after deleting pods. Expected: %v, got: %v",
-				i, test.expectedMapAfterDelete, upm.pods)
-		}
-		upm.clear()
-		if len(upm.pods) != 0 {
-			t.Errorf("Expected the map to be empty, but has %v elements.", len(upm.pods))
-		}
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			upm := newUnschedulablePodsMap()
+			for _, p := range test.podsToAdd {
+				upm.addOrUpdate(p)
+			}
+			if !reflect.DeepEqual(upm.pods, test.expectedMapAfterAdd) {
+				t.Errorf("Unexpected map after adding pods. Expected: %v, got: %v",
+					test.expectedMapAfterAdd, upm.pods)
+			}
+			if len(test.podsToUpdate) > 0 {
+				for _, p := range test.podsToUpdate {
+					upm.addOrUpdate(p)
+				}
+				if !reflect.DeepEqual(upm.pods, test.expectedMapAfterUpdate) {
+					t.Errorf("Unexpected map after updating pods. Expected: %v, got: %v",
+						test.expectedMapAfterUpdate, upm.pods)
+				}
+			}
+			for _, p := range test.podsToDelete {
+				upm.delete(p)
+			}
+			if !reflect.DeepEqual(upm.pods, test.expectedMapAfterDelete) {
+				t.Errorf("Unexpected map after deleting pods. Expected: %v, got: %v",
+					test.expectedMapAfterDelete, upm.pods)
+			}
+			upm.clear()
+			if len(upm.pods) != 0 {
+				t.Errorf("Expected the map to be empty, but has %v elements.", len(upm.pods))
+			}
+		})
 	}
 }
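
With each table entry running as a named subtest, a single case can be re-run in isolation via go test's -run flag, using the test name plus the entry's name with spaces replaced by underscores, for example go test -run 'TestSelectHost/equal_scores' (the package path for these files is not shown on this page, so it is omitted here).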