mirror of https://github.com/k3s-io/k3s
use subtest for table units

remove duplicate test name from error messages
https://github.com/kubernetes/kubernetes/pull/63665#discussion_r203574001
https://github.com/kubernetes/kubernetes/pull/63665#discussion_r203574012

remove subtest from the test setup loop

do not break on test failure
https://github.com/kubernetes/kubernetes/pull/63665#discussion_r203571355
parent 53ee0c8652
commit ad234e58be
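The diff below converts the scheduler's table-driven test loops into named Go subtests. A minimal sketch of the pattern, using a hypothetical sum function and table rather than code from this diff:

package example

import "testing"

func sum(a, b int) int { return a + b }

func TestSum(t *testing.T) {
	tests := []struct {
		name string
		a, b int
		want int
	}{
		{name: "zeros", a: 0, b: 0, want: 0},
		{name: "positive", a: 2, b: 3, want: 5},
	}
	for _, test := range tests {
		// Each table case becomes a named subtest: failures are reported
		// under TestSum/<name>, and one failing case does not stop the rest.
		t.Run(test.name, func(t *testing.T) {
			if got := sum(test.a, test.b); got != test.want {
				// No need to repeat test.name here; t.Run already
				// prefixes the failure output with the subtest name.
				t.Errorf("expected %d, got %d", test.want, got)
			}
		})
	}
}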
@@ -343,44 +343,45 @@ func TestUpdateResult(t *testing.T) {
 		},
 	}
 	for _, test := range tests {
-		node := schedulercache.NewNodeInfo()
-		testNode := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: test.nodeName}}
-		node.SetNode(testNode)
+		t.Run(test.name, func(t *testing.T) {
+			node := schedulercache.NewNodeInfo()
+			testNode := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: test.nodeName}}
+			node.SetNode(testNode)
 
-		// Initialize and populate equivalence class cache.
-		ecache := NewCache()
-		nodeCache, _ := ecache.GetNodeCache(testNode.Name)
+			// Initialize and populate equivalence class cache.
+			ecache := NewCache()
+			nodeCache, _ := ecache.GetNodeCache(testNode.Name)
 
-		if test.expectPredicateMap {
-			predicateItem := predicateResult{
-				Fit: true,
-			}
-			nodeCache.cache[test.predicateKey] =
-				resultMap{
-					test.equivalenceHash: predicateItem,
-				}
-		}
+			if test.expectPredicateMap {
+				predicateItem := predicateResult{
+					Fit: true,
+				}
+				nodeCache.cache[test.predicateKey] =
+					resultMap{
+						test.equivalenceHash: predicateItem,
+					}
+			}
 
-		nodeCache.updateResult(
-			test.pod,
-			test.predicateKey,
-			test.fit,
-			test.reasons,
-			test.equivalenceHash,
-			test.cache,
-			node,
-		)
+			nodeCache.updateResult(
+				test.pod,
+				test.predicateKey,
+				test.fit,
+				test.reasons,
+				test.equivalenceHash,
+				test.cache,
+				node,
+			)
 
-		cachedMapItem, ok := nodeCache.cache[test.predicateKey]
-		if !ok {
-			t.Errorf("Failed: %s, can't find expected cache item: %v",
-				test.name, test.expectCacheItem)
-		} else {
-			if !reflect.DeepEqual(cachedMapItem[test.equivalenceHash], test.expectCacheItem) {
-				t.Errorf("Failed: %s, expected cached item: %v, but got: %v",
-					test.name, test.expectCacheItem, cachedMapItem[test.equivalenceHash])
-			}
-		}
+			cachedMapItem, ok := nodeCache.cache[test.predicateKey]
+			if !ok {
+				t.Errorf("can't find expected cache item: %v", test.expectCacheItem)
+			} else {
+				if !reflect.DeepEqual(cachedMapItem[test.equivalenceHash], test.expectCacheItem) {
+					t.Errorf("expected cached item: %v, but got: %v",
+						test.expectCacheItem, cachedMapItem[test.equivalenceHash])
+				}
+			}
+		})
 	}
 }
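A note on the t.Errorf changes above: go test reports a failing subtest as "--- FAIL: TestUpdateResult/<test.name>", so prefixing every message with "Failed: %s" and test.name duplicated information the test runner already provides. The rewritten messages carry only the actual mismatch.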
@@ -481,61 +482,63 @@ func TestLookupResult(t *testing.T) {
 	}
 
 	for _, test := range tests {
-		testNode := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: test.nodeName}}
+		t.Run(test.name, func(t *testing.T) {
+			testNode := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: test.nodeName}}
 
-		// Initialize and populate equivalence class cache.
-		ecache := NewCache()
-		nodeCache, _ := ecache.GetNodeCache(testNode.Name)
+			// Initialize and populate equivalence class cache.
+			ecache := NewCache()
+			nodeCache, _ := ecache.GetNodeCache(testNode.Name)
 
-		node := schedulercache.NewNodeInfo()
-		node.SetNode(testNode)
-		// set cached item to equivalence cache
-		nodeCache.updateResult(
-			test.podName,
-			test.predicateKey,
-			test.cachedItem.fit,
-			test.cachedItem.reasons,
-			test.equivalenceHashForUpdatePredicate,
-			test.cache,
-			node,
-		)
-		// if we want to do invalid, invalid the cached item
-		if test.expectedPredicateKeyMiss {
-			predicateKeys := sets.NewString()
-			predicateKeys.Insert(test.predicateKey)
-			ecache.InvalidatePredicatesOnNode(test.nodeName, predicateKeys)
-		}
-		// calculate predicate with equivalence cache
-		result, ok := nodeCache.lookupResult(test.podName,
-			test.nodeName,
-			test.predicateKey,
-			test.equivalenceHashForCalPredicate,
-		)
-		fit, reasons := result.Fit, result.FailReasons
-		// returned invalid should match expectedPredicateKeyMiss or expectedEquivalenceHashMiss
-		if test.equivalenceHashForUpdatePredicate != test.equivalenceHashForCalPredicate {
-			if ok && test.expectedEquivalenceHashMiss {
-				t.Errorf("Failed: %s, expected (equivalence hash) cache miss", test.name)
-			}
-			if !ok && !test.expectedEquivalenceHashMiss {
-				t.Errorf("Failed: %s, expected (equivalence hash) cache hit", test.name)
-			}
-		} else {
-			if ok && test.expectedPredicateKeyMiss {
-				t.Errorf("Failed: %s, expected (predicate key) cache miss", test.name)
-			}
-			if !ok && !test.expectedPredicateKeyMiss {
-				t.Errorf("Failed: %s, expected (predicate key) cache hit", test.name)
-			}
-		}
-		// returned predicate result should match expected predicate item
-		if fit != test.expectedPredicateItem.fit {
-			t.Errorf("Failed: %s, expected fit: %v, but got: %v", test.name, test.cachedItem.fit, fit)
-		}
-		if !slicesEqual(reasons, test.expectedPredicateItem.reasons) {
-			t.Errorf("Failed: %s, expected reasons: %v, but got: %v",
-				test.name, test.expectedPredicateItem.reasons, reasons)
-		}
+			node := schedulercache.NewNodeInfo()
+			node.SetNode(testNode)
+			// set cached item to equivalence cache
+			nodeCache.updateResult(
+				test.podName,
+				test.predicateKey,
+				test.cachedItem.fit,
+				test.cachedItem.reasons,
+				test.equivalenceHashForUpdatePredicate,
+				test.cache,
+				node,
+			)
+			// if we want to do invalid, invalid the cached item
+			if test.expectedPredicateKeyMiss {
+				predicateKeys := sets.NewString()
+				predicateKeys.Insert(test.predicateKey)
+				ecache.InvalidatePredicatesOnNode(test.nodeName, predicateKeys)
+			}
+			// calculate predicate with equivalence cache
+			result, ok := nodeCache.lookupResult(test.podName,
+				test.nodeName,
+				test.predicateKey,
+				test.equivalenceHashForCalPredicate,
+			)
+			fit, reasons := result.Fit, result.FailReasons
+			// returned invalid should match expectedPredicateKeyMiss or expectedEquivalenceHashMiss
+			if test.equivalenceHashForUpdatePredicate != test.equivalenceHashForCalPredicate {
+				if ok && test.expectedEquivalenceHashMiss {
+					t.Errorf("Failed: %s, expected (equivalence hash) cache miss", test.name)
+				}
+				if !ok && !test.expectedEquivalenceHashMiss {
+					t.Errorf("Failed: %s, expected (equivalence hash) cache hit", test.name)
+				}
+			} else {
+				if ok && test.expectedPredicateKeyMiss {
+					t.Errorf("Failed: %s, expected (predicate key) cache miss", test.name)
+				}
+				if !ok && !test.expectedPredicateKeyMiss {
+					t.Errorf("Failed: %s, expected (predicate key) cache hit", test.name)
+				}
+			}
+			// returned predicate result should match expected predicate item
+			if fit != test.expectedPredicateItem.fit {
+				t.Errorf("Failed: %s, expected fit: %v, but got: %v", test.name, test.cachedItem.fit, fit)
+			}
+			if !slicesEqual(reasons, test.expectedPredicateItem.reasons) {
+				t.Errorf("Failed: %s, expected reasons: %v, but got: %v",
+					test.name, test.expectedPredicateItem.reasons, reasons)
+			}
+		})
 	}
 }
@@ -658,6 +661,7 @@ func TestInvalidateCachedPredicateItemOfAllNodes(t *testing.T) {
 	testPredicate := "GeneralPredicates"
 	// tests is used to initialize all nodes
 	tests := []struct {
+		name                              string
 		podName                           string
 		nodeName                          string
 		equivalenceHashForUpdatePredicate uint64

@@ -665,6 +669,7 @@ func TestInvalidateCachedPredicateItemOfAllNodes(t *testing.T) {
 		cache                             schedulercache.Cache
 	}{
 		{
+			name:                              "hash predicate 123 not fits host ports",
 			podName:                           "testPod",
 			nodeName:                          "node1",
 			equivalenceHashForUpdatePredicate: 123,

@@ -677,6 +682,7 @@ func TestInvalidateCachedPredicateItemOfAllNodes(t *testing.T) {
 			cache: &upToDateCache{},
 		},
 		{
+			name:                              "hash predicate 456 not fits host ports",
 			podName:                           "testPod",
 			nodeName:                          "node2",
 			equivalenceHashForUpdatePredicate: 456,

@@ -689,6 +695,7 @@ func TestInvalidateCachedPredicateItemOfAllNodes(t *testing.T) {
 			cache: &upToDateCache{},
 		},
 		{
+			name:                              "hash predicate 123 fits",
 			podName:                           "testPod",
 			nodeName:                          "node3",
 			equivalenceHashForUpdatePredicate: 123,

@@ -723,13 +730,14 @@ func TestInvalidateCachedPredicateItemOfAllNodes(t *testing.T) {
 
 	// there should be no cached predicate any more
 	for _, test := range tests {
-		if nodeCache, exist := ecache.nodeToCache[test.nodeName]; exist {
-			if _, exist := nodeCache.cache[testPredicate]; exist {
-				t.Errorf("Failed: cached item for predicate key: %v on node: %v should be invalidated",
-					testPredicate, test.nodeName)
-				break
-			}
-		}
+		t.Run(test.name, func(t *testing.T) {
+			if nodeCache, exist := ecache.nodeToCache[test.nodeName]; exist {
+				if _, exist := nodeCache.cache[testPredicate]; exist {
+					t.Errorf("Failed: cached item for predicate key: %v on node: %v should be invalidated",
+						testPredicate, test.nodeName)
+				}
+			}
+		})
 	}
 }

@@ -737,6 +745,7 @@ func TestInvalidateAllCachedPredicateItemOfNode(t *testing.T) {
 	testPredicate := "GeneralPredicates"
 	// tests is used to initialize all nodes
 	tests := []struct {
+		name                              string
 		podName                           string
 		nodeName                          string
 		equivalenceHashForUpdatePredicate uint64

@@ -744,6 +753,7 @@ func TestInvalidateAllCachedPredicateItemOfNode(t *testing.T) {
 		cache                             schedulercache.Cache
 	}{
 		{
+			name:                              "hash predicate 123 not fits host ports",
 			podName:                           "testPod",
 			nodeName:                          "node1",
 			equivalenceHashForUpdatePredicate: 123,

@@ -754,6 +764,7 @@ func TestInvalidateAllCachedPredicateItemOfNode(t *testing.T) {
 			cache: &upToDateCache{},
 		},
 		{
+			name:                              "hash predicate 456 not fits host ports",
 			podName:                           "testPod",
 			nodeName:                          "node2",
 			equivalenceHashForUpdatePredicate: 456,

@@ -764,6 +775,7 @@ func TestInvalidateAllCachedPredicateItemOfNode(t *testing.T) {
 			cache: &upToDateCache{},
 		},
 		{
+			name:                              "hash predicate 123 fits host ports",
 			podName:                           "testPod",
 			nodeName:                          "node3",
 			equivalenceHashForUpdatePredicate: 123,

@@ -794,12 +806,13 @@ func TestInvalidateAllCachedPredicateItemOfNode(t *testing.T) {
 	}
 
 	for _, test := range tests {
-		// invalidate all cached predicate for node
-		ecache.InvalidateAllPredicatesOnNode(test.nodeName)
-		if _, ok := ecache.GetNodeCache(test.nodeName); ok {
-			t.Errorf("Failed: node: %v should not be found in internal cache", test.nodeName)
-			break
-		}
+		t.Run(test.name, func(t *testing.T) {
+			// invalidate cached predicate for all nodes
+			ecache.InvalidateAllPredicatesOnNode(test.nodeName)
+			if _, ok := ecache.GetNodeCache(test.nodeName); ok {
+				t.Errorf("Failed: cached item for node: %v should be invalidated", test.nodeName)
+			}
+		})
 	}
 }
@@ -492,43 +492,45 @@ func TestGenericSchedulerWithExtenders(t *testing.T) {
 	}
 
 	for _, test := range tests {
-		extenders := []algorithm.SchedulerExtender{}
-		for ii := range test.extenders {
-			extenders = append(extenders, &test.extenders[ii])
-		}
-		cache := schedulercache.New(time.Duration(0), wait.NeverStop)
-		for _, name := range test.nodes {
-			cache.AddNode(createNode(name))
-		}
-		queue := NewSchedulingQueue()
-		scheduler := NewGenericScheduler(
-			cache,
-			nil,
-			queue,
-			test.predicates,
-			algorithm.EmptyPredicateMetadataProducer,
-			test.prioritizers,
-			algorithm.EmptyPriorityMetadataProducer,
-			extenders,
-			nil,
-			schedulertesting.FakePersistentVolumeClaimLister{},
-			false,
-			false)
-		podIgnored := &v1.Pod{}
-		machine, err := scheduler.Schedule(podIgnored, schedulertesting.FakeNodeLister(makeNodeList(test.nodes)))
-		if test.expectsErr {
-			if err == nil {
-				t.Errorf("Unexpected non-error for %s, machine %s", test.name, machine)
-			}
-		} else {
-			if err != nil {
-				t.Errorf("Unexpected error: %v", err)
-				continue
-			}
-			if test.expectedHost != machine {
-				t.Errorf("Failed : %s, Expected: %s, Saw: %s", test.name, test.expectedHost, machine)
-			}
-		}
+		t.Run(test.name, func(t *testing.T) {
+			extenders := []algorithm.SchedulerExtender{}
+			for ii := range test.extenders {
+				extenders = append(extenders, &test.extenders[ii])
+			}
+			cache := schedulercache.New(time.Duration(0), wait.NeverStop)
+			for _, name := range test.nodes {
+				cache.AddNode(createNode(name))
+			}
+			queue := NewSchedulingQueue()
+			scheduler := NewGenericScheduler(
+				cache,
+				nil,
+				queue,
+				test.predicates,
+				algorithm.EmptyPredicateMetadataProducer,
+				test.prioritizers,
+				algorithm.EmptyPriorityMetadataProducer,
+				extenders,
+				nil,
+				schedulertesting.FakePersistentVolumeClaimLister{},
+				false,
+				false)
+			podIgnored := &v1.Pod{}
+			machine, err := scheduler.Schedule(podIgnored, schedulertesting.FakeNodeLister(makeNodeList(test.nodes)))
+			if test.expectsErr {
+				if err == nil {
+					t.Errorf("Unexpected non-error, machine %s", machine)
+				}
+			} else {
+				if err != nil {
+					t.Errorf("Unexpected error: %v", err)
+					return
+				}
+				if test.expectedHost != machine {
+					t.Errorf("Expected: %s, Saw: %s", test.expectedHost, machine)
+				}
+			}
+		})
 	}
 }
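In TestGenericSchedulerWithExtenders above, continue becomes return: inside the t.Run closure the loop keywords no longer apply, and returning ends only the current subtest while the range loop advances to the next case, so one failure no longer skips the remaining checks of other cases. A self-contained sketch of that control flow, with a hypothetical check helper standing in for a call like scheduler.Schedule (not code from this diff):

package example

import (
	"errors"
	"testing"
)

// check is a hypothetical stand-in for a scheduling call that may fail.
func check(fail bool) error {
	if fail {
		return errors.New("no fit")
	}
	return nil
}

func TestControlFlow(t *testing.T) {
	tests := []struct {
		name       string
		fail       bool
		expectsErr bool
	}{
		{name: "fits", fail: false, expectsErr: false},
		{name: "does not fit", fail: true, expectsErr: true},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			err := check(test.fail)
			if err != nil {
				if !test.expectsErr {
					t.Errorf("unexpected error: %v", err)
				}
				// In the plain loop this was `continue`; inside a subtest
				// closure, return exits only this case.
				return
			}
			if test.expectsErr {
				t.Error("unexpected non-error")
			}
		})
	}
}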
@@ -123,11 +123,13 @@ func makeNodeList(nodeNames []string) []*v1.Node {
 func TestSelectHost(t *testing.T) {
 	scheduler := genericScheduler{}
 	tests := []struct {
+		name          string
 		list          schedulerapi.HostPriorityList
 		possibleHosts sets.String
 		expectsErr    bool
 	}{
 		{
+			name: "unique properly ordered scores",
 			list: []schedulerapi.HostPriority{
 				{Host: "machine1.1", Score: 1},
 				{Host: "machine2.1", Score: 2},

@@ -135,8 +137,8 @@ func TestSelectHost(t *testing.T) {
 			possibleHosts: sets.NewString("machine2.1"),
 			expectsErr:    false,
 		},
-		// equal scores
 		{
+			name: "equal scores",
 			list: []schedulerapi.HostPriority{
 				{Host: "machine1.1", Score: 1},
 				{Host: "machine1.2", Score: 2},

@@ -146,8 +148,8 @@ func TestSelectHost(t *testing.T) {
 			possibleHosts: sets.NewString("machine1.2", "machine1.3", "machine2.1"),
 			expectsErr:    false,
 		},
-		// out of order scores
 		{
+			name: "out of order scores",
 			list: []schedulerapi.HostPriority{
 				{Host: "machine1.1", Score: 3},
 				{Host: "machine1.2", Score: 3},

@@ -158,8 +160,8 @@ func TestSelectHost(t *testing.T) {
 			possibleHosts: sets.NewString("machine1.1", "machine1.2", "machine1.3"),
 			expectsErr:    false,
 		},
-		// empty priorityList
 		{
+			name:          "empty priority list",
 			list:          []schedulerapi.HostPriority{},
 			possibleHosts: sets.NewString(),
 			expectsErr:    true,

@@ -167,22 +169,24 @@ func TestSelectHost(t *testing.T) {
 	}
 
 	for _, test := range tests {
-		// increase the randomness
-		for i := 0; i < 10; i++ {
-			got, err := scheduler.selectHost(test.list)
-			if test.expectsErr {
-				if err == nil {
-					t.Error("Unexpected non-error")
-				}
-			} else {
-				if err != nil {
-					t.Errorf("Unexpected error: %v", err)
-				}
-				if !test.possibleHosts.Has(got) {
-					t.Errorf("got %s is not in the possible map %v", got, test.possibleHosts)
-				}
-			}
-		}
+		t.Run(test.name, func(t *testing.T) {
+			// increase the randomness
+			for i := 0; i < 10; i++ {
+				got, err := scheduler.selectHost(test.list)
+				if test.expectsErr {
+					if err == nil {
+						t.Error("Unexpected non-error")
+					}
+				} else {
+					if err != nil {
+						t.Errorf("Unexpected error: %v", err)
+					}
+					if !test.possibleHosts.Has(got) {
+						t.Errorf("got %s is not in the possible map %v", got, test.possibleHosts)
+					}
+				}
+			}
+		})
 	}
 }

@@ -398,39 +402,41 @@ func TestGenericScheduler(t *testing.T) {
 		},
 	}
 	for _, test := range tests {
-		cache := schedulercache.New(time.Duration(0), wait.NeverStop)
-		for _, pod := range test.pods {
-			cache.AddPod(pod)
-		}
-		for _, name := range test.nodes {
-			cache.AddNode(&v1.Node{ObjectMeta: metav1.ObjectMeta{Name: name}})
-		}
-		pvcs := []*v1.PersistentVolumeClaim{}
-		pvcs = append(pvcs, test.pvcs...)
+		t.Run(test.name, func(t *testing.T) {
+			cache := schedulercache.New(time.Duration(0), wait.NeverStop)
+			for _, pod := range test.pods {
+				cache.AddPod(pod)
+			}
+			for _, name := range test.nodes {
+				cache.AddNode(&v1.Node{ObjectMeta: metav1.ObjectMeta{Name: name}})
+			}
+			pvcs := []*v1.PersistentVolumeClaim{}
+			pvcs = append(pvcs, test.pvcs...)
 
-		pvcLister := schedulertesting.FakePersistentVolumeClaimLister(pvcs)
+			pvcLister := schedulertesting.FakePersistentVolumeClaimLister(pvcs)
 
-		scheduler := NewGenericScheduler(
-			cache,
-			nil,
-			NewSchedulingQueue(),
-			test.predicates,
-			algorithm.EmptyPredicateMetadataProducer,
-			test.prioritizers,
-			algorithm.EmptyPriorityMetadataProducer,
-			[]algorithm.SchedulerExtender{},
-			nil,
-			pvcLister,
-			test.alwaysCheckAllPredicates,
-			false)
-		machine, err := scheduler.Schedule(test.pod, schedulertesting.FakeNodeLister(makeNodeList(test.nodes)))
+			scheduler := NewGenericScheduler(
+				cache,
+				nil,
+				NewSchedulingQueue(),
+				test.predicates,
+				algorithm.EmptyPredicateMetadataProducer,
+				test.prioritizers,
+				algorithm.EmptyPriorityMetadataProducer,
+				[]algorithm.SchedulerExtender{},
+				nil,
+				pvcLister,
+				test.alwaysCheckAllPredicates,
+				false)
+			machine, err := scheduler.Schedule(test.pod, schedulertesting.FakeNodeLister(makeNodeList(test.nodes)))
 
-		if !reflect.DeepEqual(err, test.wErr) {
-			t.Errorf("Failed : %s, Unexpected error: %v, expected: %v", test.name, err, test.wErr)
-		}
-		if test.expectedHosts != nil && !test.expectedHosts.Has(machine) {
-			t.Errorf("Failed : %s, Expected: %s, got: %s", test.name, test.expectedHosts, machine)
-		}
+			if !reflect.DeepEqual(err, test.wErr) {
+				t.Errorf("Unexpected error: %v, expected: %v", err, test.wErr)
+			}
+			if test.expectedHosts != nil && !test.expectedHosts.Has(machine) {
+				t.Errorf("Expected: %s, got: %s", test.expectedHosts, machine)
+			}
+		})
 	}
 }

@@ -473,13 +479,15 @@ func TestFindFitAllError(t *testing.T) {
 	}
 
 	for _, node := range nodes {
-		failures, found := predicateMap[node.Name]
-		if !found {
-			t.Errorf("failed to find node: %s in %v", node.Name, predicateMap)
-		}
-		if len(failures) != 1 || failures[0] != algorithmpredicates.ErrFakePredicate {
-			t.Errorf("unexpected failures: %v", failures)
-		}
+		t.Run(node.Name, func(t *testing.T) {
+			failures, found := predicateMap[node.Name]
+			if !found {
+				t.Errorf("failed to find node in %v", predicateMap)
+			}
+			if len(failures) != 1 || failures[0] != algorithmpredicates.ErrFakePredicate {
+				t.Errorf("unexpected failures: %v", failures)
+			}
+		})
 	}
 }

@@ -503,13 +511,15 @@ func TestFindFitSomeError(t *testing.T) {
 		if node.Name == pod.Name {
 			continue
 		}
-		failures, found := predicateMap[node.Name]
-		if !found {
-			t.Errorf("failed to find node: %s in %v", node.Name, predicateMap)
-		}
-		if len(failures) != 1 || failures[0] != algorithmpredicates.ErrFakePredicate {
-			t.Errorf("unexpected failures: %v", failures)
-		}
+		t.Run(node.Name, func(t *testing.T) {
+			failures, found := predicateMap[node.Name]
+			if !found {
+				t.Errorf("failed to find node in %v", predicateMap)
+			}
+			if len(failures) != 1 || failures[0] != algorithmpredicates.ErrFakePredicate {
+				t.Errorf("unexpected failures: %v", failures)
+			}
+		})
 	}
 }

@@ -604,7 +614,7 @@ func TestZeroRequest(t *testing.T) {
 		pod   *v1.Pod
 		pods  []*v1.Pod
 		nodes []*v1.Node
-		test  string
+		name  string
 	}{
 		// The point of these next two tests is to show you get the same priority for a zero-request pod
 		// as for a pod with the defaults requests, both when the zero-request pod is already on the machine

@@ -612,7 +622,7 @@ func TestZeroRequest(t *testing.T) {
 		{
 			pod:   &v1.Pod{Spec: noResources},
 			nodes: []*v1.Node{makeNode("machine1", 1000, priorityutil.DefaultMemoryRequest*10), makeNode("machine2", 1000, priorityutil.DefaultMemoryRequest*10)},
-			test:  "test priority of zero-request pod with machine with zero-request pod",
+			name:  "test priority of zero-request pod with machine with zero-request pod",
 			pods: []*v1.Pod{
 				{Spec: large1}, {Spec: noResources1},
 				{Spec: large2}, {Spec: small2},

@@ -621,7 +631,7 @@ func TestZeroRequest(t *testing.T) {
 		{
 			pod:   &v1.Pod{Spec: small},
 			nodes: []*v1.Node{makeNode("machine1", 1000, priorityutil.DefaultMemoryRequest*10), makeNode("machine2", 1000, priorityutil.DefaultMemoryRequest*10)},
-			test:  "test priority of nonzero-request pod with machine with zero-request pod",
+			name:  "test priority of nonzero-request pod with machine with zero-request pod",
 			pods: []*v1.Pod{
 				{Spec: large1}, {Spec: noResources1},
 				{Spec: large2}, {Spec: small2},

@@ -631,7 +641,7 @@ func TestZeroRequest(t *testing.T) {
 		{
 			pod:   &v1.Pod{Spec: large},
 			nodes: []*v1.Node{makeNode("machine1", 1000, priorityutil.DefaultMemoryRequest*10), makeNode("machine2", 1000, priorityutil.DefaultMemoryRequest*10)},
-			test:  "test priority of larger pod with machine with zero-request pod",
+			name:  "test priority of larger pod with machine with zero-request pod",
 			pods: []*v1.Pod{
 				{Spec: large1}, {Spec: noResources1},
 				{Spec: large2}, {Spec: small2},

@@ -641,47 +651,49 @@ func TestZeroRequest(t *testing.T) {
 
 	const expectedPriority int = 25
 	for _, test := range tests {
-		// This should match the configuration in defaultPriorities() in
-		// pkg/scheduler/algorithmprovider/defaults/defaults.go if you want
-		// to test what's actually in production.
-		priorityConfigs := []algorithm.PriorityConfig{
-			{Map: algorithmpriorities.LeastRequestedPriorityMap, Weight: 1},
-			{Map: algorithmpriorities.BalancedResourceAllocationMap, Weight: 1},
-		}
-		selectorSpreadPriorityMap, selectorSpreadPriorityReduce := algorithmpriorities.NewSelectorSpreadPriority(
-			schedulertesting.FakeServiceLister([]*v1.Service{}),
-			schedulertesting.FakeControllerLister([]*v1.ReplicationController{}),
-			schedulertesting.FakeReplicaSetLister([]*extensions.ReplicaSet{}),
-			schedulertesting.FakeStatefulSetLister([]*apps.StatefulSet{}))
-		pc := algorithm.PriorityConfig{Map: selectorSpreadPriorityMap, Reduce: selectorSpreadPriorityReduce, Weight: 1}
-		priorityConfigs = append(priorityConfigs, pc)
+		t.Run(test.name, func(t *testing.T) {
+			// This should match the configuration in defaultPriorities() in
+			// pkg/scheduler/algorithmprovider/defaults/defaults.go if you want
+			// to test what's actually in production.
+			priorityConfigs := []algorithm.PriorityConfig{
+				{Map: algorithmpriorities.LeastRequestedPriorityMap, Weight: 1},
+				{Map: algorithmpriorities.BalancedResourceAllocationMap, Weight: 1},
+			}
+			selectorSpreadPriorityMap, selectorSpreadPriorityReduce := algorithmpriorities.NewSelectorSpreadPriority(
+				schedulertesting.FakeServiceLister([]*v1.Service{}),
+				schedulertesting.FakeControllerLister([]*v1.ReplicationController{}),
+				schedulertesting.FakeReplicaSetLister([]*extensions.ReplicaSet{}),
+				schedulertesting.FakeStatefulSetLister([]*apps.StatefulSet{}))
+			pc := algorithm.PriorityConfig{Map: selectorSpreadPriorityMap, Reduce: selectorSpreadPriorityReduce, Weight: 1}
+			priorityConfigs = append(priorityConfigs, pc)
 
-		nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, test.nodes)
+			nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, test.nodes)
 
-		mataDataProducer := algorithmpriorities.NewPriorityMetadataFactory(
-			schedulertesting.FakeServiceLister([]*v1.Service{}),
-			schedulertesting.FakeControllerLister([]*v1.ReplicationController{}),
-			schedulertesting.FakeReplicaSetLister([]*extensions.ReplicaSet{}),
-			schedulertesting.FakeStatefulSetLister([]*apps.StatefulSet{}))
-		mataData := mataDataProducer(test.pod, nodeNameToInfo)
+			mataDataProducer := algorithmpriorities.NewPriorityMetadataFactory(
+				schedulertesting.FakeServiceLister([]*v1.Service{}),
+				schedulertesting.FakeControllerLister([]*v1.ReplicationController{}),
+				schedulertesting.FakeReplicaSetLister([]*extensions.ReplicaSet{}),
+				schedulertesting.FakeStatefulSetLister([]*apps.StatefulSet{}))
+			mataData := mataDataProducer(test.pod, nodeNameToInfo)
 
-		list, err := PrioritizeNodes(
-			test.pod, nodeNameToInfo, mataData, priorityConfigs,
-			schedulertesting.FakeNodeLister(test.nodes), []algorithm.SchedulerExtender{})
-		if err != nil {
-			t.Errorf("unexpected error: %v", err)
-		}
-		for _, hp := range list {
-			if test.test == "test priority of larger pod with machine with zero-request pod" {
-				if hp.Score == expectedPriority {
-					t.Errorf("%s: expected non-%d for all priorities, got list %#v", test.test, expectedPriority, list)
-				}
-			} else {
-				if hp.Score != expectedPriority {
-					t.Errorf("%s: expected %d for all priorities, got list %#v", test.test, expectedPriority, list)
-				}
-			}
-		}
+			list, err := PrioritizeNodes(
+				test.pod, nodeNameToInfo, mataData, priorityConfigs,
+				schedulertesting.FakeNodeLister(test.nodes), []algorithm.SchedulerExtender{})
+			if err != nil {
+				t.Errorf("unexpected error: %v", err)
+			}
+			for _, hp := range list {
+				if test.name == "test priority of larger pod with machine with zero-request pod" {
+					if hp.Score == expectedPriority {
+						t.Errorf("expected non-%d for all priorities, got list %#v", expectedPriority, list)
+					}
+				} else {
+					if hp.Score != expectedPriority {
+						t.Errorf("expected %d for all priorities, got list %#v", expectedPriority, list)
					}
+				}
+			}
+		})
 	}
 }
@@ -697,30 +709,30 @@ func printNodeToVictims(nodeToVictims map[*v1.Node]*schedulerapi.Victims) string {
 	return output
 }
 
-func checkPreemptionVictims(testName string, expected map[string]map[string]bool, nodeToPods map[*v1.Node]*schedulerapi.Victims) error {
+func checkPreemptionVictims(expected map[string]map[string]bool, nodeToPods map[*v1.Node]*schedulerapi.Victims) error {
 	if len(expected) == len(nodeToPods) {
 		for k, victims := range nodeToPods {
 			if expPods, ok := expected[k.Name]; ok {
 				if len(victims.Pods) != len(expPods) {
-					return fmt.Errorf("test [%v]: unexpected number of pods. expected: %v, got: %v", testName, expected, printNodeToVictims(nodeToPods))
+					return fmt.Errorf("unexpected number of pods. expected: %v, got: %v", expected, printNodeToVictims(nodeToPods))
 				}
 				prevPriority := int32(math.MaxInt32)
 				for _, p := range victims.Pods {
 					// Check that pods are sorted by their priority.
 					if *p.Spec.Priority > prevPriority {
-						return fmt.Errorf("test [%v]: pod %v of node %v was not sorted by priority", testName, p.Name, k)
+						return fmt.Errorf("pod %v of node %v was not sorted by priority", p.Name, k)
 					}
 					prevPriority = *p.Spec.Priority
 					if _, ok := expPods[p.Name]; !ok {
-						return fmt.Errorf("test [%v]: pod %v was not expected. Expected: %v", testName, p.Name, expPods)
+						return fmt.Errorf("pod %v was not expected. Expected: %v", p.Name, expPods)
 					}
 				}
 			} else {
-				return fmt.Errorf("test [%v]: unexpected machines. expected: %v, got: %v", testName, expected, printNodeToVictims(nodeToPods))
+				return fmt.Errorf("unexpected machines. expected: %v, got: %v", expected, printNodeToVictims(nodeToPods))
 			}
 		}
 	} else {
-		return fmt.Errorf("test [%v]: unexpected number of machines. expected: %v, got: %v", testName, expected, printNodeToVictims(nodeToPods))
+		return fmt.Errorf("unexpected number of machines. expected: %v, got: %v", expected, printNodeToVictims(nodeToPods))
 	}
 	return nil
 }
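With checkPreemptionVictims no longer threading a testName parameter through every error, helpers can return bare errors and let the enclosing subtest attribute them. A minimal sketch of that helper pattern, with hypothetical names (not code from this diff):

package example

import (
	"fmt"
	"testing"
)

// checkCount is a hypothetical helper in the style of checkPreemptionVictims:
// it returns a plain error and leaves naming the failing case to t.Run.
func checkCount(expected, got int) error {
	if expected != got {
		return fmt.Errorf("unexpected count. expected: %v, got: %v", expected, got)
	}
	return nil
}

func TestVictimCounts(t *testing.T) {
	tests := []struct {
		name          string
		expected, got int
	}{
		{name: "match", expected: 2, got: 2},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			// The error carries no test name; go test attributes it to
			// TestVictimCounts/<name> automatically.
			if err := checkCount(test.expected, test.got); err != nil {
				t.Error(err)
			}
		})
	}
}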
@@ -906,23 +918,25 @@ func TestSelectNodesForPreemption(t *testing.T) {
 		},
 	}
 	for _, test := range tests {
-		nodes := []*v1.Node{}
-		for _, n := range test.nodes {
-			node := makeNode(n, 1000*5, priorityutil.DefaultMemoryRequest*5)
-			node.ObjectMeta.Labels = map[string]string{"hostname": node.Name}
-			nodes = append(nodes, node)
-		}
-		if test.addAffinityPredicate {
-			test.predicates[algorithmpredicates.MatchInterPodAffinityPred] = algorithmpredicates.NewPodAffinityPredicate(FakeNodeInfo(*nodes[0]), schedulertesting.FakePodLister(test.pods))
-		}
-		nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, nodes)
-		nodeToPods, err := selectNodesForPreemption(test.pod, nodeNameToInfo, nodes, test.predicates, PredicateMetadata, nil, nil)
-		if err != nil {
-			t.Error(err)
-		}
-		if err := checkPreemptionVictims(test.name, test.expected, nodeToPods); err != nil {
-			t.Error(err)
-		}
+		t.Run(test.name, func(t *testing.T) {
+			nodes := []*v1.Node{}
+			for _, n := range test.nodes {
+				node := makeNode(n, 1000*5, priorityutil.DefaultMemoryRequest*5)
+				node.ObjectMeta.Labels = map[string]string{"hostname": node.Name}
+				nodes = append(nodes, node)
+			}
+			if test.addAffinityPredicate {
+				test.predicates[algorithmpredicates.MatchInterPodAffinityPred] = algorithmpredicates.NewPodAffinityPredicate(FakeNodeInfo(*nodes[0]), schedulertesting.FakePodLister(test.pods))
+			}
+			nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, nodes)
+			nodeToPods, err := selectNodesForPreemption(test.pod, nodeNameToInfo, nodes, test.predicates, PredicateMetadata, nil, nil)
+			if err != nil {
+				t.Error(err)
+			}
+			if err := checkPreemptionVictims(test.expected, nodeToPods); err != nil {
+				t.Error(err)
+			}
+		})
 	}
 }

@@ -1069,23 +1083,25 @@ func TestPickOneNodeForPreemption(t *testing.T) {
 		},
 	}
 	for _, test := range tests {
-		nodes := []*v1.Node{}
-		for _, n := range test.nodes {
-			nodes = append(nodes, makeNode(n, priorityutil.DefaultMilliCPURequest*5, priorityutil.DefaultMemoryRequest*5))
-		}
-		nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, nodes)
-		candidateNodes, _ := selectNodesForPreemption(test.pod, nodeNameToInfo, nodes, test.predicates, PredicateMetadata, nil, nil)
-		node := pickOneNodeForPreemption(candidateNodes)
-		found := false
-		for _, nodeName := range test.expected {
-			if node.Name == nodeName {
-				found = true
-				break
-			}
-		}
-		if !found {
-			t.Errorf("test [%v]: unexpected node: %v", test.name, node)
-		}
+		t.Run(test.name, func(t *testing.T) {
+			nodes := []*v1.Node{}
+			for _, n := range test.nodes {
+				nodes = append(nodes, makeNode(n, priorityutil.DefaultMilliCPURequest*5, priorityutil.DefaultMemoryRequest*5))
+			}
+			nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, nodes)
+			candidateNodes, _ := selectNodesForPreemption(test.pod, nodeNameToInfo, nodes, test.predicates, PredicateMetadata, nil, nil)
+			node := pickOneNodeForPreemption(candidateNodes)
+			found := false
+			for _, nodeName := range test.expected {
+				if node.Name == nodeName {
+					found = true
+					break
+				}
+			}
+			if !found {
+				t.Errorf("unexpected node: %v", node)
+			}
+		})
 	}
 }

@@ -1159,15 +1175,17 @@ func TestNodesWherePreemptionMightHelp(t *testing.T) {
 	}
 
 	for _, test := range tests {
-		nodes := nodesWherePreemptionMightHelp(makeNodeList(nodeNames), test.failedPredMap)
-		if len(test.expected) != len(nodes) {
-			t.Errorf("test [%v]:number of nodes is not the same as expected. exptectd: %d, got: %d. Nodes: %v", test.name, len(test.expected), len(nodes), nodes)
-		}
-		for _, node := range nodes {
-			if _, found := test.expected[node.Name]; !found {
-				t.Errorf("test [%v]: node %v is not expected.", test.name, node.Name)
-			}
-		}
+		t.Run(test.name, func(t *testing.T) {
+			nodes := nodesWherePreemptionMightHelp(makeNodeList(nodeNames), test.failedPredMap)
+			if len(test.expected) != len(nodes) {
+				t.Errorf("number of nodes is not the same as expected. exptectd: %d, got: %d. Nodes: %v", len(test.expected), len(nodes), nodes)
+			}
+			for _, node := range nodes {
+				if _, found := test.expected[node.Name]; !found {
+					t.Errorf("node %v is not expected.", node.Name)
+				}
+			}
+		})
 	}
 }
@@ -1290,76 +1308,78 @@ func TestPreempt(t *testing.T) {
 	}
 
 	for _, test := range tests {
-		stop := make(chan struct{})
-		cache := schedulercache.New(time.Duration(0), stop)
-		for _, pod := range test.pods {
-			cache.AddPod(pod)
-		}
-		cachedNodeInfoMap := map[string]*schedulercache.NodeInfo{}
-		for _, name := range nodeNames {
-			node := makeNode(name, 1000*5, priorityutil.DefaultMemoryRequest*5)
-			cache.AddNode(node)
+		t.Run(test.name, func(t *testing.T) {
+			stop := make(chan struct{})
+			cache := schedulercache.New(time.Duration(0), stop)
+			for _, pod := range test.pods {
+				cache.AddPod(pod)
+			}
+			cachedNodeInfoMap := map[string]*schedulercache.NodeInfo{}
+			for _, name := range nodeNames {
+				node := makeNode(name, 1000*5, priorityutil.DefaultMemoryRequest*5)
+				cache.AddNode(node)
 
-			// Set nodeInfo to extenders to mock extenders' cache for preemption.
-			cachedNodeInfo := schedulercache.NewNodeInfo()
-			cachedNodeInfo.SetNode(node)
-			cachedNodeInfoMap[name] = cachedNodeInfo
-		}
-		extenders := []algorithm.SchedulerExtender{}
-		for _, extender := range test.extenders {
-			// Set nodeInfoMap as extenders cached node information.
-			extender.cachedNodeNameToInfo = cachedNodeInfoMap
-			extenders = append(extenders, extender)
-		}
-		scheduler := NewGenericScheduler(
-			cache,
-			nil,
-			NewSchedulingQueue(),
-			map[string]algorithm.FitPredicate{"matches": algorithmpredicates.PodFitsResources},
-			algorithm.EmptyPredicateMetadataProducer,
-			[]algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}},
-			algorithm.EmptyPriorityMetadataProducer,
-			extenders,
-			nil,
-			schedulertesting.FakePersistentVolumeClaimLister{},
-			false,
-			false)
-		// Call Preempt and check the expected results.
-		node, victims, _, err := scheduler.Preempt(test.pod, schedulertesting.FakeNodeLister(makeNodeList(nodeNames)), error(&FitError{Pod: test.pod, FailedPredicates: failedPredMap}))
-		if err != nil {
-			t.Errorf("test [%v]: unexpected error in preemption: %v", test.name, err)
-		}
-		if (node != nil && node.Name != test.expectedNode) || (node == nil && len(test.expectedNode) != 0) {
-			t.Errorf("test [%v]: expected node: %v, got: %v", test.name, test.expectedNode, node)
-		}
-		if len(victims) != len(test.expectedPods) {
-			t.Errorf("test [%v]: expected %v pods, got %v.", test.name, len(test.expectedPods), len(victims))
-		}
-		for _, victim := range victims {
-			found := false
-			for _, expPod := range test.expectedPods {
-				if expPod == victim.Name {
-					found = true
-					break
-				}
-			}
-			if !found {
-				t.Errorf("test [%v]: pod %v is not expected to be a victim.", test.name, victim.Name)
-			}
-			// Mark the victims for deletion and record the preemptor's nominated node name.
-			now := metav1.Now()
-			victim.DeletionTimestamp = &now
-			test.pod.Status.NominatedNodeName = node.Name
-		}
-		// Call preempt again and make sure it doesn't preempt any more pods.
-		node, victims, _, err = scheduler.Preempt(test.pod, schedulertesting.FakeNodeLister(makeNodeList(nodeNames)), error(&FitError{Pod: test.pod, FailedPredicates: failedPredMap}))
-		if err != nil {
-			t.Errorf("test [%v]: unexpected error in preemption: %v", test.name, err)
-		}
-		if node != nil && len(victims) > 0 {
-			t.Errorf("test [%v]: didn't expect any more preemption. Node %v is selected for preemption.", test.name, node)
-		}
-		close(stop)
+				// Set nodeInfo to extenders to mock extenders' cache for preemption.
+				cachedNodeInfo := schedulercache.NewNodeInfo()
+				cachedNodeInfo.SetNode(node)
+				cachedNodeInfoMap[name] = cachedNodeInfo
+			}
+			extenders := []algorithm.SchedulerExtender{}
+			for _, extender := range test.extenders {
+				// Set nodeInfoMap as extenders cached node information.
+				extender.cachedNodeNameToInfo = cachedNodeInfoMap
+				extenders = append(extenders, extender)
+			}
+			scheduler := NewGenericScheduler(
+				cache,
+				nil,
+				NewSchedulingQueue(),
+				map[string]algorithm.FitPredicate{"matches": algorithmpredicates.PodFitsResources},
+				algorithm.EmptyPredicateMetadataProducer,
+				[]algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}},
+				algorithm.EmptyPriorityMetadataProducer,
+				extenders,
+				nil,
+				schedulertesting.FakePersistentVolumeClaimLister{},
+				false,
+				false)
+			// Call Preempt and check the expected results.
+			node, victims, _, err := scheduler.Preempt(test.pod, schedulertesting.FakeNodeLister(makeNodeList(nodeNames)), error(&FitError{Pod: test.pod, FailedPredicates: failedPredMap}))
+			if err != nil {
+				t.Errorf("unexpected error in preemption: %v", err)
+			}
+			if (node != nil && node.Name != test.expectedNode) || (node == nil && len(test.expectedNode) != 0) {
+				t.Errorf("expected node: %v, got: %v", test.expectedNode, node)
+			}
+			if len(victims) != len(test.expectedPods) {
+				t.Errorf("expected %v pods, got %v.", len(test.expectedPods), len(victims))
+			}
+			for _, victim := range victims {
+				found := false
+				for _, expPod := range test.expectedPods {
+					if expPod == victim.Name {
+						found = true
+						break
+					}
+				}
+				if !found {
+					t.Errorf("pod %v is not expected to be a victim.", victim.Name)
+				}
+				// Mark the victims for deletion and record the preemptor's nominated node name.
+				now := metav1.Now()
+				victim.DeletionTimestamp = &now
+				test.pod.Status.NominatedNodeName = node.Name
+			}
+			// Call preempt again and make sure it doesn't preempt any more pods.
+			node, victims, _, err = scheduler.Preempt(test.pod, schedulertesting.FakeNodeLister(makeNodeList(nodeNames)), error(&FitError{Pod: test.pod, FailedPredicates: failedPredMap}))
+			if err != nil {
+				t.Errorf("unexpected error in preemption: %v", err)
+			}
+			if node != nil && len(victims) > 0 {
+				t.Errorf("didn't expect any more preemption. Node %v is selected for preemption.", node)
+			}
+			close(stop)
+		})
 	}
 }
@@ -375,6 +375,7 @@ func TestUnschedulablePodsMap(t *testing.T) {
 	updatedPods[3] = pods[3].DeepCopy()
 
 	tests := []struct {
+		name                   string
 		podsToAdd              []*v1.Pod
 		expectedMapAfterAdd    map[string]*v1.Pod
 		podsToUpdate           []*v1.Pod

@@ -383,6 +384,7 @@ func TestUnschedulablePodsMap(t *testing.T) {
 		expectedMapAfterDelete map[string]*v1.Pod
 	}{
 		{
+			name:      "create, update, delete subset of pods",
 			podsToAdd: []*v1.Pod{pods[0], pods[1], pods[2], pods[3]},
 			expectedMapAfterAdd: map[string]*v1.Pod{
 				util.GetPodFullName(pods[0]): pods[0],

@@ -404,6 +406,7 @@ func TestUnschedulablePodsMap(t *testing.T) {
 			},
 		},
 		{
+			name:      "create, update, delete all",
 			podsToAdd: []*v1.Pod{pods[0], pods[3]},
 			expectedMapAfterAdd: map[string]*v1.Pod{
 				util.GetPodFullName(pods[0]): pods[0],

@@ -418,6 +421,7 @@ func TestUnschedulablePodsMap(t *testing.T) {
 			expectedMapAfterDelete: map[string]*v1.Pod{},
 		},
 		{
+			name:      "delete non-existing and existing pods",
 			podsToAdd: []*v1.Pod{pods[1], pods[2]},
 			expectedMapAfterAdd: map[string]*v1.Pod{
 				util.GetPodFullName(pods[1]): pods[1],

@@ -435,35 +439,37 @@ func TestUnschedulablePodsMap(t *testing.T) {
 		},
 	}
 
-	for i, test := range tests {
-		upm := newUnschedulablePodsMap()
-		for _, p := range test.podsToAdd {
-			upm.addOrUpdate(p)
-		}
-		if !reflect.DeepEqual(upm.pods, test.expectedMapAfterAdd) {
-			t.Errorf("#%d: Unexpected map after adding pods. Expected: %v, got: %v",
-				i, test.expectedMapAfterAdd, upm.pods)
-		}
-
-		if len(test.podsToUpdate) > 0 {
-			for _, p := range test.podsToUpdate {
-				upm.addOrUpdate(p)
-			}
-			if !reflect.DeepEqual(upm.pods, test.expectedMapAfterUpdate) {
-				t.Errorf("#%d: Unexpected map after updating pods. Expected: %v, got: %v",
-					i, test.expectedMapAfterUpdate, upm.pods)
-			}
-		}
-		for _, p := range test.podsToDelete {
-			upm.delete(p)
-		}
-		if !reflect.DeepEqual(upm.pods, test.expectedMapAfterDelete) {
-			t.Errorf("#%d: Unexpected map after deleting pods. Expected: %v, got: %v",
-				i, test.expectedMapAfterDelete, upm.pods)
-		}
-		upm.clear()
-		if len(upm.pods) != 0 {
-			t.Errorf("Expected the map to be empty, but has %v elements.", len(upm.pods))
-		}
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			upm := newUnschedulablePodsMap()
+			for _, p := range test.podsToAdd {
+				upm.addOrUpdate(p)
+			}
+			if !reflect.DeepEqual(upm.pods, test.expectedMapAfterAdd) {
+				t.Errorf("Unexpected map after adding pods. Expected: %v, got: %v",
+					test.expectedMapAfterAdd, upm.pods)
+			}
+
+			if len(test.podsToUpdate) > 0 {
+				for _, p := range test.podsToUpdate {
+					upm.addOrUpdate(p)
+				}
+				if !reflect.DeepEqual(upm.pods, test.expectedMapAfterUpdate) {
+					t.Errorf("Unexpected map after updating pods. Expected: %v, got: %v",
+						test.expectedMapAfterUpdate, upm.pods)
+				}
+			}
+			for _, p := range test.podsToDelete {
+				upm.delete(p)
+			}
+			if !reflect.DeepEqual(upm.pods, test.expectedMapAfterDelete) {
+				t.Errorf("Unexpected map after deleting pods. Expected: %v, got: %v",
+					test.expectedMapAfterDelete, upm.pods)
+			}
+			upm.clear()
+			if len(upm.pods) != 0 {
+				t.Errorf("Expected the map to be empty, but has %v elements.", len(upm.pods))
+			}
+		})
 	}
 }
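A practical side effect of this conversion: each table case can now be run on its own, since t.Run registers it as TestName/<subtest name> with spaces replaced by underscores, and go test's -run flag takes one regular expression per slash-separated level. For example (package path omitted, adjust to where the test lives):

go test -run 'TestSelectHost/equal_scores'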