Merge pull request #64692 from adohe/scheduler_cache

Automatic merge from submit-queue (batch tested with PRs 64882, 64692, 64389, 60626, 64840). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

update pod state of scheduler cache on UpdatePod

Update the pod state map in the scheduler cache when UpdatePod is called. @k82cn @bsalamat 

```release-note
Keep pod state consistent when the scheduler cache UpdatePod is called
```
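
For context, here is a minimal, self-contained sketch of the failure mode this PR fixes. The `Pod`, `podState`, and `schedulerCache` types below are simplified stand-ins, not the real kubernetes/kubernetes definitions (the real `UpdatePod` also refreshes aggregated `NodeInfo`, elided here). The point it illustrates: unless `UpdatePod` repoints the cached state at the new pod object, a later `GetPod` keeps returning the stale one.

```go
package main

import "fmt"

// Pod is a simplified stand-in for *v1.Pod.
type Pod struct {
	Name string
	CPU  string
}

// podState wraps the cached pod object, as the real cache entry does.
type podState struct {
	pod *Pod
}

// schedulerCache keeps one podState per pod key.
type schedulerCache struct {
	podStates map[string]*podState
}

// UpdatePod refreshes cached state for a pod. With this PR, it also
// repoints the cached state at the new pod object.
func (c *schedulerCache) UpdatePod(oldPod, newPod *Pod) error {
	currState, ok := c.podStates[oldPod.Name]
	if !ok {
		return fmt.Errorf("pod %v is not added to scheduler cache, so cannot be updated", oldPod.Name)
	}
	// ... refresh aggregated NodeInfo from oldPod to newPod (omitted) ...
	currState.pod = newPod // the one-line fix: without this, GetPod keeps returning oldPod
	return nil
}

// GetPod returns whatever pod object the cached state currently points at.
func (c *schedulerCache) GetPod(pod *Pod) (*Pod, error) {
	state, ok := c.podStates[pod.Name]
	if !ok {
		return nil, fmt.Errorf("pod %v does not exist in scheduler cache", pod.Name)
	}
	return state.pod, nil
}

func main() {
	oldPod := &Pod{Name: "test", CPU: "100m"}
	newPod := &Pod{Name: "test", CPU: "200m"}

	cache := &schedulerCache{podStates: map[string]*podState{
		oldPod.Name: {pod: oldPod},
	}}

	if err := cache.UpdatePod(oldPod, newPod); err != nil {
		panic(err)
	}
	got, _ := cache.GetPod(newPod)
	fmt.Println(got.CPU) // "200m" with the fix; "100m" if currState.pod were left stale
}
```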
Kubernetes Submit Queue 2018-06-20 10:03:23 -07:00 committed by GitHub
commit bb6270bd92
2 changed files with 60 additions and 0 deletions


```diff
@@ -317,6 +317,7 @@ func (cache *schedulerCache) UpdatePod(oldPod, newPod *v1.Pod) error {
 		if err := cache.updatePod(oldPod, newPod); err != nil {
 			return err
 		}
+		currState.pod = newPod
 	default:
 		return fmt.Errorf("pod %v is not added to scheduler cache, so cannot be updated", key)
 	}
```
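
Before this change, `UpdatePod` refreshed the aggregated usage via `cache.updatePod` but never repointed `currState.pod`, so a subsequent `GetPod` (which returns the pod stored in the cache's pod state map) handed back the pre-update object; the sketch above walks through this. Note that the added assignment sits in the branch for pods already added to the cache, while the `default` branch still rejects updates for pods the cache has never seen.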


```diff
@@ -577,6 +577,65 @@ func TestUpdatePod(t *testing.T) {
 	}
 }
 
+// TestUpdatePodAndGet tests that GetPod always returns the latest pod state.
+func TestUpdatePodAndGet(t *testing.T) {
+	nodeName := "node"
+	ttl := 10 * time.Second
+	testPods := []*v1.Pod{
+		makeBasePod(t, nodeName, "test", "100m", "500", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}}),
+		makeBasePod(t, nodeName, "test", "200m", "1Ki", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 8080, Protocol: "TCP"}}),
+	}
+	tests := []struct {
+		pod         *v1.Pod
+		podToUpdate *v1.Pod
+		handler     func(cache Cache, pod *v1.Pod) error
+		assumePod   bool
+	}{
+		{
+			pod:         testPods[0],
+			podToUpdate: testPods[0],
+			handler: func(cache Cache, pod *v1.Pod) error {
+				return cache.AssumePod(pod)
+			},
+			assumePod: true,
+		},
+		{
+			pod:         testPods[0],
+			podToUpdate: testPods[1],
+			handler: func(cache Cache, pod *v1.Pod) error {
+				return cache.AddPod(pod)
+			},
+			assumePod: false,
+		},
+	}
+	for _, tt := range tests {
+		cache := newSchedulerCache(ttl, time.Second, nil)
+		if err := tt.handler(cache, tt.pod); err != nil {
+			t.Fatalf("unexpected err: %v", err)
+		}
+		if !tt.assumePod {
+			if err := cache.UpdatePod(tt.pod, tt.podToUpdate); err != nil {
+				t.Fatalf("UpdatePod failed: %v", err)
+			}
+		}
+		cachedPod, err := cache.GetPod(tt.pod)
+		if err != nil {
+			t.Fatalf("GetPod failed: %v", err)
+		}
+		if !reflect.DeepEqual(tt.podToUpdate, cachedPod) {
+			t.Fatalf("pod get=%s, want=%s", cachedPod, tt.podToUpdate)
+		}
+	}
+}
+
 // TestExpireAddUpdatePod tests the sequence where a pod is expired, added, then updated.
 func TestExpireAddUpdatePod(t *testing.T) {
 	// Enable volumesOnNodeForBalancing to do balanced resource allocation
```