Revert "Sync pods for daemon sets."

This reverts commit ffd34311c6.
mqliang 2016-02-11 11:46:16 +08:00
parent d802778c20
commit 91124afdd7
2 changed files with 1 addition and 40 deletions


@@ -187,7 +187,6 @@ func NewDaemonSetsController(kubeClient clientset.Interface, resyncPeriod contro
 func (dsc *DaemonSetsController) Run(workers int, stopCh <-chan struct{}) {
 	defer utilruntime.HandleCrash()
 	glog.Infof("Starting Daemon Sets controller manager")
-	controller.SyncAllPodsWithStore(dsc.kubeClient, dsc.podStore.Store)
 	go dsc.dsController.Run(stopCh)
 	go dsc.podController.Run(stopCh)
 	go dsc.nodeController.Run(stopCh)
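
The deleted call above is the substance of the revert: on startup, controller.SyncAllPodsWithStore did a blocking LIST of every pod in the cluster and primed the shared pod cache before the informers started their own LIST/WATCH. Its body is not part of this diff, so the following is only a sketch of that pattern; the Core() accessor and the Store.Replace(items, resourceVersion) signature are assumptions, not confirmed by this commit:

package daemon

import (
	"github.com/golang/glog"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/cache"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
)

// syncAllPodsWithStore is a sketch of the removed helper: list every pod
// once and atomically replace the store contents, so the controller never
// acts on an empty cache right after a restart. (Hypothetical body; only
// the call site appears in this diff.)
func syncAllPodsWithStore(kubeClient clientset.Interface, store cache.Store) {
	allPods, err := kubeClient.Core().Pods(api.NamespaceAll).List(api.ListOptions{})
	if err != nil {
		glog.Warningf("Initial pod list failed; the watch will fill the store instead: %v", err)
		return
	}
	pods := make([]interface{}, 0, len(allPods.Items))
	for i := range allPods.Items {
		pods = append(pods, &allPods.Items[i])
	}
	// Replace swaps in the full set at the listed resource version.
	if err := store.Replace(pods, allPods.ResourceVersion); err != nil {
		glog.Warningf("Failed to replace pod store contents: %v", err)
	}
}

With the call reverted, the cache simply starts empty and the pod informer's initial LIST fills it; the podStoreSynced check (visible in the test file below) is what keeps the controller from acting before that happens.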
@@ -472,6 +471,7 @@ func storeDaemonSetStatus(dsClient unversioned_extensions.DaemonSetInterface, ds
 	if ds.Status.DesiredNumberScheduled == desiredNumberScheduled && ds.Status.CurrentNumberScheduled == currentNumberScheduled && ds.Status.NumberMisscheduled == numberMisscheduled {
 		return nil
 	}
+
 	var updateErr, getErr error
 	for i := 0; i <= StatusUpdateRetries; i++ {
 		ds.Status.DesiredNumberScheduled = desiredNumberScheduled
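
The second hunk restores only a blank line, but its context sketches the shape of storeDaemonSetStatus: the locals updateErr and getErr plus the StatusUpdateRetries loop point to the usual update-then-refetch retry. The sketch below fills in that loop under those assumptions; only its first three lines are actually shown in the diff, and import paths are omitted because the identifiers belong to the surrounding file:

// Sketch of the retry loop begun in the hunk above. dsClient and ds have
// the types from the hunk header; everything after the three visible
// lines is assumed, not taken from this commit.
func updateStatusSketch(dsClient unversioned_extensions.DaemonSetInterface, ds *extensions.DaemonSet, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled int) error {
	var updateErr, getErr error
	for i := 0; i <= StatusUpdateRetries; i++ {
		ds.Status.DesiredNumberScheduled = desiredNumberScheduled
		ds.Status.CurrentNumberScheduled = currentNumberScheduled
		ds.Status.NumberMisscheduled = numberMisscheduled

		if _, updateErr = dsClient.UpdateStatus(ds); updateErr == nil {
			return nil
		}
		// A failed update usually means a stale resourceVersion:
		// refetch the object and retry with the fresh copy.
		if ds, getErr = dsClient.Get(ds.Name); getErr != nil {
			return getErr
		}
	}
	return updateErr
}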


@@ -18,7 +18,6 @@ package daemon
 import (
 	"fmt"
-	"net/http/httptest"
 	"testing"

 	"k8s.io/kubernetes/pkg/api"
@@ -30,9 +29,7 @@ import (
 	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
 	"k8s.io/kubernetes/pkg/controller"
-	"k8s.io/kubernetes/pkg/runtime"
 	"k8s.io/kubernetes/pkg/securitycontext"
-	utiltesting "k8s.io/kubernetes/pkg/util/testing"
 )

 var (
@@ -457,39 +454,3 @@ func TestDSManagerNotReady(t *testing.T) {
 	manager.podStoreSynced = alwaysReady
 	syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0)
 }
-
-func TestDSManagerInit(t *testing.T) {
-	// Insert a stable daemon set and make sure we don't create an extra pod
-	// for the one node which already has a daemon after a simulated restart.
-	ds := newDaemonSet("test")
-	ds.Status = extensions.DaemonSetStatus{
-		CurrentNumberScheduled: 1,
-		NumberMisscheduled:     0,
-		DesiredNumberScheduled: 1,
-	}
-	nodeName := "only-node"
-	podList := &api.PodList{
-		Items: []api.Pod{
-			*newPod("podname", nodeName, simpleDaemonSetLabel),
-		}}
-	response := runtime.EncodeOrDie(testapi.Default.Codec(), podList)
-	fakeHandler := utiltesting.FakeHandler{
-		StatusCode:   200,
-		ResponseBody: response,
-	}
-	testServer := httptest.NewServer(&fakeHandler)
-	// TODO: Uncomment when fix #19254
-	// defer testServer.Close()
-
-	clientset := clientset.NewForConfigOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
-
-	manager := NewDaemonSetsController(clientset, controller.NoResyncPeriodFunc)
-	manager.dsStore.Add(ds)
-	manager.nodeStore.Add(newNode(nodeName, nil))
-	manager.podStoreSynced = alwaysReady
-	controller.SyncAllPodsWithStore(manager.kubeClient, manager.podStore.Store)
-	fakePodControl := &controller.FakePodControl{}
-	manager.podControl = fakePodControl
-	manager.syncHandler(getKey(ds, t))
-	validateSyncDaemonSets(t, fakePodControl, 0, 0)
-}
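
The deleted TestDSManagerInit simulated a controller restart: it seeded a stub API server with a PodList containing the one already-running daemon pod, ran the startup sync, and asserted via validateSyncDaemonSets(t, fakePodControl, 0, 0) that no extra pod was created. With the startup sync reverted, the scenario it guarded no longer exists. The stub-server pattern it relied on is still useful for future tests; a minimal sketch, using only the utiltesting.FakeHandler fields the deleted lines themselves used (the helper name is hypothetical):

package daemon

import (
	"net/http/httptest"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/testapi"
	"k8s.io/kubernetes/pkg/runtime"
	utiltesting "k8s.io/kubernetes/pkg/util/testing"
)

// newStubPodListServer serves one canned, codec-encoded PodList, the same
// way the deleted test faked an API server. Callers should Close() the
// server; the deleted test could not, pending issue #19254.
func newStubPodListServer(pods []api.Pod) *httptest.Server {
	body := runtime.EncodeOrDie(testapi.Default.Codec(), &api.PodList{Items: pods})
	handler := utiltesting.FakeHandler{
		StatusCode:   200,
		ResponseBody: body,
	}
	return httptest.NewServer(&handler)
}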