2014-06-06 23:40:48 +00:00
|
|
|
/*
|
2015-05-01 16:19:44 +00:00
|
|
|
Copyright 2014 The Kubernetes Authors All rights reserved.
|
2014-06-06 23:40:48 +00:00
|
|
|
|
|
|
|
Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
you may not use this file except in compliance with the License.
|
|
|
|
You may obtain a copy of the License at
|
|
|
|
|
|
|
|
http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
|
|
|
|
Unless required by applicable law or agreed to in writing, software
|
|
|
|
distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
See the License for the specific language governing permissions and
|
|
|
|
limitations under the License.
|
|
|
|
*/
|
2014-06-17 21:49:44 +00:00
|
|
|
|
2016-01-17 22:26:25 +00:00
|
|
|
// If you make changes to this file, you should also make the corresponding change in ReplicaSet.
|
|
|
|
|
2015-10-10 03:58:57 +00:00
|
|
|
package replication
|
2014-06-06 23:40:48 +00:00
|
|
|
|
|
|
|
import (
|
|
|
|
"fmt"
|
2015-04-17 00:37:57 +00:00
|
|
|
"math/rand"
|
2014-06-06 23:40:48 +00:00
|
|
|
"net/http/httptest"
|
2016-03-05 00:51:01 +00:00
|
|
|
"strings"
|
2014-06-06 23:40:48 +00:00
|
|
|
"testing"
|
2014-06-18 20:10:19 +00:00
|
|
|
"time"
|
2014-06-06 23:40:48 +00:00
|
|
|
|
2015-08-05 22:03:47 +00:00
|
|
|
"k8s.io/kubernetes/pkg/api"
|
|
|
|
"k8s.io/kubernetes/pkg/api/testapi"
|
2015-09-09 21:59:11 +00:00
|
|
|
"k8s.io/kubernetes/pkg/api/unversioned"
|
2015-09-03 21:40:58 +00:00
|
|
|
"k8s.io/kubernetes/pkg/client/cache"
|
2016-02-05 21:58:03 +00:00
|
|
|
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
|
2016-02-16 22:16:45 +00:00
|
|
|
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
|
2016-02-12 18:58:43 +00:00
|
|
|
"k8s.io/kubernetes/pkg/client/restclient"
|
2016-01-15 05:00:58 +00:00
|
|
|
"k8s.io/kubernetes/pkg/client/testing/core"
|
2015-08-05 22:03:47 +00:00
|
|
|
"k8s.io/kubernetes/pkg/controller"
|
|
|
|
"k8s.io/kubernetes/pkg/runtime"
|
|
|
|
"k8s.io/kubernetes/pkg/securitycontext"
|
|
|
|
"k8s.io/kubernetes/pkg/util"
|
2015-09-09 17:45:01 +00:00
|
|
|
"k8s.io/kubernetes/pkg/util/sets"
|
2016-01-15 06:33:50 +00:00
|
|
|
utiltesting "k8s.io/kubernetes/pkg/util/testing"
|
2016-02-02 10:57:06 +00:00
|
|
|
"k8s.io/kubernetes/pkg/util/wait"
|
2015-08-05 22:03:47 +00:00
|
|
|
"k8s.io/kubernetes/pkg/watch"
|
2014-06-06 23:40:48 +00:00
|
|
|
)
|
|
|
|
|
2015-06-19 20:35:19 +00:00
|
|
|
// alwaysReady reports the pod store as synced, so syncs are never skipped in tests.
var alwaysReady = func() bool { return true }
|
|
|
|
|
2015-04-21 20:40:35 +00:00
|
|
|
func getKey(rc *api.ReplicationController, t *testing.T) string {
|
2015-07-28 01:21:37 +00:00
|
|
|
if key, err := controller.KeyFunc(rc); err != nil {
|
2015-04-21 20:40:35 +00:00
|
|
|
t.Errorf("Unexpected error getting key for rc %v: %v", rc.Name, err)
|
|
|
|
return ""
|
|
|
|
} else {
|
|
|
|
return key
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// newReplicationController returns a ReplicationController fixture with the
// given replica count: fixed name "foobar" in the default namespace, selector
// {"foo": "bar"}, and a single-container pod template whose fields are filled
// in so the object passes validation.
func newReplicationController(replicas int) *api.ReplicationController {
	rc := &api.ReplicationController{
		TypeMeta: unversioned.TypeMeta{APIVersion: testapi.Default.GroupVersion().String()},
		ObjectMeta: api.ObjectMeta{
			UID:             util.NewUUID(),
			Name:            "foobar",
			Namespace:       api.NamespaceDefault,
			ResourceVersion: "18",
		},
		Spec: api.ReplicationControllerSpec{
			Replicas: int32(replicas),
			Selector: map[string]string{"foo": "bar"},
			Template: &api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{
					// Note: template labels differ from the selector above;
					// only the selector is used for pod matching in these tests.
					Labels: map[string]string{
						"name": "foo",
						"type": "production",
					},
				},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{
							Image: "foo/bar",
							TerminationMessagePath: api.TerminationMessagePathDefault,
							ImagePullPolicy:        api.PullIfNotPresent,
							SecurityContext:        securitycontext.ValidSecurityContextWithContainerDefaults(),
						},
					},
					RestartPolicy: api.RestartPolicyAlways,
					DNSPolicy:     api.DNSDefault,
					NodeSelector: map[string]string{
						"baz": "blah",
					},
				},
			},
		},
	}
	return rc
}
|
|
|
|
|
2015-04-21 20:40:35 +00:00
|
|
|
// create count pods with the given phase for the given rc (same selectors and namespace), and add them to the store.
|
2016-03-11 18:34:13 +00:00
|
|
|
func newPodList(store cache.Store, count int, status api.PodPhase, rc *api.ReplicationController, name string) *api.PodList {
|
2014-06-12 20:17:34 +00:00
|
|
|
pods := []api.Pod{}
|
2014-06-06 23:40:48 +00:00
|
|
|
for i := 0; i < count; i++ {
|
2015-04-21 20:40:35 +00:00
|
|
|
newPod := api.Pod{
|
2014-10-23 20:51:34 +00:00
|
|
|
ObjectMeta: api.ObjectMeta{
|
2016-03-11 18:34:13 +00:00
|
|
|
Name: fmt.Sprintf("%s%d", name, i),
|
2015-04-21 20:40:35 +00:00
|
|
|
Labels: rc.Spec.Selector,
|
|
|
|
Namespace: rc.Namespace,
|
2014-06-06 23:40:48 +00:00
|
|
|
},
|
2015-04-21 20:40:35 +00:00
|
|
|
Status: api.PodStatus{Phase: status},
|
|
|
|
}
|
|
|
|
if store != nil {
|
|
|
|
store.Add(&newPod)
|
|
|
|
}
|
|
|
|
pods = append(pods, newPod)
|
2014-06-06 23:40:48 +00:00
|
|
|
}
|
2014-09-08 04:14:18 +00:00
|
|
|
return &api.PodList{
|
2014-06-09 05:38:45 +00:00
|
|
|
Items: pods,
|
2014-06-06 23:40:48 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-09-21 23:30:02 +00:00
|
|
|
func validateSyncReplication(t *testing.T, fakePodControl *controller.FakePodControl, expectedCreates, expectedDeletes int) {
|
|
|
|
if len(fakePodControl.Templates) != expectedCreates {
|
|
|
|
t.Errorf("Unexpected number of creates. Expected %d, saw %d\n", expectedCreates, len(fakePodControl.Templates))
|
2014-06-06 23:40:48 +00:00
|
|
|
}
|
2015-09-21 23:30:02 +00:00
|
|
|
if len(fakePodControl.DeletePodName) != expectedDeletes {
|
|
|
|
t.Errorf("Unexpected number of deletes. Expected %d, saw %d\n", expectedDeletes, len(fakePodControl.DeletePodName))
|
2014-06-06 23:40:48 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-03-24 07:06:51 +00:00
|
|
|
// replicationControllerResourceName returns the REST resource name used in
// API paths for replication controllers.
func replicationControllerResourceName() string {
	const resource = "replicationcontrollers"
	return resource
}
|
|
|
|
|
|
|
|
// serverResponse describes a canned HTTP response: a status code plus an
// object to serialize as the body.
// NOTE(review): this type appears unused in the visible portion of this
// file — confirm a remaining caller exists before removing it.
type serverResponse struct {
	statusCode int
	obj        interface{}
}
|
|
|
|
|
2015-04-21 20:40:35 +00:00
|
|
|
func TestSyncReplicationControllerDoesNothing(t *testing.T) {
|
2016-02-12 18:58:43 +00:00
|
|
|
c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
|
2015-09-21 23:30:02 +00:00
|
|
|
fakePodControl := controller.FakePodControl{}
|
2016-04-14 18:00:52 +00:00
|
|
|
manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
|
2015-06-19 20:35:19 +00:00
|
|
|
manager.podStoreSynced = alwaysReady
|
2014-06-06 23:40:48 +00:00
|
|
|
|
2015-04-21 20:40:35 +00:00
|
|
|
// 2 running pods, a controller with 2 replicas, sync is a no-op
|
2014-08-21 04:27:19 +00:00
|
|
|
controllerSpec := newReplicationController(2)
|
2015-07-27 22:41:00 +00:00
|
|
|
manager.rcStore.Store.Add(controllerSpec)
|
2016-03-11 18:34:13 +00:00
|
|
|
newPodList(manager.podStore.Store, 2, api.PodRunning, controllerSpec, "pod")
|
2014-06-06 23:40:48 +00:00
|
|
|
|
2015-04-21 20:40:35 +00:00
|
|
|
manager.podControl = &fakePodControl
|
|
|
|
manager.syncReplicationController(getKey(controllerSpec, t))
|
2014-06-09 05:38:45 +00:00
|
|
|
validateSyncReplication(t, &fakePodControl, 0, 0)
|
2014-06-06 23:40:48 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func TestSyncReplicationControllerDeletes(t *testing.T) {
|
2016-02-12 18:58:43 +00:00
|
|
|
c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
|
2015-09-21 23:30:02 +00:00
|
|
|
fakePodControl := controller.FakePodControl{}
|
2016-04-14 18:00:52 +00:00
|
|
|
manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
|
2015-06-19 20:35:19 +00:00
|
|
|
manager.podStoreSynced = alwaysReady
|
2014-06-09 05:38:45 +00:00
|
|
|
manager.podControl = &fakePodControl
|
2014-06-06 23:40:48 +00:00
|
|
|
|
2015-04-21 20:40:35 +00:00
|
|
|
// 2 running pods and a controller with 1 replica, one pod delete expected
|
2014-08-21 04:27:19 +00:00
|
|
|
controllerSpec := newReplicationController(1)
|
2015-07-27 22:41:00 +00:00
|
|
|
manager.rcStore.Store.Add(controllerSpec)
|
2016-03-11 18:34:13 +00:00
|
|
|
newPodList(manager.podStore.Store, 2, api.PodRunning, controllerSpec, "pod")
|
2014-06-06 23:40:48 +00:00
|
|
|
|
2015-04-21 20:40:35 +00:00
|
|
|
manager.syncReplicationController(getKey(controllerSpec, t))
|
2014-06-09 05:38:45 +00:00
|
|
|
validateSyncReplication(t, &fakePodControl, 0, 1)
|
2014-06-06 23:40:48 +00:00
|
|
|
}
|
|
|
|
|
2015-05-29 16:24:39 +00:00
|
|
|
// TestDeleteFinalStateUnknown verifies that a tombstone delivered to deletePod
// (a cache.DeletedFinalStateUnknown wrapping the pod) still resolves to the
// owning rc and enqueues it for sync.
func TestDeleteFinalStateUnknown(t *testing.T) {
	c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	fakePodControl := controller.FakePodControl{}
	manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
	manager.podStoreSynced = alwaysReady
	manager.podControl = &fakePodControl

	// Stub out the sync handler so the test observes which key gets worked on.
	received := make(chan string)
	manager.syncHandler = func(key string) error {
		received <- key
		return nil
	}

	// The DeletedFinalStateUnknown object should cause the rc manager to insert
	// the controller matching the selectors of the deleted pod into the work queue.
	controllerSpec := newReplicationController(1)
	manager.rcStore.Store.Add(controllerSpec)
	// store is nil: the pod exists only as the tombstone payload below.
	pods := newPodList(nil, 1, api.PodRunning, controllerSpec, "pod")
	manager.deletePod(cache.DeletedFinalStateUnknown{Key: "foo", Obj: &pods.Items[0]})

	go manager.worker()

	expected := getKey(controllerSpec, t)
	select {
	case key := <-received:
		if key != expected {
			t.Errorf("Unexpected sync all for rc %v, expected %v", key, expected)
		}
	case <-time.After(wait.ForeverTestTimeout):
		t.Errorf("Processing DeleteFinalStateUnknown took longer than expected")
	}
}
|
|
|
|
|
2014-06-06 23:40:48 +00:00
|
|
|
func TestSyncReplicationControllerCreates(t *testing.T) {
|
2016-02-12 18:58:43 +00:00
|
|
|
c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
|
2016-04-14 18:00:52 +00:00
|
|
|
manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
|
2015-06-19 20:35:19 +00:00
|
|
|
manager.podStoreSynced = alwaysReady
|
2015-04-21 20:40:35 +00:00
|
|
|
|
|
|
|
// A controller with 2 replicas and no pods in the store, 2 creates expected
|
2015-09-21 23:30:02 +00:00
|
|
|
rc := newReplicationController(2)
|
|
|
|
manager.rcStore.Store.Add(rc)
|
2014-06-06 23:40:48 +00:00
|
|
|
|
2015-09-21 23:30:02 +00:00
|
|
|
fakePodControl := controller.FakePodControl{}
|
2014-06-09 05:38:45 +00:00
|
|
|
manager.podControl = &fakePodControl
|
2015-09-21 23:30:02 +00:00
|
|
|
manager.syncReplicationController(getKey(rc, t))
|
2014-06-09 05:38:45 +00:00
|
|
|
validateSyncReplication(t, &fakePodControl, 2, 0)
|
2014-06-06 23:40:48 +00:00
|
|
|
}
|
|
|
|
|
2015-06-18 19:00:19 +00:00
|
|
|
// TestStatusUpdatesWithoutReplicasChange verifies two things: (1) a sync in
// steady state issues no status PUT, and (2) bumping only the rc's Generation
// still triggers a status PUT carrying the new ObservedGeneration.
func TestStatusUpdatesWithoutReplicasChange(t *testing.T) {
	// Setup a fake server to listen for requests, and run the rc manager in steady state
	fakeHandler := utiltesting.FakeHandler{
		StatusCode:   200,
		ResponseBody: "",
	}
	testServer := httptest.NewServer(&fakeHandler)
	defer testServer.Close()
	c := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
	manager.podStoreSynced = alwaysReady

	// Steady state for the replication controller, no Status.Replicas updates expected
	activePods := 5
	rc := newReplicationController(activePods)
	manager.rcStore.Store.Add(rc)
	rc.Status = api.ReplicationControllerStatus{Replicas: int32(activePods)}
	newPodList(manager.podStore.Store, activePods, api.PodRunning, rc, "pod")

	fakePodControl := controller.FakePodControl{}
	manager.podControl = &fakePodControl
	manager.syncReplicationController(getKey(rc, t))

	validateSyncReplication(t, &fakePodControl, 0, 0)
	// RequestReceived being nil means the fake server saw no request at all.
	if fakeHandler.RequestReceived != nil {
		t.Errorf("Unexpected update when pods and rcs are in a steady state")
	}

	// This response body is just so we don't err out decoding the http response, all
	// we care about is the request body sent below.
	response := runtime.EncodeOrDie(testapi.Default.Codec(), &api.ReplicationController{})
	fakeHandler.ResponseBody = response

	rc.Generation = rc.Generation + 1
	manager.syncReplicationController(getKey(rc, t))

	// The PUT is expected to report the bumped generation as observed.
	rc.Status.ObservedGeneration = rc.Generation
	updatedRc := runtime.EncodeOrDie(testapi.Default.Codec(), rc)
	fakeHandler.ValidateRequest(t, testapi.Default.ResourcePath(replicationControllerResourceName(), rc.Namespace, rc.Name)+"/status", "PUT", &updatedRc)
}
|
|
|
|
|
|
|
|
// TestControllerUpdateReplicas verifies that a sync both creates the missing
// pod and PUTs a corrected status (replica count, observed generation) when
// the stored Status disagrees with the pods actually in the system.
func TestControllerUpdateReplicas(t *testing.T) {
	// This is a happy server just to record the PUT request we expect for status.Replicas
	fakeHandler := utiltesting.FakeHandler{
		StatusCode:   200,
		ResponseBody: "",
	}
	testServer := httptest.NewServer(&fakeHandler)
	defer testServer.Close()
	c := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
	manager.podStoreSynced = alwaysReady

	// Insufficient number of pods in the system, and Status.Replicas is wrong;
	// Status.Replica should update to match number of pods in system, 1 new pod should be created.
	rc := newReplicationController(5)
	manager.rcStore.Store.Add(rc)
	rc.Status = api.ReplicationControllerStatus{Replicas: 2, FullyLabeledReplicas: 6, ObservedGeneration: 0}
	rc.Generation = 1
	newPodList(manager.podStore.Store, 2, api.PodRunning, rc, "pod")
	// A second batch of pods matching a selector with an extra label, to
	// exercise FullyLabeledReplicas accounting.
	rcCopy := *rc
	extraLabelMap := map[string]string{"foo": "bar", "extraKey": "extraValue"}
	rcCopy.Spec.Selector = extraLabelMap
	newPodList(manager.podStore.Store, 2, api.PodRunning, &rcCopy, "podWithExtraLabel")

	// This response body is just so we don't err out decoding the http response
	response := runtime.EncodeOrDie(testapi.Default.Codec(), &api.ReplicationController{})
	fakeHandler.ResponseBody = response

	fakePodControl := controller.FakePodControl{}
	manager.podControl = &fakePodControl

	manager.syncReplicationController(getKey(rc, t))

	// 1. Status.Replicas should go up from 2->4 even though we created 5-4=1 pod.
	// 2. Status.FullyLabeledReplicas should equal to the number of pods that
	// has the extra labels, i.e., 2.
	// 3. Every update to the status should include the Generation of the spec.
	// NOTE(review): the expected status below omits FullyLabeledReplicas even
	// though comment 2 predicts 2 — confirm against the controller's behavior.
	rc.Status = api.ReplicationControllerStatus{Replicas: 4, ObservedGeneration: 1}

	decRc := runtime.EncodeOrDie(testapi.Default.Codec(), rc)
	fakeHandler.ValidateRequest(t, testapi.Default.ResourcePath(replicationControllerResourceName(), rc.Namespace, rc.Name)+"/status", "PUT", &decRc)
	validateSyncReplication(t, &fakePodControl, 1, 0)
}
|
|
|
|
|
2015-04-21 20:40:35 +00:00
|
|
|
// TestSyncReplicationControllerDormancy walks the expectations state machine:
// a sync that creates a pod raises expectations; while expectations are
// unmet, subsequent syncs are dormant (no creates/deletes); observing a
// creation (or a failed create) re-arms the next sync.
func TestSyncReplicationControllerDormancy(t *testing.T) {
	// Setup a test server so we can lie about the current state of pods
	fakeHandler := utiltesting.FakeHandler{
		StatusCode:   200,
		ResponseBody: "{}",
	}
	testServer := httptest.NewServer(&fakeHandler)
	defer testServer.Close()
	c := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	fakePodControl := controller.FakePodControl{}
	manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
	manager.podStoreSynced = alwaysReady
	manager.podControl = &fakePodControl

	// rc wants 2 replicas but only 1 pod exists, so one create is due.
	controllerSpec := newReplicationController(2)
	manager.rcStore.Store.Add(controllerSpec)
	newPodList(manager.podStore.Store, 1, api.PodRunning, controllerSpec, "pod")

	// Creates a replica and sets expectations
	controllerSpec.Status.Replicas = 1
	manager.syncReplicationController(getKey(controllerSpec, t))
	validateSyncReplication(t, &fakePodControl, 1, 0)

	// Expectations prevents replicas but not an update on status
	controllerSpec.Status.Replicas = 0
	fakePodControl.Clear()
	manager.syncReplicationController(getKey(controllerSpec, t))
	validateSyncReplication(t, &fakePodControl, 0, 0)

	// Get the key for the controller
	rcKey, err := controller.KeyFunc(controllerSpec)
	if err != nil {
		t.Errorf("Couldn't get key for object %+v: %v", controllerSpec, err)
	}

	// Lowering expectations should lead to a sync that creates a replica, however the
	// fakePodControl error will prevent this, leaving expectations at 0, 0
	manager.expectations.CreationObserved(rcKey)
	controllerSpec.Status.Replicas = 1
	fakePodControl.Clear()
	fakePodControl.Err = fmt.Errorf("Fake Error")

	manager.syncReplicationController(getKey(controllerSpec, t))
	validateSyncReplication(t, &fakePodControl, 0, 0)

	// This replica should not need a Lowering of expectations, since the previous create failed
	fakePodControl.Err = nil
	manager.syncReplicationController(getKey(controllerSpec, t))
	validateSyncReplication(t, &fakePodControl, 1, 0)

	// 1 PUT for the rc status during dormancy window.
	// Note that the pod creates go through pod control so they're not recorded.
	fakeHandler.ValidateRequestCount(t, 1)
}
|
|
|
|
|
|
|
|
// TestPodControllerLookup is a table-driven test for getPodController: a pod
// maps to an rc only when both the rc's selector matches the pod's labels and
// the namespaces agree; otherwise the lookup returns nil.
func TestPodControllerLookup(t *testing.T) {
	manager := NewReplicationManagerFromClient(clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}), controller.NoResyncPeriodFunc, BurstReplicas, 0)
	manager.podStoreSynced = alwaysReady
	testCases := []struct {
		inRCs     []*api.ReplicationController // rcs pre-loaded into the store
		pod       *api.Pod                     // pod to look up
		outRCName string                       // expected rc name; "" means no match
	}{
		// pods without labels don't match any rcs
		{
			inRCs: []*api.ReplicationController{
				{ObjectMeta: api.ObjectMeta{Name: "basic"}}},
			pod:       &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo1", Namespace: api.NamespaceAll}},
			outRCName: "",
		},
		// Matching labels, not namespace
		{
			inRCs: []*api.ReplicationController{
				{
					ObjectMeta: api.ObjectMeta{Name: "foo"},
					Spec: api.ReplicationControllerSpec{
						Selector: map[string]string{"foo": "bar"},
					},
				},
			},
			pod: &api.Pod{
				ObjectMeta: api.ObjectMeta{
					Name: "foo2", Namespace: "ns", Labels: map[string]string{"foo": "bar"}}},
			outRCName: "",
		},
		// Matching ns and labels returns the key to the rc, not the rc name
		{
			inRCs: []*api.ReplicationController{
				{
					ObjectMeta: api.ObjectMeta{Name: "bar", Namespace: "ns"},
					Spec: api.ReplicationControllerSpec{
						Selector: map[string]string{"foo": "bar"},
					},
				},
			},
			pod: &api.Pod{
				ObjectMeta: api.ObjectMeta{
					Name: "foo3", Namespace: "ns", Labels: map[string]string{"foo": "bar"}}},
			outRCName: "bar",
		},
	}
	// Note: the rc store is shared across cases; rcs accumulate as cases run.
	for _, c := range testCases {
		for _, r := range c.inRCs {
			manager.rcStore.Add(r)
		}
		if rc := manager.getPodController(c.pod); rc != nil {
			if c.outRCName != rc.Name {
				t.Errorf("Got controller %+v expected %+v", rc.Name, c.outRCName)
			}
		} else if c.outRCName != "" {
			t.Errorf("Expected a controller %v pod %v, found none", c.outRCName, c.pod.Name)
		}
	}
}
|
|
|
|
|
|
|
|
// TestWatchControllers verifies that an rc sent through a fake watch flows
// through the rc watcher and workqueue into the sync handler.
func TestWatchControllers(t *testing.T) {
	fakeWatch := watch.NewFake()
	c := &fake.Clientset{}
	c.AddWatchReactor("*", core.DefaultWatchReactor(fakeWatch, nil))
	manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
	manager.podStoreSynced = alwaysReady

	var testControllerSpec api.ReplicationController
	received := make(chan string)

	// The update sent through the fakeWatcher should make its way into the workqueue,
	// and eventually into the syncHandler. The handler validates the received controller
	// and closes the received channel to indicate that the test can finish.
	manager.syncHandler = func(key string) error {
		obj, exists, err := manager.rcStore.Store.GetByKey(key)
		if !exists || err != nil {
			t.Errorf("Expected to find controller under key %v", key)
		}
		controllerSpec := *obj.(*api.ReplicationController)
		if !api.Semantic.DeepDerivative(controllerSpec, testControllerSpec) {
			t.Errorf("Expected %#v, but got %#v", testControllerSpec, controllerSpec)
		}
		close(received)
		return nil
	}
	// Start only the rc watcher and the workqueue, send a watch event,
	// and make sure it hits the sync method.
	stopCh := make(chan struct{})
	defer close(stopCh)
	go manager.rcController.Run(stopCh)
	go wait.Until(manager.worker, 10*time.Millisecond, stopCh)

	testControllerSpec.Name = "foo"
	fakeWatch.Add(&testControllerSpec)

	select {
	case <-received:
	case <-time.After(wait.ForeverTestTimeout):
		t.Errorf("Expected 1 call but got 0")
	}
}
|
|
|
|
|
|
|
|
// TestWatchPods verifies that a pod event sent through a fake watch is traced
// back to its managing rc, which is then handed to the sync handler.
func TestWatchPods(t *testing.T) {
	fakeWatch := watch.NewFake()
	c := &fake.Clientset{}
	c.AddWatchReactor("*", core.DefaultWatchReactor(fakeWatch, nil))
	manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
	manager.podStoreSynced = alwaysReady

	// Put one rc and one pod into the controller's stores
	testControllerSpec := newReplicationController(1)
	manager.rcStore.Store.Add(testControllerSpec)
	received := make(chan string)
	// The pod update sent through the fakeWatcher should figure out the managing rc and
	// send it into the syncHandler.
	manager.syncHandler = func(key string) error {
		obj, exists, err := manager.rcStore.Store.GetByKey(key)
		if !exists || err != nil {
			t.Errorf("Expected to find controller under key %v", key)
		}
		controllerSpec := obj.(*api.ReplicationController)
		if !api.Semantic.DeepDerivative(controllerSpec, testControllerSpec) {
			t.Errorf("\nExpected %#v,\nbut got %#v", testControllerSpec, controllerSpec)
		}
		close(received)
		return nil
	}
	// Start only the pod watcher and the workqueue, send a watch event,
	// and make sure it hits the sync method for the right rc.
	stopCh := make(chan struct{})
	defer close(stopCh)
	go manager.podController.Run(stopCh)
	go manager.internalPodInformer.Run(stopCh)
	go wait.Until(manager.worker, 10*time.Millisecond, stopCh)

	// The pod is not added to the store; it arrives only via the watch below.
	pods := newPodList(nil, 1, api.PodRunning, testControllerSpec, "pod")
	testPod := pods.Items[0]
	testPod.Status.Phase = api.PodFailed
	fakeWatch.Add(&testPod)

	select {
	case <-received:
	case <-time.After(wait.ForeverTestTimeout):
		t.Errorf("Expected 1 call but got 0")
	}
}
|
|
|
|
|
|
|
|
// TestUpdatePods verifies that when a pod's labels change so that it moves
// from one rc's selector to another's, updatePod enqueues BOTH rcs (the old
// owner and the new one) for sync.
func TestUpdatePods(t *testing.T) {
	manager := NewReplicationManagerFromClient(fake.NewSimpleClientset(), controller.NoResyncPeriodFunc, BurstReplicas, 0)
	manager.podStoreSynced = alwaysReady

	received := make(chan string)

	// Stub out the sync handler to report which rc names get synced.
	manager.syncHandler = func(key string) error {
		obj, exists, err := manager.rcStore.Store.GetByKey(key)
		if !exists || err != nil {
			t.Errorf("Expected to find controller under key %v", key)
		}
		received <- obj.(*api.ReplicationController).Name
		return nil
	}

	stopCh := make(chan struct{})
	defer close(stopCh)
	go wait.Until(manager.worker, 10*time.Millisecond, stopCh)

	// Put 2 rcs and one pod into the controller's stores
	testControllerSpec1 := newReplicationController(1)
	manager.rcStore.Store.Add(testControllerSpec1)
	testControllerSpec2 := *testControllerSpec1
	testControllerSpec2.Spec.Selector = map[string]string{"bar": "foo"}
	testControllerSpec2.Name = "barfoo"
	manager.rcStore.Store.Add(&testControllerSpec2)

	// Put one pod in the podStore
	pod1 := newPodList(manager.podStore.Store, 1, api.PodRunning, testControllerSpec1, "pod").Items[0]
	pod2 := pod1
	pod2.Labels = testControllerSpec2.Spec.Selector

	// Send an update of the same pod with modified labels, and confirm we get a sync request for
	// both controllers
	manager.updatePod(&pod1, &pod2)

	expected := sets.NewString(testControllerSpec1.Name, testControllerSpec2.Name)
	for _, name := range expected.List() {
		t.Logf("Expecting update for %+v", name)
		select {
		case got := <-received:
			if !expected.Has(got) {
				t.Errorf("Expected keys %#v got %v", expected, got)
			}
		case <-time.After(wait.ForeverTestTimeout):
			t.Errorf("Expected update notifications for controllers within 100ms each")
		}
	}
}
|
2015-05-01 15:49:06 +00:00
|
|
|
|
|
|
|
func TestControllerUpdateRequeue(t *testing.T) {
|
2015-08-08 21:29:57 +00:00
|
|
|
// This server should force a requeue of the controller because it fails to update status.Replicas.
|
2016-01-15 06:33:50 +00:00
|
|
|
fakeHandler := utiltesting.FakeHandler{
|
2015-05-01 15:49:06 +00:00
|
|
|
StatusCode: 500,
|
|
|
|
ResponseBody: "",
|
|
|
|
}
|
|
|
|
testServer := httptest.NewServer(&fakeHandler)
|
2016-04-21 11:50:55 +00:00
|
|
|
defer testServer.Close()
|
2015-05-01 15:49:06 +00:00
|
|
|
|
2016-02-12 18:58:43 +00:00
|
|
|
c := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
|
2016-04-14 18:00:52 +00:00
|
|
|
manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
|
2015-06-19 20:35:19 +00:00
|
|
|
manager.podStoreSynced = alwaysReady
|
2015-05-01 15:49:06 +00:00
|
|
|
|
|
|
|
rc := newReplicationController(1)
|
2015-07-27 22:41:00 +00:00
|
|
|
manager.rcStore.Store.Add(rc)
|
2015-05-01 15:49:06 +00:00
|
|
|
rc.Status = api.ReplicationControllerStatus{Replicas: 2}
|
2016-03-11 18:34:13 +00:00
|
|
|
newPodList(manager.podStore.Store, 1, api.PodRunning, rc, "pod")
|
2015-05-01 15:49:06 +00:00
|
|
|
|
2015-09-21 23:30:02 +00:00
|
|
|
fakePodControl := controller.FakePodControl{}
|
2015-05-01 15:49:06 +00:00
|
|
|
manager.podControl = &fakePodControl
|
|
|
|
|
|
|
|
manager.syncReplicationController(getKey(rc, t))
|
|
|
|
|
|
|
|
ch := make(chan interface{})
|
|
|
|
go func() {
|
|
|
|
item, _ := manager.queue.Get()
|
|
|
|
ch <- item
|
|
|
|
}()
|
|
|
|
select {
|
|
|
|
case key := <-ch:
|
|
|
|
expectedKey := getKey(rc, t)
|
|
|
|
if key != expectedKey {
|
|
|
|
t.Errorf("Expected requeue of controller with key %s got %s", expectedKey, key)
|
|
|
|
}
|
2016-02-02 10:57:06 +00:00
|
|
|
case <-time.After(wait.ForeverTestTimeout):
|
2015-05-01 15:49:06 +00:00
|
|
|
manager.queue.ShutDown()
|
|
|
|
t.Errorf("Expected to find an rc in the queue, found none.")
|
|
|
|
}
|
|
|
|
// 1 Update and 1 GET, both of which fail
|
|
|
|
fakeHandler.ValidateRequestCount(t, 2)
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestControllerUpdateStatusWithFailure(t *testing.T) {
|
|
|
|
rc := newReplicationController(1)
|
2016-01-15 05:00:58 +00:00
|
|
|
c := &fake.Clientset{}
|
|
|
|
c.AddReactor("get", "replicationcontrollers", func(action core.Action) (bool, runtime.Object, error) {
|
2015-08-03 17:30:35 +00:00
|
|
|
return true, rc, nil
|
|
|
|
})
|
2016-01-15 05:00:58 +00:00
|
|
|
c.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) {
|
2015-08-03 17:30:35 +00:00
|
|
|
return true, &api.ReplicationController{}, fmt.Errorf("Fake error")
|
|
|
|
})
|
2016-02-03 21:21:05 +00:00
|
|
|
fakeRCClient := c.Core().ReplicationControllers("default")
|
2015-05-01 15:49:06 +00:00
|
|
|
numReplicas := 10
|
2016-03-11 18:34:13 +00:00
|
|
|
updateReplicaCount(fakeRCClient, *rc, numReplicas, 0)
|
2015-05-01 15:49:06 +00:00
|
|
|
updates, gets := 0, 0
|
2016-01-15 05:00:58 +00:00
|
|
|
for _, a := range c.Actions() {
|
2016-04-13 22:33:15 +00:00
|
|
|
if a.GetResource().Resource != "replicationcontrollers" {
|
2015-08-03 13:21:11 +00:00
|
|
|
t.Errorf("Unexpected action %+v", a)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
switch action := a.(type) {
|
2016-01-15 05:00:58 +00:00
|
|
|
case core.GetAction:
|
2015-05-01 15:49:06 +00:00
|
|
|
gets++
|
|
|
|
// Make sure the get is for the right rc even though the update failed.
|
2015-08-03 13:21:11 +00:00
|
|
|
if action.GetName() != rc.Name {
|
|
|
|
t.Errorf("Expected get for rc %v, got %+v instead", rc.Name, action.GetName())
|
2015-05-01 15:49:06 +00:00
|
|
|
}
|
2016-01-15 05:00:58 +00:00
|
|
|
case core.UpdateAction:
|
2015-05-01 15:49:06 +00:00
|
|
|
updates++
|
|
|
|
// Confirm that the update has the right status.Replicas even though the Get
|
|
|
|
// returned an rc with replicas=1.
|
2015-08-03 13:21:11 +00:00
|
|
|
if c, ok := action.GetObject().(*api.ReplicationController); !ok {
|
2015-05-01 15:49:06 +00:00
|
|
|
t.Errorf("Expected an rc as the argument to update, got %T", c)
|
2016-04-27 04:35:14 +00:00
|
|
|
} else if c.Status.Replicas != int32(numReplicas) {
|
2015-05-01 15:49:06 +00:00
|
|
|
t.Errorf("Expected update for rc to contain replicas %v, got %v instead",
|
|
|
|
numReplicas, c.Status.Replicas)
|
|
|
|
}
|
|
|
|
default:
|
|
|
|
t.Errorf("Unexpected action %+v", a)
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if gets != 1 || updates != 2 {
|
|
|
|
t.Errorf("Expected 1 get and 2 updates, got %d gets %d updates", gets, updates)
|
|
|
|
}
|
|
|
|
}
|
2015-05-06 21:39:14 +00:00
|
|
|
|
2016-03-05 00:51:01 +00:00
|
|
|
// TODO: This test is too hairy for a unittest. It should be moved to an E2E suite.
|
2015-05-06 21:39:14 +00:00
|
|
|
// doTestControllerBurstReplicas drives one rc through a scale-up to
// numReplicas and then a scale-down to 0, with pod creation/deletion capped
// at burstReplicas per sync. At every step it checks the create/delete calls
// issued through the fake pod control and the manager's expectation
// bookkeeping (including that simulated watch events don't wake the rc
// prematurely, and that the last event does).
func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int) {
	c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	fakePodControl := controller.FakePodControl{}
	manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, burstReplicas, 0)
	manager.podStoreSynced = alwaysReady
	manager.podControl = &fakePodControl

	controllerSpec := newReplicationController(numReplicas)
	manager.rcStore.Store.Add(controllerSpec)

	// expectedPods is reused across loop iterations below; it carries the
	// per-sync burst size (creates during scale-up, deletes during scale-down).
	expectedPods := 0
	pods := newPodList(nil, numReplicas, api.PodPending, controllerSpec, "pod")

	rcKey, err := controller.KeyFunc(controllerSpec)
	if err != nil {
		t.Errorf("Couldn't get key for object %+v: %v", controllerSpec, err)
	}

	// Size up the controller, then size it down, and confirm the expected create/delete pattern
	for _, replicas := range []int{numReplicas, 0} {

		controllerSpec.Spec.Replicas = int32(replicas)
		manager.rcStore.Store.Add(controllerSpec)

		// Each sync handles at most burstReplicas pods, so step by that amount.
		for i := 0; i < numReplicas; i += burstReplicas {
			manager.syncReplicationController(getKey(controllerSpec, t))

			// The store accrues active pods. It's also used by the rc to determine how many
			// replicas to create.
			activePods := len(manager.podStore.Store.List())
			if replicas != 0 {
				// Scale-up half of the cycle.
				// This is the number of pods currently "in flight". They were created by the rc manager above,
				// which then puts the rc to sleep till all of them have been observed.
				expectedPods = replicas - activePods
				if expectedPods > burstReplicas {
					expectedPods = burstReplicas
				}
				// This validates the rc manager sync actually created pods
				validateSyncReplication(t, &fakePodControl, expectedPods, 0)

				// This simulates the watch events for all but 1 of the expected pods.
				// None of these should wake the controller because it has expectations==BurstReplicas.
				for i := 0; i < expectedPods-1; i++ {
					manager.podStore.Store.Add(&pods.Items[i])
					manager.addPod(&pods.Items[i])
				}

				// Exactly one outstanding add expectation should remain.
				podExp, exists, err := manager.expectations.GetExpectations(rcKey)
				if !exists || err != nil {
					t.Fatalf("Did not find expectations for rc.")
				}
				if add, _ := podExp.GetExpectations(); add != 1 {
					t.Fatalf("Expectations are wrong %v", podExp)
				}
			} else {
				// Scale-down half of the cycle (replicas == 0).
				expectedPods = (replicas - activePods) * -1
				if expectedPods > burstReplicas {
					expectedPods = burstReplicas
				}
				validateSyncReplication(t, &fakePodControl, 0, expectedPods)

				// To accurately simulate a watch we must delete the exact pods
				// the rc is waiting for.
				expectedDels := manager.expectations.GetUIDs(getKey(controllerSpec, t))
				podsToDelete := []*api.Pod{}
				for _, key := range expectedDels.List() {
					// Keys are "namespace/name"; rebuild matching pod stubs.
					nsName := strings.Split(key, "/")
					podsToDelete = append(podsToDelete, &api.Pod{
						ObjectMeta: api.ObjectMeta{
							Name:      nsName[1],
							Namespace: nsName[0],
							Labels:    controllerSpec.Spec.Selector,
						},
					})
				}
				// Don't delete all pods because we confirm that the last pod
				// has exactly one expectation at the end, to verify that we
				// don't double delete.
				// Note: ranging over podsToDelete[1:] yields indices
				// 0..len-2, so podsToDelete[i] deletes every pod EXCEPT the
				// last element of podsToDelete.
				for i := range podsToDelete[1:] {
					manager.podStore.Delete(podsToDelete[i])
					manager.deletePod(podsToDelete[i])
				}
				// Exactly one outstanding delete expectation should remain.
				podExp, exists, err := manager.expectations.GetExpectations(rcKey)
				if !exists || err != nil {
					t.Fatalf("Did not find expectations for rc.")
				}
				if _, del := podExp.GetExpectations(); del != 1 {
					t.Fatalf("Expectations are wrong %v", podExp)
				}
			}

			// Check that the rc didn't take any action for all the above pods
			fakePodControl.Clear()
			manager.syncReplicationController(getKey(controllerSpec, t))
			validateSyncReplication(t, &fakePodControl, 0, 0)

			// Create/Delete the last pod
			// The last add pod will decrease the expectation of the rc to 0,
			// which will cause it to create/delete the remaining replicas up to burstReplicas.
			if replicas != 0 {
				manager.podStore.Store.Add(&pods.Items[expectedPods-1])
				manager.addPod(&pods.Items[expectedPods-1])
			} else {
				expectedDel := manager.expectations.GetUIDs(getKey(controllerSpec, t))
				if expectedDel.Len() != 1 {
					t.Fatalf("Waiting on unexpected number of deletes.")
				}
				nsName := strings.Split(expectedDel.List()[0], "/")
				lastPod := &api.Pod{
					ObjectMeta: api.ObjectMeta{
						Name:      nsName[1],
						Namespace: nsName[0],
						Labels:    controllerSpec.Spec.Selector,
					},
				}
				manager.podStore.Store.Delete(lastPod)
				manager.deletePod(lastPod)
			}
			// Drop the pods consumed by this burst before the next iteration.
			pods.Items = pods.Items[expectedPods:]
		}

		// Confirm that we've created the right number of replicas
		activePods := int32(len(manager.podStore.Store.List()))
		if activePods != controllerSpec.Spec.Replicas {
			t.Fatalf("Unexpected number of active pods, expected %d, got %d", controllerSpec.Spec.Replicas, activePods)
		}
		// Replenish the pod list, since we cut it down sizing up
		pods = newPodList(nil, replicas, api.PodRunning, controllerSpec, "pod")
	}
}
|
|
|
|
|
|
|
|
func TestControllerBurstReplicas(t *testing.T) {
|
|
|
|
doTestControllerBurstReplicas(t, 5, 30)
|
|
|
|
doTestControllerBurstReplicas(t, 5, 12)
|
|
|
|
doTestControllerBurstReplicas(t, 3, 2)
|
|
|
|
}
|
2015-05-12 21:39:23 +00:00
|
|
|
|
|
|
|
// FakeRCExpectations wraps ControllerExpectations so tests can force
// SatisfiedExpectations to return a canned answer and observe exactly when
// the sync loop consults it.
type FakeRCExpectations struct {
	*controller.ControllerExpectations
	// satisfied is the canned return value of SatisfiedExpectations.
	satisfied bool
	// expSatisfied is invoked on every SatisfiedExpectations call, letting a
	// test inject side effects at precisely that point in the sync.
	expSatisfied func()
}
|
|
|
|
|
2015-07-28 01:21:37 +00:00
|
|
|
func (fe FakeRCExpectations) SatisfiedExpectations(controllerKey string) bool {
|
2015-05-12 21:39:23 +00:00
|
|
|
fe.expSatisfied()
|
|
|
|
return fe.satisfied
|
|
|
|
}
|
|
|
|
|
|
|
|
// TestRCSyncExpectations tests that a pod cannot sneak in between counting active pods
|
|
|
|
// and checking expectations.
|
|
|
|
func TestRCSyncExpectations(t *testing.T) {
|
2016-02-12 18:58:43 +00:00
|
|
|
c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
|
2015-09-21 23:30:02 +00:00
|
|
|
fakePodControl := controller.FakePodControl{}
|
2016-04-14 18:00:52 +00:00
|
|
|
manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, 2, 0)
|
2015-06-19 20:35:19 +00:00
|
|
|
manager.podStoreSynced = alwaysReady
|
2015-05-12 21:39:23 +00:00
|
|
|
manager.podControl = &fakePodControl
|
|
|
|
|
|
|
|
controllerSpec := newReplicationController(2)
|
2015-07-27 22:41:00 +00:00
|
|
|
manager.rcStore.Store.Add(controllerSpec)
|
2016-03-11 18:34:13 +00:00
|
|
|
pods := newPodList(nil, 2, api.PodPending, controllerSpec, "pod")
|
2015-05-12 21:39:23 +00:00
|
|
|
manager.podStore.Store.Add(&pods.Items[0])
|
|
|
|
postExpectationsPod := pods.Items[1]
|
|
|
|
|
2016-03-05 00:51:01 +00:00
|
|
|
manager.expectations = controller.NewUIDTrackingControllerExpectations(FakeRCExpectations{
|
2015-07-28 01:21:37 +00:00
|
|
|
controller.NewControllerExpectations(), true, func() {
|
2015-05-12 21:39:23 +00:00
|
|
|
// If we check active pods before checking expectataions, the rc
|
|
|
|
// will create a new replica because it doesn't see this pod, but
|
|
|
|
// has fulfilled its expectations.
|
|
|
|
manager.podStore.Store.Add(&postExpectationsPod)
|
|
|
|
},
|
2016-03-05 00:51:01 +00:00
|
|
|
})
|
2015-05-12 21:39:23 +00:00
|
|
|
manager.syncReplicationController(getKey(controllerSpec, t))
|
|
|
|
validateSyncReplication(t, &fakePodControl, 0, 0)
|
|
|
|
}
|
2015-05-08 21:16:58 +00:00
|
|
|
|
|
|
|
func TestDeleteControllerAndExpectations(t *testing.T) {
|
2016-02-12 18:58:43 +00:00
|
|
|
c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
|
2016-04-14 18:00:52 +00:00
|
|
|
manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, 10, 0)
|
2015-06-19 20:35:19 +00:00
|
|
|
manager.podStoreSynced = alwaysReady
|
2015-05-08 21:16:58 +00:00
|
|
|
|
|
|
|
rc := newReplicationController(1)
|
2015-07-27 22:41:00 +00:00
|
|
|
manager.rcStore.Store.Add(rc)
|
2015-05-08 21:16:58 +00:00
|
|
|
|
2015-09-21 23:30:02 +00:00
|
|
|
fakePodControl := controller.FakePodControl{}
|
2015-05-08 21:16:58 +00:00
|
|
|
manager.podControl = &fakePodControl
|
|
|
|
|
|
|
|
// This should set expectations for the rc
|
|
|
|
manager.syncReplicationController(getKey(rc, t))
|
|
|
|
validateSyncReplication(t, &fakePodControl, 1, 0)
|
2015-09-21 23:30:02 +00:00
|
|
|
fakePodControl.Clear()
|
2015-05-08 21:16:58 +00:00
|
|
|
|
2015-07-28 01:21:37 +00:00
|
|
|
// Get the RC key
|
|
|
|
rcKey, err := controller.KeyFunc(rc)
|
|
|
|
if err != nil {
|
|
|
|
t.Errorf("Couldn't get key for object %+v: %v", rc, err)
|
|
|
|
}
|
|
|
|
|
2015-05-08 21:16:58 +00:00
|
|
|
// This is to simulate a concurrent addPod, that has a handle on the expectations
|
|
|
|
// as the controller deletes it.
|
2015-07-28 01:21:37 +00:00
|
|
|
podExp, exists, err := manager.expectations.GetExpectations(rcKey)
|
2015-05-08 21:16:58 +00:00
|
|
|
if !exists || err != nil {
|
|
|
|
t.Errorf("No expectations found for rc")
|
|
|
|
}
|
2015-07-27 22:41:00 +00:00
|
|
|
manager.rcStore.Delete(rc)
|
2015-05-08 21:16:58 +00:00
|
|
|
manager.syncReplicationController(getKey(rc, t))
|
|
|
|
|
2015-07-28 01:21:37 +00:00
|
|
|
if _, exists, err = manager.expectations.GetExpectations(rcKey); exists {
|
2015-05-08 21:16:58 +00:00
|
|
|
t.Errorf("Found expectaions, expected none since the rc has been deleted.")
|
|
|
|
}
|
|
|
|
|
|
|
|
// This should have no effect, since we've deleted the rc.
|
2016-02-25 06:40:14 +00:00
|
|
|
podExp.Add(-1, 0)
|
2015-08-18 08:34:27 +00:00
|
|
|
manager.podStore.Store.Replace(make([]interface{}, 0), "0")
|
2015-05-08 21:16:58 +00:00
|
|
|
manager.syncReplicationController(getKey(rc, t))
|
|
|
|
validateSyncReplication(t, &fakePodControl, 0, 0)
|
|
|
|
}
|
2015-06-19 20:35:19 +00:00
|
|
|
|
|
|
|
func TestRCManagerNotReady(t *testing.T) {
|
2016-02-12 18:58:43 +00:00
|
|
|
c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
|
2015-09-21 23:30:02 +00:00
|
|
|
fakePodControl := controller.FakePodControl{}
|
2016-04-14 18:00:52 +00:00
|
|
|
manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, 2, 0)
|
2015-06-19 20:35:19 +00:00
|
|
|
manager.podControl = &fakePodControl
|
|
|
|
manager.podStoreSynced = func() bool { return false }
|
|
|
|
|
|
|
|
// Simulates the rc reflector running before the pod reflector. We don't
|
|
|
|
// want to end up creating replicas in this case until the pod reflector
|
|
|
|
// has synced, so the rc manager should just requeue the rc.
|
|
|
|
controllerSpec := newReplicationController(1)
|
2015-07-27 22:41:00 +00:00
|
|
|
manager.rcStore.Store.Add(controllerSpec)
|
2015-06-19 20:35:19 +00:00
|
|
|
|
|
|
|
rcKey := getKey(controllerSpec, t)
|
|
|
|
manager.syncReplicationController(rcKey)
|
|
|
|
validateSyncReplication(t, &fakePodControl, 0, 0)
|
|
|
|
queueRC, _ := manager.queue.Get()
|
|
|
|
if queueRC != rcKey {
|
|
|
|
t.Fatalf("Expected to find key %v in queue, found %v", rcKey, queueRC)
|
|
|
|
}
|
|
|
|
|
|
|
|
manager.podStoreSynced = alwaysReady
|
|
|
|
manager.syncReplicationController(rcKey)
|
|
|
|
validateSyncReplication(t, &fakePodControl, 1, 0)
|
|
|
|
}
|
2015-06-27 00:55:14 +00:00
|
|
|
|
|
|
|
// shuffle returns a new shuffled list of container controllers.
|
|
|
|
func shuffle(controllers []*api.ReplicationController) []*api.ReplicationController {
|
|
|
|
numControllers := len(controllers)
|
|
|
|
randIndexes := rand.Perm(numControllers)
|
|
|
|
shuffled := make([]*api.ReplicationController, numControllers)
|
|
|
|
for i := 0; i < numControllers; i++ {
|
|
|
|
shuffled[i] = controllers[randIndexes[i]]
|
|
|
|
}
|
|
|
|
return shuffled
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestOverlappingRCs(t *testing.T) {
|
2016-02-12 18:58:43 +00:00
|
|
|
c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
|
2015-06-27 00:55:14 +00:00
|
|
|
|
|
|
|
for i := 0; i < 5; i++ {
|
2016-04-14 18:00:52 +00:00
|
|
|
manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, 10, 0)
|
2015-06-27 00:55:14 +00:00
|
|
|
manager.podStoreSynced = alwaysReady
|
|
|
|
|
|
|
|
// Create 10 rcs, shuffled them randomly and insert them into the rc manager's store
|
|
|
|
var controllers []*api.ReplicationController
|
|
|
|
for j := 1; j < 10; j++ {
|
|
|
|
controllerSpec := newReplicationController(1)
|
2015-09-17 22:21:55 +00:00
|
|
|
controllerSpec.CreationTimestamp = unversioned.Date(2014, time.December, j, 0, 0, 0, 0, time.Local)
|
2015-06-27 00:55:14 +00:00
|
|
|
controllerSpec.Name = string(util.NewUUID())
|
|
|
|
controllers = append(controllers, controllerSpec)
|
|
|
|
}
|
|
|
|
shuffledControllers := shuffle(controllers)
|
2015-07-01 00:01:15 +00:00
|
|
|
for j := range shuffledControllers {
|
2015-07-27 22:41:00 +00:00
|
|
|
manager.rcStore.Store.Add(shuffledControllers[j])
|
2015-06-27 00:55:14 +00:00
|
|
|
}
|
|
|
|
// Add a pod and make sure only the oldest rc is synced
|
2016-03-11 18:34:13 +00:00
|
|
|
pods := newPodList(nil, 1, api.PodPending, controllers[0], "pod")
|
2015-06-27 00:55:14 +00:00
|
|
|
rcKey := getKey(controllers[0], t)
|
|
|
|
|
|
|
|
manager.addPod(&pods.Items[0])
|
|
|
|
queueRC, _ := manager.queue.Get()
|
|
|
|
if queueRC != rcKey {
|
|
|
|
t.Fatalf("Expected to find key %v in queue, found %v", rcKey, queueRC)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2016-02-23 15:17:27 +00:00
|
|
|
|
2016-02-28 08:23:47 +00:00
|
|
|
// TestDeletionTimestamp verifies how the pod event handlers account for pods
// carrying a DeletionTimestamp: such pods count against delete expectations
// (never create expectations), and each deletion is counted exactly once
// across add/update/delete events.
func TestDeletionTimestamp(t *testing.T) {
	c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, 10, 0)
	manager.podStoreSynced = alwaysReady

	controllerSpec := newReplicationController(1)
	manager.rcStore.Store.Add(controllerSpec)
	rcKey, err := controller.KeyFunc(controllerSpec)
	if err != nil {
		t.Errorf("Couldn't get key for object %+v: %v", controllerSpec, err)
	}
	// A pod that is already marked for deletion, with a pending delete expectation.
	pod := newPodList(nil, 1, api.PodPending, controllerSpec, "pod").Items[0]
	pod.DeletionTimestamp = &unversioned.Time{Time: time.Now()}
	manager.expectations.ExpectDeletions(rcKey, []string{controller.PodKey(&pod)})

	// A pod added with a deletion timestamp should decrement deletions, not creations.
	manager.addPod(&pod)

	queueRC, _ := manager.queue.Get()
	if queueRC != rcKey {
		t.Fatalf("Expected to find key %v in queue, found %v", rcKey, queueRC)
	}
	manager.queue.Done(rcKey)

	// The add above satisfied the single delete expectation.
	podExp, exists, err := manager.expectations.GetExpectations(rcKey)
	if !exists || err != nil || !podExp.Fulfilled() {
		t.Fatalf("Wrong expectations %+v", podExp)
	}

	// An update from no deletion timestamp to having one should be treated
	// as a deletion.
	oldPod := newPodList(nil, 1, api.PodPending, controllerSpec, "pod").Items[0]
	manager.expectations.ExpectDeletions(rcKey, []string{controller.PodKey(&pod)})
	manager.updatePod(&oldPod, &pod)

	queueRC, _ = manager.queue.Get()
	if queueRC != rcKey {
		t.Fatalf("Expected to find key %v in queue, found %v", rcKey, queueRC)
	}
	manager.queue.Done(rcKey)

	podExp, exists, err = manager.expectations.GetExpectations(rcKey)
	if !exists || err != nil || !podExp.Fulfilled() {
		t.Fatalf("Wrong expectations %+v", podExp)
	}

	// An update to the pod (including an update to the deletion timestamp)
	// should not be counted as a second delete.
	secondPod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Namespace: pod.Namespace,
			Name:      "secondPod",
			Labels:    pod.Labels,
		},
	}
	manager.expectations.ExpectDeletions(rcKey, []string{controller.PodKey(secondPod)})
	// Both old and new pod now carry deletion timestamps, so this update must
	// not touch the expectation registered for secondPod.
	oldPod.DeletionTimestamp = &unversioned.Time{Time: time.Now()}
	manager.updatePod(&oldPod, &pod)

	podExp, exists, err = manager.expectations.GetExpectations(rcKey)
	if !exists || err != nil || podExp.Fulfilled() {
		t.Fatalf("Wrong expectations %+v", podExp)
	}

	// A pod with a non-nil deletion timestamp should also be ignored by the
	// delete handler, because it's already been counted in the update.
	manager.deletePod(&pod)
	podExp, exists, err = manager.expectations.GetExpectations(rcKey)
	if !exists || err != nil || podExp.Fulfilled() {
		t.Fatalf("Wrong expectations %+v", podExp)
	}

	// Deleting the second pod should clear expectations.
	manager.deletePod(secondPod)

	queueRC, _ = manager.queue.Get()
	if queueRC != rcKey {
		t.Fatalf("Expected to find key %v in queue, found %v", rcKey, queueRC)
	}
	manager.queue.Done(rcKey)

	podExp, exists, err = manager.expectations.GetExpectations(rcKey)
	if !exists || err != nil || !podExp.Fulfilled() {
		t.Fatalf("Wrong expectations %+v", podExp)
	}
}
|
|
|
|
|
2016-02-23 15:17:27 +00:00
|
|
|
func BenchmarkGetPodControllerMultiNS(b *testing.B) {
|
2016-02-12 18:58:43 +00:00
|
|
|
client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
|
2016-04-14 18:00:52 +00:00
|
|
|
manager := NewReplicationManagerFromClient(client, controller.NoResyncPeriodFunc, BurstReplicas, 0)
|
2016-02-23 15:17:27 +00:00
|
|
|
|
|
|
|
const nsNum = 1000
|
|
|
|
|
|
|
|
pods := []api.Pod{}
|
|
|
|
for i := 0; i < nsNum; i++ {
|
|
|
|
ns := fmt.Sprintf("ns-%d", i)
|
|
|
|
for j := 0; j < 10; j++ {
|
|
|
|
rcName := fmt.Sprintf("rc-%d", j)
|
|
|
|
for k := 0; k < 10; k++ {
|
|
|
|
podName := fmt.Sprintf("pod-%d-%d", j, k)
|
|
|
|
pods = append(pods, api.Pod{
|
|
|
|
ObjectMeta: api.ObjectMeta{
|
|
|
|
Name: podName,
|
|
|
|
Namespace: ns,
|
|
|
|
Labels: map[string]string{"rcName": rcName},
|
|
|
|
},
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
for i := 0; i < nsNum; i++ {
|
|
|
|
ns := fmt.Sprintf("ns-%d", i)
|
|
|
|
for j := 0; j < 10; j++ {
|
|
|
|
rcName := fmt.Sprintf("rc-%d", j)
|
|
|
|
manager.rcStore.Add(&api.ReplicationController{
|
|
|
|
ObjectMeta: api.ObjectMeta{Name: rcName, Namespace: ns},
|
|
|
|
Spec: api.ReplicationControllerSpec{
|
|
|
|
Selector: map[string]string{"rcName": rcName},
|
|
|
|
},
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
b.ResetTimer()
|
|
|
|
|
|
|
|
for i := 0; i < b.N; i++ {
|
|
|
|
for _, pod := range pods {
|
|
|
|
manager.getPodController(&pod)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func BenchmarkGetPodControllerSingleNS(b *testing.B) {
|
2016-02-12 18:58:43 +00:00
|
|
|
client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
|
2016-04-14 18:00:52 +00:00
|
|
|
manager := NewReplicationManagerFromClient(client, controller.NoResyncPeriodFunc, BurstReplicas, 0)
|
2016-02-23 15:17:27 +00:00
|
|
|
|
|
|
|
const rcNum = 1000
|
|
|
|
const replicaNum = 3
|
|
|
|
|
|
|
|
pods := []api.Pod{}
|
|
|
|
for i := 0; i < rcNum; i++ {
|
|
|
|
rcName := fmt.Sprintf("rc-%d", i)
|
|
|
|
for j := 0; j < replicaNum; j++ {
|
|
|
|
podName := fmt.Sprintf("pod-%d-%d", i, j)
|
|
|
|
pods = append(pods, api.Pod{
|
|
|
|
ObjectMeta: api.ObjectMeta{
|
|
|
|
Name: podName,
|
|
|
|
Namespace: "foo",
|
|
|
|
Labels: map[string]string{"rcName": rcName},
|
|
|
|
},
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
for i := 0; i < rcNum; i++ {
|
|
|
|
rcName := fmt.Sprintf("rc-%d", i)
|
|
|
|
manager.rcStore.Add(&api.ReplicationController{
|
|
|
|
ObjectMeta: api.ObjectMeta{Name: rcName, Namespace: "foo"},
|
|
|
|
Spec: api.ReplicationControllerSpec{
|
|
|
|
Selector: map[string]string{"rcName": rcName},
|
|
|
|
},
|
|
|
|
})
|
|
|
|
}
|
|
|
|
b.ResetTimer()
|
|
|
|
|
|
|
|
for i := 0; i < b.N; i++ {
|
|
|
|
for _, pod := range pods {
|
|
|
|
manager.getPodController(&pod)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|