mirror of https://github.com/k3s-io/k3s
Merge pull request #63744 from krmayankk/changelog
Automatic merge from submit-queue (batch tested with PRs 63580, 63744, 64541, 64502, 64100). If you want to cherry-pick this change to another branch, please follow the instructions here: https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md

remove redundant getKey functions from controller tests

```release-note
None
```
commit
65819a8f92
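Not part of the commit below — a minimal sketch of how a controller test can obtain a work-queue key through the consolidated `testutil.GetKey` helper after this change. The test name, Job fields, and expected key are illustrative assumptions; only the helper's package path and signature come from the diff.

```go
package example

import (
	"testing"

	batch "k8s.io/api/batch/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/controller/testutil"
)

func TestGetKeyForJob(t *testing.T) {
	// Kind must be set because testutil.GetKey reads Name and Kind via reflection.
	job := &batch.Job{
		TypeMeta:   metav1.TypeMeta{Kind: "Job"},
		ObjectMeta: metav1.ObjectMeta{Name: "foobar", Namespace: "default"},
	}
	// The helper returns the <namespace>/<name> key that controller work queues use.
	if key := testutil.GetKey(job, t); key != "default/foobar" {
		t.Errorf("unexpected key: %q", key)
	}
}
```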
```diff
@@ -84,15 +84,6 @@ var (
 	}}
 )
 
-func getKey(ds *apps.DaemonSet, t *testing.T) string {
-	key, err := controller.KeyFunc(ds)
-
-	if err != nil {
-		t.Errorf("Unexpected error getting key for ds %v: %v", ds.Name, err)
-	}
-	return key
-}
-
 func newDaemonSet(name string) *apps.DaemonSet {
 	two := int32(2)
 	return &apps.DaemonSet{
@@ -71,6 +71,7 @@ go_test(
         "//pkg/apis/storage/install:go_default_library",
         "//pkg/controller:go_default_library",
         "//pkg/controller/deployment/util:go_default_library",
+        "//pkg/controller/testutil:go_default_library",
         "//vendor/k8s.io/api/apps/v1:go_default_library",
         "//vendor/k8s.io/api/core/v1:go_default_library",
         "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
@@ -45,6 +45,7 @@ import (
 	_ "k8s.io/kubernetes/pkg/apis/storage/install"
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/controller/deployment/util"
+	"k8s.io/kubernetes/pkg/controller/testutil"
 )
 
 var (
@@ -77,7 +78,7 @@ func newRSWithStatus(name string, specReplicas, statusReplicas int, selector map
 
 func newDeployment(name string, replicas int, revisionHistoryLimit *int32, maxSurge, maxUnavailable *intstr.IntOrString, selector map[string]string) *apps.Deployment {
 	d := apps.Deployment{
-		TypeMeta:   metav1.TypeMeta{APIVersion: "apps/v1"},
+		TypeMeta:   metav1.TypeMeta{APIVersion: "apps/v1", Kind: "Deployment"},
 		ObjectMeta: metav1.ObjectMeta{
 			UID:  uuid.NewUUID(),
 			Name: name,
@@ -120,6 +121,7 @@ func newDeployment(name string, replicas int, revisionHistoryLimit *int32, maxSu
 
 func newReplicaSet(d *apps.Deployment, name string, replicas int) *apps.ReplicaSet {
 	return &apps.ReplicaSet{
+		TypeMeta: metav1.TypeMeta{Kind: "ReplicaSet"},
 		ObjectMeta: metav1.ObjectMeta{
 			Name: name,
 			UID:  uuid.NewUUID(),
@@ -135,15 +137,6 @@ func newReplicaSet(d *apps.Deployment, name string, replicas int) *apps.ReplicaS
 	}
 }
 
-func getKey(d *apps.Deployment, t *testing.T) string {
-	if key, err := controller.KeyFunc(d); err != nil {
-		t.Errorf("Unexpected error getting key for deployment %v: %v", d.Name, err)
-		return ""
-	} else {
-		return key
-	}
-}
-
 type fixture struct {
 	t *testing.T
 
@@ -285,7 +278,7 @@ func TestSyncDeploymentCreatesReplicaSet(t *testing.T) {
 	f.expectUpdateDeploymentStatusAction(d)
 	f.expectUpdateDeploymentStatusAction(d)
 
-	f.run(getKey(d, t))
+	f.run(testutil.GetKey(d, t))
 }
 
 func TestSyncDeploymentDontDoAnythingDuringDeletion(t *testing.T) {
@@ -298,7 +291,7 @@ func TestSyncDeploymentDontDoAnythingDuringDeletion(t *testing.T) {
 	f.objects = append(f.objects, d)
 
 	f.expectUpdateDeploymentStatusAction(d)
-	f.run(getKey(d, t))
+	f.run(testutil.GetKey(d, t))
 }
 
 func TestSyncDeploymentDeletionRace(t *testing.T) {
@@ -323,7 +316,7 @@ func TestSyncDeploymentDeletionRace(t *testing.T) {
 	f.expectGetDeploymentAction(d)
 	// Sync should fail and requeue to let cache catch up.
 	// Don't start informers, since we don't want cache to catch up for this test.
-	f.runExpectError(getKey(d, t), false)
+	f.runExpectError(testutil.GetKey(d, t), false)
 }
 
 // issue: https://github.com/kubernetes/kubernetes/issues/23218
@@ -337,7 +330,7 @@ func TestDontSyncDeploymentsWithEmptyPodSelector(t *testing.T) {
 
 	// Normally there should be a status update to sync observedGeneration but the fake
 	// deployment has no generation set so there is no action happpening here.
-	f.run(getKey(d, t))
+	f.run(testutil.GetKey(d, t))
 }
 
 func TestReentrantRollback(t *testing.T) {
@@ -364,7 +357,7 @@ func TestReentrantRollback(t *testing.T) {
 	// Rollback is done here
 	f.expectUpdateDeploymentAction(d)
 	// Expect no update on replica sets though
-	f.run(getKey(d, t))
+	f.run(testutil.GetKey(d, t))
 }
 
 // TestPodDeletionEnqueuesRecreateDeployment ensures that the deletion of a pod
@@ -49,6 +49,7 @@ go_test(
     deps = [
         "//pkg/apis/core/install:go_default_library",
         "//pkg/controller:go_default_library",
+        "//pkg/controller/testutil:go_default_library",
         "//vendor/k8s.io/api/batch/v1:go_default_library",
         "//vendor/k8s.io/api/core/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
@@ -40,12 +40,14 @@ import (
 	"k8s.io/client-go/util/workqueue"
 	_ "k8s.io/kubernetes/pkg/apis/core/install"
 	"k8s.io/kubernetes/pkg/controller"
+	"k8s.io/kubernetes/pkg/controller/testutil"
 )
 
 var alwaysReady = func() bool { return true }
 
 func newJob(parallelism, completions, backoffLimit int32) *batch.Job {
 	j := &batch.Job{
+		TypeMeta: metav1.TypeMeta{Kind: "Job"},
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "foobar",
 			UID:  uuid.NewUUID(),
@@ -86,15 +88,6 @@ func newJob(parallelism, completions, backoffLimit int32) *batch.Job {
 	return j
 }
 
-func getKey(job *batch.Job, t *testing.T) string {
-	if key, err := controller.KeyFunc(job); err != nil {
-		t.Errorf("Unexpected error getting key for job %v: %v", job.Name, err)
-		return ""
-	} else {
-		return key
-	}
-}
-
 func newJobControllerFromClient(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc) (*JobController, informers.SharedInformerFactory) {
 	sharedInformers := informers.NewSharedInformerFactory(kubeClient, resyncPeriod())
 	jm := NewJobController(sharedInformers.Core().V1().Pods(), sharedInformers.Batch().V1().Jobs(), kubeClient)
@@ -301,7 +294,7 @@ func TestControllerSyncJob(t *testing.T) {
 			setPodsStatuses(podIndexer, job, tc.pendingPods, tc.activePods, tc.succeededPods, tc.failedPods)
 
 			// run
-			forget, err := manager.syncJob(getKey(job, t))
+			forget, err := manager.syncJob(testutil.GetKey(job, t))
 
 			// We need requeue syncJob task if podController error
 			if tc.podControllerError != nil {
@@ -388,7 +381,7 @@ func TestSyncJobPastDeadline(t *testing.T) {
 		failedPods    int32
 
 		// expectations
-		expectedForgetKey bool
+		expectedForGetKey bool
 		expectedDeletions int32
 		expectedActive    int32
 		expectedSucceeded int32
@@ -441,12 +434,12 @@ func TestSyncJobPastDeadline(t *testing.T) {
 		setPodsStatuses(podIndexer, job, 0, tc.activePods, tc.succeededPods, tc.failedPods)
 
 		// run
-		forget, err := manager.syncJob(getKey(job, t))
+		forget, err := manager.syncJob(testutil.GetKey(job, t))
 		if err != nil {
 			t.Errorf("%s: unexpected error when syncing jobs %v", name, err)
 		}
-		if forget != tc.expectedForgetKey {
-			t.Errorf("%s: unexpected forget value. Expected %v, saw %v\n", name, tc.expectedForgetKey, forget)
+		if forget != tc.expectedForGetKey {
+			t.Errorf("%s: unexpected forget value. Expected %v, saw %v\n", name, tc.expectedForGetKey, forget)
 		}
 		// validate created/deleted pods
 		if int32(len(fakePodControl.Templates)) != 0 {
@@ -504,7 +497,7 @@ func TestSyncPastDeadlineJobFinished(t *testing.T) {
 	job.Status.StartTime = &start
 	job.Status.Conditions = append(job.Status.Conditions, newCondition(batch.JobFailed, "DeadlineExceeded", "Job was active longer than specified deadline"))
 	sharedInformerFactory.Batch().V1().Jobs().Informer().GetIndexer().Add(job)
-	forget, err := manager.syncJob(getKey(job, t))
+	forget, err := manager.syncJob(testutil.GetKey(job, t))
 	if err != nil {
 		t.Errorf("Unexpected error when syncing jobs %v", err)
 	}
@@ -533,7 +526,7 @@ func TestSyncJobComplete(t *testing.T) {
 	job := newJob(1, 1, 6)
 	job.Status.Conditions = append(job.Status.Conditions, newCondition(batch.JobComplete, "", ""))
 	sharedInformerFactory.Batch().V1().Jobs().Informer().GetIndexer().Add(job)
-	forget, err := manager.syncJob(getKey(job, t))
+	forget, err := manager.syncJob(testutil.GetKey(job, t))
 	if err != nil {
 		t.Fatalf("Unexpected error when syncing jobs %v", err)
 	}
@@ -559,7 +552,7 @@ func TestSyncJobDeleted(t *testing.T) {
 	manager.jobStoreSynced = alwaysReady
 	manager.updateHandler = func(job *batch.Job) error { return nil }
 	job := newJob(2, 2, 6)
-	forget, err := manager.syncJob(getKey(job, t))
+	forget, err := manager.syncJob(testutil.GetKey(job, t))
 	if err != nil {
 		t.Errorf("Unexpected error when syncing jobs %v", err)
 	}
@@ -584,12 +577,12 @@ func TestSyncJobUpdateRequeue(t *testing.T) {
 	manager.jobStoreSynced = alwaysReady
 	updateError := fmt.Errorf("Update error")
 	manager.updateHandler = func(job *batch.Job) error {
-		manager.queue.AddRateLimited(getKey(job, t))
+		manager.queue.AddRateLimited(testutil.GetKey(job, t))
 		return updateError
 	}
 	job := newJob(2, 2, 6)
 	sharedInformerFactory.Batch().V1().Jobs().Informer().GetIndexer().Add(job)
-	forget, err := manager.syncJob(getKey(job, t))
+	forget, err := manager.syncJob(testutil.GetKey(job, t))
 	if err == nil || err != updateError {
 		t.Errorf("Expected error %v when syncing jobs, got %v", updateError, err)
 	}
@@ -598,7 +591,7 @@ func TestSyncJobUpdateRequeue(t *testing.T) {
 	}
 	t.Log("Waiting for a job in the queue")
 	key, _ := manager.queue.Get()
-	expectedKey := getKey(job, t)
+	expectedKey := testutil.GetKey(job, t)
 	if key != expectedKey {
 		t.Errorf("Expected requeue of job with key %s got %s", expectedKey, key)
 	}
@@ -1160,7 +1153,7 @@ func TestSyncJobExpectations(t *testing.T) {
 			podIndexer.Add(&pods[1])
 		},
 	}
-	manager.syncJob(getKey(job, t))
+	manager.syncJob(testutil.GetKey(job, t))
 	if len(fakePodControl.Templates) != 0 {
 		t.Errorf("Unexpected number of creates. Expected %d, saw %d\n", 0, len(fakePodControl.Templates))
 	}
@@ -1314,7 +1307,7 @@ func TestJobBackoffReset(t *testing.T) {
 
 			// job & pods setup
 			job := newJob(tc.parallelism, tc.completions, tc.backoffLimit)
-			key := getKey(job, t)
+			key := testutil.GetKey(job, t)
 			sharedInformerFactory.Batch().V1().Jobs().Informer().GetIndexer().Add(job)
 			podIndexer := sharedInformerFactory.Core().V1().Pods().Informer().GetIndexer()
 
@@ -1472,7 +1465,7 @@ func TestJobBackoffForOnFailure(t *testing.T) {
 			}
 
 			// run
-			forget, err := manager.syncJob(getKey(job, t))
+			forget, err := manager.syncJob(testutil.GetKey(job, t))
 
 			if err != nil {
 				t.Errorf("unexpected error syncing job. Got %#v", err)
@@ -51,6 +51,7 @@ go_test(
     embed = [":go_default_library"],
     deps = [
         "//pkg/controller:go_default_library",
+        "//pkg/controller/testutil:go_default_library",
         "//pkg/securitycontext:go_default_library",
         "//vendor/k8s.io/api/apps/v1:go_default_library",
         "//vendor/k8s.io/api/core/v1:go_default_library",
@@ -47,6 +47,7 @@ import (
 	utiltesting "k8s.io/client-go/util/testing"
 	"k8s.io/client-go/util/workqueue"
 	"k8s.io/kubernetes/pkg/controller"
+	. "k8s.io/kubernetes/pkg/controller/testutil"
 	"k8s.io/kubernetes/pkg/securitycontext"
 )
 
@@ -78,18 +79,9 @@ func skipListerFunc(verb string, url url.URL) bool {
 
 var alwaysReady = func() bool { return true }
 
-func getKey(rs *apps.ReplicaSet, t *testing.T) string {
-	if key, err := controller.KeyFunc(rs); err != nil {
-		t.Errorf("Unexpected error getting key for ReplicaSet %v: %v", rs.Name, err)
-		return ""
-	} else {
-		return key
-	}
-}
-
 func newReplicaSet(replicas int, selectorMap map[string]string) *apps.ReplicaSet {
 	rs := &apps.ReplicaSet{
-		TypeMeta:   metav1.TypeMeta{APIVersion: "v1"},
+		TypeMeta:   metav1.TypeMeta{APIVersion: "v1", Kind: "ReplicaSet"},
 		ObjectMeta: metav1.ObjectMeta{
 			UID:  uuid.NewUUID(),
 			Name: "foobar",
@@ -216,7 +208,7 @@ func TestSyncReplicaSetDoesNothing(t *testing.T) {
 	newPodList(informers.Core().V1().Pods().Informer().GetIndexer(), 2, v1.PodRunning, labelMap, rsSpec, "pod")
 
 	manager.podControl = &fakePodControl
-	manager.syncReplicaSet(getKey(rsSpec, t))
+	manager.syncReplicaSet(GetKey(rsSpec, t))
 	validateSyncReplicaSet(t, &fakePodControl, 0, 0, 0)
 }
 
@@ -244,7 +236,7 @@ func TestDeleteFinalStateUnknown(t *testing.T) {
 
 	go manager.worker()
 
-	expected := getKey(rsSpec, t)
+	expected := GetKey(rsSpec, t)
 	select {
 	case key := <-received:
 		if key != expected {
@@ -271,7 +263,7 @@ func TestSyncReplicaSetCreateFailures(t *testing.T) {
 	informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rs)
 
 	manager.podControl = &fakePodControl
-	manager.syncReplicaSet(getKey(rs, t))
+	manager.syncReplicaSet(GetKey(rs, t))
 	validateSyncReplicaSet(t, &fakePodControl, fakePodControl.CreateLimit, 0, 0)
 	expectedLimit := 0
 	for pass := uint8(0); expectedLimit <= fakePodControl.CreateLimit; pass++ {
@@ -310,7 +302,7 @@ func TestSyncReplicaSetDormancy(t *testing.T) {
 	rsSpec.Status.Replicas = 1
 	rsSpec.Status.ReadyReplicas = 1
 	rsSpec.Status.AvailableReplicas = 1
-	manager.syncReplicaSet(getKey(rsSpec, t))
+	manager.syncReplicaSet(GetKey(rsSpec, t))
 	validateSyncReplicaSet(t, &fakePodControl, 1, 0, 0)
 
 	// Expectations prevents replicas but not an update on status
@@ -318,7 +310,7 @@ func TestSyncReplicaSetDormancy(t *testing.T) {
 	rsSpec.Status.ReadyReplicas = 0
 	rsSpec.Status.AvailableReplicas = 0
 	fakePodControl.Clear()
-	manager.syncReplicaSet(getKey(rsSpec, t))
+	manager.syncReplicaSet(GetKey(rsSpec, t))
 	validateSyncReplicaSet(t, &fakePodControl, 0, 0, 0)
 
 	// Get the key for the controller
@@ -336,13 +328,13 @@ func TestSyncReplicaSetDormancy(t *testing.T) {
 	fakePodControl.Clear()
 	fakePodControl.Err = fmt.Errorf("Fake Error")
 
-	manager.syncReplicaSet(getKey(rsSpec, t))
+	manager.syncReplicaSet(GetKey(rsSpec, t))
 	validateSyncReplicaSet(t, &fakePodControl, 1, 0, 0)
 
 	// This replica should not need a Lowering of expectations, since the previous create failed
 	fakePodControl.Clear()
 	fakePodControl.Err = nil
-	manager.syncReplicaSet(getKey(rsSpec, t))
+	manager.syncReplicaSet(GetKey(rsSpec, t))
 	validateSyncReplicaSet(t, &fakePodControl, 1, 0, 0)
 
 	// 2 PUT for the ReplicaSet status during dormancy window.
@@ -746,7 +738,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
 	informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rsSpec)
 
 	for i := 0; i < numReplicas; i += burstReplicas {
-		manager.syncReplicaSet(getKey(rsSpec, t))
+		manager.syncReplicaSet(GetKey(rsSpec, t))
 
 		// The store accrues active pods. It's also used by the ReplicaSet to determine how many
 		// replicas to create.
@@ -785,7 +777,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
 
 			// To accurately simulate a watch we must delete the exact pods
 			// the rs is waiting for.
-			expectedDels := manager.expectations.GetUIDs(getKey(rsSpec, t))
+			expectedDels := manager.expectations.GetUIDs(GetKey(rsSpec, t))
 			podsToDelete := []*v1.Pod{}
 			isController := true
 			for _, key := range expectedDels.List() {
@@ -819,7 +811,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
 
 		// Check that the ReplicaSet didn't take any action for all the above pods
 		fakePodControl.Clear()
-		manager.syncReplicaSet(getKey(rsSpec, t))
+		manager.syncReplicaSet(GetKey(rsSpec, t))
 		validateSyncReplicaSet(t, &fakePodControl, 0, 0, 0)
 
 		// Create/Delete the last pod
@@ -829,7 +821,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
 			informers.Core().V1().Pods().Informer().GetIndexer().Add(&pods.Items[expectedPods-1])
 			manager.addPod(&pods.Items[expectedPods-1])
 		} else {
-			expectedDel := manager.expectations.GetUIDs(getKey(rsSpec, t))
+			expectedDel := manager.expectations.GetUIDs(GetKey(rsSpec, t))
 			if expectedDel.Len() != 1 {
 				t.Fatalf("Waiting on unexpected number of deletes.")
 			}
@@ -903,7 +895,7 @@ func TestRSSyncExpectations(t *testing.T) {
 			informers.Core().V1().Pods().Informer().GetIndexer().Add(&postExpectationsPod)
 		},
 	})
-	manager.syncReplicaSet(getKey(rsSpec, t))
+	manager.syncReplicaSet(GetKey(rsSpec, t))
 	validateSyncReplicaSet(t, &fakePodControl, 0, 0, 0)
 }
 
@@ -920,7 +912,7 @@ func TestDeleteControllerAndExpectations(t *testing.T) {
 	manager.podControl = &fakePodControl
 
 	// This should set expectations for the ReplicaSet
-	manager.syncReplicaSet(getKey(rs, t))
+	manager.syncReplicaSet(GetKey(rs, t))
 	validateSyncReplicaSet(t, &fakePodControl, 1, 0, 0)
 	fakePodControl.Clear()
 
@@ -937,7 +929,7 @@ func TestDeleteControllerAndExpectations(t *testing.T) {
 		t.Errorf("No expectations found for ReplicaSet")
 	}
 	informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Delete(rs)
-	manager.syncReplicaSet(getKey(rs, t))
+	manager.syncReplicaSet(GetKey(rs, t))
 
 	if _, exists, err = manager.expectations.GetExpectations(rsKey); exists {
 		t.Errorf("Found expectaions, expected none since the ReplicaSet has been deleted.")
@@ -946,7 +938,7 @@ func TestDeleteControllerAndExpectations(t *testing.T) {
 	// This should have no effect, since we've deleted the ReplicaSet.
 	podExp.Add(-1, 0)
 	informers.Core().V1().Pods().Informer().GetIndexer().Replace(make([]interface{}, 0), "0")
-	manager.syncReplicaSet(getKey(rs, t))
+	manager.syncReplicaSet(GetKey(rs, t))
 	validateSyncReplicaSet(t, &fakePodControl, 0, 0, 0)
 }
 
@@ -995,7 +987,7 @@ func TestOverlappingRSs(t *testing.T) {
 		pod.OwnerReferences = []metav1.OwnerReference{
 			{UID: rs.UID, APIVersion: "v1", Kind: "ReplicaSet", Name: rs.Name, Controller: &isController},
 		}
-		rsKey := getKey(rs, t)
+		rsKey := GetKey(rs, t)
 
 		manager.addPod(pod)
 		queueRS, _ := manager.queue.Get()
@@ -1123,7 +1115,7 @@ func TestDoNotPatchPodWithOtherControlRef(t *testing.T) {
 	pod := newPod("pod", rs, v1.PodRunning, nil, true)
 	pod.OwnerReferences = []metav1.OwnerReference{otherControllerReference}
 	informers.Core().V1().Pods().Informer().GetIndexer().Add(pod)
-	err := manager.syncReplicaSet(getKey(rs, t))
+	err := manager.syncReplicaSet(GetKey(rs, t))
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -1145,7 +1137,7 @@ func TestPatchPodFails(t *testing.T) {
 	// let both patches fail. The rs controller will assume it fails to take
 	// control of the pods and requeue to try again.
 	fakePodControl.Err = fmt.Errorf("Fake Error")
-	rsKey := getKey(rs, t)
+	rsKey := GetKey(rs, t)
 	err := processSync(manager, rsKey)
 	if err == nil || !strings.Contains(err.Error(), "Fake Error") {
 		t.Errorf("expected Fake Error, got %+v", err)
@@ -1174,7 +1166,7 @@ func TestDoNotAdoptOrCreateIfBeingDeleted(t *testing.T) {
 	informers.Core().V1().Pods().Informer().GetIndexer().Add(pod1)
 
 	// no patch, no create
-	err := manager.syncReplicaSet(getKey(rs, t))
+	err := manager.syncReplicaSet(GetKey(rs, t))
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -1200,7 +1192,7 @@ func TestDoNotAdoptOrCreateIfBeingDeletedRace(t *testing.T) {
 	informers.Core().V1().Pods().Informer().GetIndexer().Add(pod1)
 
 	// sync should abort.
-	err := manager.syncReplicaSet(getKey(rs, t))
+	err := manager.syncReplicaSet(GetKey(rs, t))
 	if err == nil {
 		t.Error("syncReplicaSet() err = nil, expected non-nil")
 	}
@@ -27,6 +27,7 @@ go_library(
         "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
         "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
+        "//vendor/k8s.io/client-go/tools/cache:go_default_library",
        "//vendor/k8s.io/client-go/tools/reference:go_default_library",
    ],
)
@@ -20,7 +20,9 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
+	"reflect"
 	"sync"
+	"testing"
 	"time"
 
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -38,6 +40,7 @@ import (
 	"k8s.io/api/core/v1"
 	"k8s.io/client-go/kubernetes/fake"
 	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
+	"k8s.io/client-go/tools/cache"
 	"k8s.io/kubernetes/pkg/api/legacyscheme"
 	api "k8s.io/kubernetes/pkg/apis/core"
 	utilnode "k8s.io/kubernetes/pkg/util/node"
@@ -46,6 +49,10 @@ import (
 	"github.com/golang/glog"
 )
 
+var (
+	keyFunc = cache.DeletionHandlingMetaNamespaceKeyFunc
+)
+
 // FakeNodeHandler is a fake implementation of NodesInterface and NodeInterface. It
 // allows test cases to have fine-grained control over mock behaviors. We also need
 // PodsInterface and PodInterface to test list & delet pods, which is implemented in
@@ -485,3 +492,27 @@ func GetZones(nodeHandler *FakeNodeHandler) []string {
 func CreateZoneID(region, zone string) string {
 	return region + ":\x00:" + zone
 }
+
+// GetKey is a helper function used by controllers unit tests to get the
+// key for a given kubernetes resource.
+func GetKey(obj interface{}, t *testing.T) string {
+	tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
+	if ok {
+		// if tombstone , try getting the value from tombstone.Obj
+		obj = tombstone.Obj
+	}
+	val := reflect.ValueOf(obj).Elem()
+	name := val.FieldByName("Name").String()
+	kind := val.FieldByName("Kind").String()
+	// Note kind is not always set in the tests, so ignoring that for now
+	if len(name) == 0 || len(kind) == 0 {
+		t.Errorf("Unexpected object %v", obj)
+	}
+
+	key, err := keyFunc(obj)
+	if err != nil {
+		t.Errorf("Unexpected error getting key for %v %v: %v", kind, name, err)
+		return ""
+	}
+	return key
+}
```
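A hypothetical follow-up sketch (not in the commit): because the helper unwraps a `cache.DeletedFinalStateUnknown` tombstone before computing the key, tests that exercise deletion paths can pass the tombstone directly. The test name, ReplicaSet fields, and expected key are illustrative assumptions.

```go
package example

import (
	"testing"

	apps "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
	"k8s.io/kubernetes/pkg/controller/testutil"
)

func TestGetKeyFromTombstone(t *testing.T) {
	rs := &apps.ReplicaSet{
		TypeMeta:   metav1.TypeMeta{Kind: "ReplicaSet"},
		ObjectMeta: metav1.ObjectMeta{Name: "foobar", Namespace: "default"},
	}
	// Deletion handlers may hand the controller a tombstone instead of the object;
	// GetKey falls back to tombstone.Obj before building the <namespace>/<name> key.
	tombstone := cache.DeletedFinalStateUnknown{Key: "default/foobar", Obj: rs}
	if key := testutil.GetKey(tombstone, t); key != "default/foobar" {
		t.Errorf("unexpected key: %q", key)
	}
}
```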