Merge pull request #31698 from m1093782566/m109-pet-set-errs-err

Automatic merge from submit-queue

[Pet Set] Refactor return []error to error


**Which issue this PR fixes** 

I propose refactoring the return value from `[]error` to `error` [here](https://github.com/kubernetes/kubernetes/blob/master/pkg/controller/petset/pet_set.go#L80), in order to simplify the code and produce cleaner error output.

I think we can use `errors.NewAggregate(errorList)` to aggregate a `[]error` into a single `error`.
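
For illustration, a minimal sketch of how that aggregation behaves (using the `k8s.io/kubernetes/pkg/util/errors` import path this PR adds; the exact formatting of the combined message may differ):

```go
package main

import (
	"fmt"

	utilerrors "k8s.io/kubernetes/pkg/util/errors"
)

func main() {
	// An empty (or nil) list aggregates to a nil error, so callers can keep
	// the idiomatic `if err != nil` check instead of `len(errs) != 0`.
	fmt.Println(utilerrors.NewAggregate(nil) == nil) // true

	// A non-empty list collapses into a single error whose Error() string
	// includes every underlying message.
	errs := []error{
		fmt.Errorf("waiting for pods controller to sync"),
		fmt.Errorf("failed to update replica count"),
	}
	fmt.Println(utilerrors.NewAggregate(errs))
}
```

That single aggregated error is what lets `Sync` and `syncPetSet` keep a plain `error` signature while still surfacing every underlying failure.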
Kubernetes Submit Queue 2016-09-05 22:09:07 -07:00 committed by GitHub
commit 562e1747a8
2 changed files with 51 additions and 46 deletions

pkg/controller/petset/pet_set.go

@@ -31,6 +31,7 @@ import (
 "k8s.io/kubernetes/pkg/controller"
 "k8s.io/kubernetes/pkg/controller/framework"
 "k8s.io/kubernetes/pkg/runtime"
+"k8s.io/kubernetes/pkg/util/errors"
 utilruntime "k8s.io/kubernetes/pkg/util/runtime"
 "k8s.io/kubernetes/pkg/util/wait"
 "k8s.io/kubernetes/pkg/util/workqueue"
@@ -77,7 +78,7 @@ type PetSetController struct {
 // syncHandler handles sync events for petsets.
 // Abstracted as a func to allow injection for testing.
-syncHandler func(psKey string) []error
+syncHandler func(psKey string) error
 }
 // NewPetSetController creates a new petset controller.
@@ -266,8 +267,8 @@ func (psc *PetSetController) worker() {
 return
 }
 defer psc.queue.Done(key)
-if errs := psc.syncHandler(key.(string)); len(errs) != 0 {
-glog.Errorf("Error syncing PetSet %v, requeuing: %v", key.(string), errs)
+if err := psc.syncHandler(key.(string)); err != nil {
+glog.Errorf("Error syncing PetSet %v, requeuing: %v", key.(string), err)
 psc.queue.AddRateLimited(key)
 } else {
 psc.queue.Forget(key)
@@ -277,7 +278,7 @@ func (psc *PetSetController) worker() {
 }
 // Sync syncs the given petset.
-func (psc *PetSetController) Sync(key string) []error {
+func (psc *PetSetController) Sync(key string) error {
 startTime := time.Now()
 defer func() {
 glog.V(4).Infof("Finished syncing pet set %q (%v)", key, time.Now().Sub(startTime))
@@ -286,45 +287,45 @@ func (psc *PetSetController) Sync(key string) []error {
 if !psc.podStoreSynced() {
 // Sleep so we give the pod reflector goroutine a chance to run.
 time.Sleep(PodStoreSyncedPollPeriod)
-return []error{fmt.Errorf("waiting for pods controller to sync")}
+return fmt.Errorf("waiting for pods controller to sync")
 }
 obj, exists, err := psc.psStore.Store.GetByKey(key)
 if !exists {
 if err = psc.blockingPetStore.store.Delete(key); err != nil {
-return []error{err}
+return err
 }
 glog.Infof("PetSet has been deleted %v", key)
-return []error{}
+return nil
 }
 if err != nil {
 glog.Errorf("Unable to retrieve PetSet %v from store: %v", key, err)
-return []error{err}
+return err
 }
 ps := *obj.(*apps.PetSet)
 petList, err := psc.getPodsForPetSet(&ps)
 if err != nil {
-return []error{err}
+return err
 }
-numPets, errs := psc.syncPetSet(&ps, petList)
-if err := updatePetCount(psc.kubeClient, ps, numPets); err != nil {
-glog.Infof("Failed to update replica count for petset %v/%v; requeuing; error: %v", ps.Namespace, ps.Name, err)
-errs = append(errs, err)
+numPets, syncErr := psc.syncPetSet(&ps, petList)
+if updateErr := updatePetCount(psc.kubeClient, ps, numPets); updateErr != nil {
+glog.Infof("Failed to update replica count for petset %v/%v; requeuing; error: %v", ps.Namespace, ps.Name, updateErr)
+return errors.NewAggregate([]error{syncErr, updateErr})
 }
-return errs
+return syncErr
 }
 // syncPetSet syncs a tuple of (petset, pets).
-func (psc *PetSetController) syncPetSet(ps *apps.PetSet, pets []*api.Pod) (int, []error) {
+func (psc *PetSetController) syncPetSet(ps *apps.PetSet, pets []*api.Pod) (int, error) {
 glog.Infof("Syncing PetSet %v/%v with %d pets", ps.Namespace, ps.Name, len(pets))
 it := NewPetSetIterator(ps, pets)
 blockingPet, err := psc.blockingPetStore.Get(ps, pets)
 if err != nil {
-return 0, []error{err}
+return 0, err
 }
 if blockingPet != nil {
 glog.Infof("PetSet %v blocked from scaling on pet %v", ps.Name, blockingPet.pod.Name)
@@ -357,5 +358,5 @@ func (psc *PetSetController) syncPetSet(ps *apps.PetSet, pets []*api.Pod) (int,
 // TODO: GC pvcs. We can't delete them per pet because of grace period, and
 // in fact we *don't want to* till petset is stable to guarantee that bugs
 // in the controller don't corrupt user data.
-return numPets, it.errs
+return numPets, errors.NewAggregate(it.errs)
 }

pkg/controller/petset/pet_set_test.go

@@ -26,6 +26,7 @@ import (
 "k8s.io/kubernetes/pkg/apis/apps"
 "k8s.io/kubernetes/pkg/client/cache"
 "k8s.io/kubernetes/pkg/controller"
+"k8s.io/kubernetes/pkg/util/errors"
 )
 func newFakePetSetController() (*PetSetController, *fakePetClient) {
@@ -67,25 +68,26 @@ func checkPets(ps *apps.PetSet, creates, deletes int, fc *fakePetClient, t *test
 }
 }
-func scalePetSet(t *testing.T, ps *apps.PetSet, psc *PetSetController, fc *fakePetClient, scale int) []error {
+func scalePetSet(t *testing.T, ps *apps.PetSet, psc *PetSetController, fc *fakePetClient, scale int) error {
 errs := []error{}
 for i := 0; i < scale; i++ {
 pl := fc.getPodList()
 if len(pl) != i {
 t.Errorf("Unexpected number of pets, expected %d found %d", i, len(fc.pets))
 }
-_, syncErrs := psc.syncPetSet(ps, pl)
-errs = append(errs, syncErrs...)
+if _, syncErr := psc.syncPetSet(ps, pl); syncErr != nil {
+errs = append(errs, syncErr)
+}
 fc.setHealthy(i)
 checkPets(ps, i+1, 0, fc, t)
 }
-return errs
+return errors.NewAggregate(errs)
 }
 func saturatePetSet(t *testing.T, ps *apps.PetSet, psc *PetSetController, fc *fakePetClient) {
-errs := scalePetSet(t, ps, psc, fc, ps.Spec.Replicas)
-if len(errs) != 0 {
-t.Errorf("%v", errs)
+err := scalePetSet(t, ps, psc, fc, ps.Spec.Replicas)
+if err != nil {
+t.Errorf("Error scalePetSet: %v", err)
 }
 }
@@ -99,8 +101,8 @@ func TestPetSetControllerCreates(t *testing.T) {
 podList := fc.getPodList()
 // Deleted pet gets recreated
 fc.pets = fc.pets[:replicas-1]
-if _, errs := psc.syncPetSet(ps, podList); len(errs) != 0 {
-t.Errorf("%v", errs)
+if _, err := psc.syncPetSet(ps, podList); err != nil {
+t.Errorf("Error syncing PetSet: %v", err)
 }
 checkPets(ps, replicas+1, 0, fc, t)
 }
@@ -120,11 +122,12 @@ func TestPetSetControllerDeletes(t *testing.T) {
 if len(fc.pets) != i+1 {
 t.Errorf("Unexpected number of pets, expected %d found %d", i, len(fc.pets))
 }
-_, syncErrs := psc.syncPetSet(ps, knownPods)
-errs = append(errs, syncErrs...)
+if _, syncErr := psc.syncPetSet(ps, knownPods); syncErr != nil {
+errs = append(errs, syncErr)
+}
 }
 if len(errs) != 0 {
-t.Errorf("%v", errs)
+t.Errorf("Error syncing PetSet: %v", errors.NewAggregate(errs))
 }
 checkPets(ps, replicas, replicas, fc, t)
 }
@@ -138,9 +141,9 @@ func TestPetSetControllerRespectsTermination(t *testing.T) {
 fc.setDeletionTimestamp(replicas - 1)
 ps.Spec.Replicas = 2
-_, errs := psc.syncPetSet(ps, fc.getPodList())
-if len(errs) != 0 {
-t.Errorf("%v", errs)
+_, err := psc.syncPetSet(ps, fc.getPodList())
+if err != nil {
+t.Errorf("Error syncing PetSet: %v", err)
 }
 // Finding a pod with the deletion timestamp will pause all deletions.
 knownPods := fc.getPodList()
@@ -148,9 +151,9 @@ func TestPetSetControllerRespectsTermination(t *testing.T) {
 t.Errorf("Pods deleted prematurely before deletion timestamp expired, len %d", len(knownPods))
 }
 fc.pets = fc.pets[:replicas-1]
-_, errs = psc.syncPetSet(ps, fc.getPodList())
-if len(errs) != 0 {
-t.Errorf("%v", errs)
+_, err = psc.syncPetSet(ps, fc.getPodList())
+if err != nil {
+t.Errorf("Error syncing PetSet: %v", err)
 }
 checkPets(ps, replicas, 1, fc, t)
 }
@@ -175,12 +178,13 @@ func TestPetSetControllerRespectsOrder(t *testing.T) {
 if len(fc.pets) != replicas-i {
 t.Errorf("Unexpected number of pets, expected %d found %d", i, len(fc.pets))
 }
-_, syncErrs := psc.syncPetSet(ps, knownPods)
-errs = append(errs, syncErrs...)
+if _, syncErr := psc.syncPetSet(ps, knownPods); syncErr != nil {
+errs = append(errs, syncErr)
+}
 checkPets(ps, replicas, i+1, fc, t)
 }
 if len(errs) != 0 {
-t.Errorf("%v", errs)
+t.Errorf("Error syncing PetSet: %v", errors.NewAggregate(errs))
 }
 }
@@ -193,15 +197,15 @@ func TestPetSetControllerBlocksScaling(t *testing.T) {
 // Create 4th pet, then before flipping it to healthy, kill the first pet.
 // There should only be 1 not-healty pet at a time.
 pl := fc.getPodList()
-if _, errs := psc.syncPetSet(ps, pl); len(errs) != 0 {
-t.Errorf("%v", errs)
+if _, err := psc.syncPetSet(ps, pl); err != nil {
+t.Errorf("Error syncing PetSet: %v", err)
 }
 deletedPod := pl[0]
 fc.deletePetAtIndex(0)
 pl = fc.getPodList()
-if _, errs := psc.syncPetSet(ps, pl); len(errs) != 0 {
-t.Errorf("%v", errs)
+if _, err := psc.syncPetSet(ps, pl); err != nil {
+t.Errorf("Error syncing PetSet: %v", err)
 }
 newPodList := fc.getPodList()
 for _, p := range newPodList {
@@ -211,8 +215,8 @@ func TestPetSetControllerBlocksScaling(t *testing.T) {
 }
 fc.setHealthy(len(newPodList) - 1)
-if _, errs := psc.syncPetSet(ps, pl); len(errs) != 0 {
-t.Errorf("%v", errs)
+if _, err := psc.syncPetSet(ps, pl); err != nil {
+t.Errorf("Error syncing PetSet: %v", err)
 }
 found := false
@@ -247,8 +251,8 @@ func TestPetSetBlockingPetIsCleared(t *testing.T) {
 if err := psc.psStore.Store.Delete(ps); err != nil {
 t.Fatalf("Unable to delete pet %v from petset controller store.", ps.Name)
 }
-if errs := psc.Sync(fmt.Sprintf("%v/%v", ps.Namespace, ps.Name)); len(errs) != 0 {
-t.Errorf("Error during sync of deleted petset %v", errs)
+if err := psc.Sync(fmt.Sprintf("%v/%v", ps.Namespace, ps.Name)); err != nil {
+t.Errorf("Error during sync of deleted petset %v", err)
 }
 fc.pets = []*pcb{}
 fc.petsCreated = 0