mirror of https://github.com/k3s-io/k3s
Fix scale x->x in kubectl for ReplicationController
parent bdeeb9db90
commit 79d8c9754d
@@ -198,27 +198,45 @@ func (scaler *ReplicationControllerScaler) Scale(namespace, name string, newSize
         return err
     }
     if waitForReplicas != nil {
-        watchOptions := api.ListOptions{FieldSelector: fields.OneTermEqualSelector("metadata.name", name), ResourceVersion: updatedResourceVersion}
-        watcher, err := scaler.c.ReplicationControllers(namespace).Watch(watchOptions)
+        checkRC := func(rc *api.ReplicationController) bool {
+            if uint(rc.Spec.Replicas) != newSize {
+                // the size was changed by another party. Don't need to wait for the new change to complete.
+                return true
+            }
+            return rc.Status.ObservedGeneration >= rc.Generation && rc.Status.Replicas == rc.Spec.Replicas
+        }
+        // If the number of replicas doesn't change, then the update may not even
+        // be sent to the underlying database (we don't send no-op changes).
+        // In such a case, <updatedResourceVersion> will have the value of the most
+        // recent update (which may be far in the past), so we may get a "too old
+        // RV" error from the watch, or potentially no ReplicationController events
+        // will be delivered, since it may already be in the expected state.
+        // To protect from these two, we first issue a Get() to ensure that we
+        // are not already in the expected state.
+        currentRC, err := scaler.c.ReplicationControllers(namespace).Get(name)
         if err != nil {
             return err
         }
-        _, err = watch.Until(waitForReplicas.Timeout, watcher, func(event watch.Event) (bool, error) {
-            if event.Type != watch.Added && event.Type != watch.Modified {
-                return false, nil
+        if !checkRC(currentRC) {
+            watchOptions := api.ListOptions{
+                FieldSelector:   fields.OneTermEqualSelector("metadata.name", name),
+                ResourceVersion: updatedResourceVersion,
             }
-            rc := event.Object.(*api.ReplicationController)
-            if uint(rc.Spec.Replicas) != newSize {
-                // the size was changed by another party. Don't need to wait for the new change to complete.
-                return true, nil
+            watcher, err := scaler.c.ReplicationControllers(namespace).Watch(watchOptions)
+            if err != nil {
+                return err
             }
-            return rc.Status.ObservedGeneration >= rc.Generation && rc.Status.Replicas == rc.Spec.Replicas, nil
-        })
-        if err == wait.ErrWaitTimeout {
-            return fmt.Errorf("timed out waiting for %q to be synced", name)
+            _, err = watch.Until(waitForReplicas.Timeout, watcher, func(event watch.Event) (bool, error) {
+                if event.Type != watch.Added && event.Type != watch.Modified {
+                    return false, nil
+                }
+                return checkRC(event.Object.(*api.ReplicationController)), nil
+            })
+            if err == wait.ErrWaitTimeout {
+                return fmt.Errorf("timed out waiting for %q to be synced", name)
+            }
+            return err
         }
-        return err
     }
     return nil
 }
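For readers skimming the diff, here is a minimal, self-contained sketch of the Get-then-Watch pattern the new code follows. The rcState struct, rcClient interface, fakeClient, and the waitForScale/doneWaiting helpers below are simplified stand-ins invented for illustration; they are not the real kubectl or client API. The point is the ordering: check the current object first, so that a no-op scale (x -> x), which may produce no update and therefore no watch event, returns immediately instead of hanging on a watch or failing with a "too old" resource version.

// Sketch only: simplified stand-ins for the real ReplicationController client.
package main

import (
    "errors"
    "fmt"
    "time"
)

// rcState is a stripped-down stand-in for a ReplicationController's spec and status.
type rcState struct {
    SpecReplicas       uint
    StatusReplicas     uint
    Generation         int64
    ObservedGeneration int64
}

// rcClient is a hypothetical client: Get returns the current state, Watch streams
// subsequent states until the channel is closed.
type rcClient interface {
    Get(name string) (rcState, error)
    Watch(name string) (<-chan rcState, error)
}

// doneWaiting reports whether waiting can stop: either another party changed the
// desired size, or the controller has observed and reached the requested size.
func doneWaiting(rc rcState, newSize uint) bool {
    if rc.SpecReplicas != newSize {
        return true
    }
    return rc.ObservedGeneration >= rc.Generation && rc.StatusReplicas == rc.SpecReplicas
}

// waitForScale issues a Get first so a no-op scale returns immediately, and only
// falls back to a watch when the controller is not yet in the desired state.
func waitForScale(c rcClient, name string, newSize uint, timeout time.Duration) error {
    current, err := c.Get(name)
    if err != nil {
        return err
    }
    if doneWaiting(current, newSize) {
        return nil
    }
    events, err := c.Watch(name)
    if err != nil {
        return err
    }
    deadline := time.After(timeout)
    for {
        select {
        case rc, ok := <-events:
            if !ok {
                return errors.New("watch closed before the controller was synced")
            }
            if doneWaiting(rc, newSize) {
                return nil
            }
        case <-deadline:
            return fmt.Errorf("timed out waiting for %q to be synced", name)
        }
    }
}

// fakeClient is a trivial in-memory client used only to exercise the sketch.
type fakeClient struct{ state rcState }

func (f fakeClient) Get(string) (rcState, error) { return f.state, nil }
func (f fakeClient) Watch(string) (<-chan rcState, error) {
    ch := make(chan rcState)
    close(ch)
    return ch, nil
}

func main() {
    // Scaling 3 -> 3 on an already-synced controller returns from the initial
    // Get without ever opening a watch.
    c := fakeClient{state: rcState{SpecReplicas: 3, StatusReplicas: 3, Generation: 1, ObservedGeneration: 1}}
    fmt.Println(waitForScale(c, "frontend", 3, time.Second)) // <nil>
}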
@@ -70,7 +70,7 @@ func TestReplicationControllerStop(t *testing.T) {
             },
         },
         StopError: nil,
-        ExpectedActions: []string{"get", "list", "get", "update", "watch", "delete"},
+        ExpectedActions: []string{"get", "list", "get", "update", "get", "delete"},
     },
     {
         Name: "NoOverlapping",

@@ -108,7 +108,7 @@ func TestReplicationControllerStop(t *testing.T) {
             },
         },
         StopError: nil,
-        ExpectedActions: []string{"get", "list", "get", "update", "watch", "delete"},
+        ExpectedActions: []string{"get", "list", "get", "update", "get", "delete"},
     },
     {
         Name: "OverlappingError",
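These test expectations follow from the change above: the scaler now issues a Get before deciding whether to watch, so when the fake client reports a controller that is already in the desired state, a "get" action is recorded where a "watch" used to appear.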