fix bug in kubectl when deleting uninitialized resources

pull/6/head
Di Xu 2017-08-23 17:25:16 +08:00
parent 4a6bbb9f50
commit 3bc47924a9
2 changed files with 11 additions and 0 deletions

@@ -404,6 +404,11 @@ func (reaper *DeploymentReaper) Stop(namespace, name string, timeout time.Durati
 	if err != nil {
 		return err
 	}
+	if deployment.Initializers != nil {
+		var falseVar = false
+		nonOrphanOption := metav1.DeleteOptions{OrphanDependents: &falseVar}
+		return deployments.Delete(name, &nonOrphanOption)
+	}
 	// Use observedGeneration to determine if the deployment controller noticed the pause.
 	if err := deploymentutil.WaitForObservedDeploymentInternal(func() (*extensions.Deployment, error) {
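
The added block short-circuits the reaper for deployments that still carry pending initializers: instead of pausing the deployment and waiting for a controller that will never observe an uninitialized object, it deletes it immediately with OrphanDependents=false so the dependents go with it. Below is a standalone sketch of that delete pattern, assuming a 2017-era client-go typed clientset; the package name, the function deleteUninitializedDeployment, and the clientset variable cs are illustrative, not part of this commit.

package example

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// deleteUninitializedDeployment removes a deployment and its dependents by
// setting OrphanDependents=false, the same non-orphaning delete the reaper
// now issues for uninitialized deployments.
func deleteUninitializedDeployment(cs kubernetes.Interface, namespace, name string) error {
	orphan := false
	opts := &metav1.DeleteOptions{OrphanDependents: &orphan}
	if err := cs.ExtensionsV1beta1().Deployments(namespace).Delete(name, opts); err != nil {
		return fmt.Errorf("deleting uninitialized deployment %q: %v", name, err)
	}
	return nil
}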

@@ -305,6 +305,9 @@ func (scaler *ReplicaSetScaler) Scale(namespace, name string, newSize uint, prec
 	if err != nil {
 		return err
 	}
+	if rs.Initializers != nil {
+		return nil
+	}
 	err = wait.Poll(waitForReplicas.Interval, waitForReplicas.Timeout, client.ReplicaSetHasDesiredReplicas(scaler.c, rs))
 	if err == wait.ErrWaitTimeout {
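
The scaler change applies the same reasoning: an uninitialized ReplicaSet has no controller reconciling it yet, so polling for the desired replica count would only run into the wait timeout, and the new guard returns early instead. A rough standalone sketch of that guard around a poll loop, again assuming a 2017-era client-go clientset; waitForScale, cs, and the 30-second timeout are illustrative, not from this commit.

package example

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForScale polls until the ReplicaSet reports the desired replica count,
// but skips the wait entirely for uninitialized objects, mirroring the early
// return added in this commit.
func waitForScale(cs kubernetes.Interface, namespace, name string, want int32) error {
	rs, err := cs.ExtensionsV1beta1().ReplicaSets(namespace).Get(name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	if rs.Initializers != nil {
		// Nothing reconciles an uninitialized ReplicaSet; waiting would time out.
		return nil
	}
	return wait.Poll(time.Second, 30*time.Second, func() (bool, error) {
		cur, err := cs.ExtensionsV1beta1().ReplicaSets(namespace).Get(name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		return cur.Status.ObservedGeneration >= cur.Generation && cur.Status.Replicas == want, nil
	})
}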
@@ -373,6 +376,9 @@ func (scaler *StatefulSetScaler) Scale(namespace, name string, newSize uint, pre
 	if err != nil {
 		return err
 	}
+	if job.Initializers != nil {
+		return nil
+	}
 	err = wait.Poll(waitForReplicas.Interval, waitForReplicas.Timeout, client.StatefulSetHasDesiredReplicas(scaler.c, job))
 	if err == wait.ErrWaitTimeout {
 		return fmt.Errorf("timed out waiting for %q to be synced", name)
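
Both scaler hunks add the identical nil check on Initializers, so the same condition could also be expressed once as a small helper over metav1.ObjectMeta. A sketch under that assumption; isInitialized is hypothetical and not something this commit adds.

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// isInitialized reports whether an object has finished admission
// initialization. With the alpha initializers feature, the Initializers field
// is cleared once every pending initializer has run, so a non-nil value with
// pending entries means the object is still uninitialized.
func isInitialized(meta metav1.ObjectMeta) bool {
	return meta.Initializers == nil || len(meta.Initializers.Pending) == 0
}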