mirror of https://github.com/k3s-io/k3s
Namespace controller error handling improvements
parent 649b6879d7
commit a301a2565e
@@ -112,11 +112,16 @@ func (nm *NamespaceController) worker() {

			if err := nm.syncNamespaceFromKey(key.(string)); err != nil {
				if estimate, ok := err.(*contentRemainingError); ok {
					go func() {
						defer utilruntime.HandleCrash()
						t := estimate.Estimate/2 + 1
						glog.V(4).Infof("Content remaining in namespace %s, waiting %d seconds", key, t)
						time.Sleep(time.Duration(t) * time.Second)
						nm.queue.Add(key)
					}()
				} else {
					// rather than wait for a full resync, re-add the namespace to the queue to be processed
					nm.queue.Add(key)
					utilruntime.HandleError(err)
				}
			}
		}()
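The estimate-based requeue above depends on a contentRemainingError type that this hunk does not show. Inferred from the type assertion and the estimate.Estimate field, a minimal sketch of what it plausibly looks like follows; the definition below is an assumption for illustration, not part of this commit:

// Sketch only: assumed shape of the error type used by the worker above.
package namespacecontroller

import "fmt"

// contentRemainingError signals that objects still exist in the namespace;
// Estimate is a hint, in seconds, of how long cleanup may take.
type contentRemainingError struct {
	Estimate int64
}

func (e *contentRemainingError) Error() string {
	return fmt.Sprintf("some content remains in the namespace, estimate %d seconds before it is removed", e.Estimate)
}

Because the wait happens in a goroutine guarded by utilruntime.HandleCrash, the worker stays free to process other namespaces while this key sleeps for roughly half its estimated cleanup time (at least one second) before being re-queued.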
@@ -244,6 +244,12 @@ func syncNamespace(kubeClient clientset.Interface, versions *unversioned.APIVers

	// we have removed content, so mark it finalized by us
	result, err := retryOnConflictError(kubeClient, namespace, finalizeNamespaceFunc)
	if err != nil {
		// in normal practice, this should not be possible, but if a deployment is running
		// two controllers to do namespace deletion that share a common finalizer token it's
		// possible that a not found could occur since the other controller would have finished the delete.
		if errors.IsNotFound(err) {
			return nil
		}
		return err
	}
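retryOnConflictError and finalizeNamespaceFunc are likewise referenced but not shown in this hunk. A plausible shape for the retry helper, assuming the internal clientset of that Kubernetes era (import paths, the updateNamespaceFunc signature, and the single-argument Get are all assumptions for illustration):

// Sketch only: a retry-on-conflict loop that re-fetches the namespace and
// re-applies fn whenever the write fails with a 409 Conflict.
package namespacecontroller

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/errors"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
)

// updateNamespaceFunc applies one mutation attempt against a namespace.
type updateNamespaceFunc func(kubeClient clientset.Interface, namespace *api.Namespace) (*api.Namespace, error)

func retryOnConflictError(kubeClient clientset.Interface, namespace *api.Namespace, fn updateNamespaceFunc) (*api.Namespace, error) {
	latest := namespace
	for {
		result, err := fn(kubeClient, latest)
		if err == nil {
			return result, nil
		}
		// surface any error other than an optimistic-concurrency conflict
		if !errors.IsConflict(err) {
			return nil, err
		}
		// another writer won the race; refresh the namespace and try again
		latest, err = kubeClient.Core().Namespaces().Get(latest.Name)
		if err != nil {
			return nil, err
		}
	}
}

Combined with the IsNotFound check in the hunk above, this makes finalization tolerant of both update conflicts and a second controller racing to remove the same finalizer token.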