Tone down scheduler logging

Having no nodes in the cluster is unusual and likely indicates a test
environment, and when a pod has already been deleted there is no need
to log information about our inability to schedule it.
Clayton Coleman 2015-05-16 19:46:50 -04:00
parent 78ad32a538
commit d00f705652
2 changed files with 12 additions and 3 deletions

--- a/plugin/pkg/scheduler/factory/factory.go
+++ b/plugin/pkg/scheduler/factory/factory.go
@@ -24,6 +24,7 @@ import (
 	"time"

 	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+	"github.com/GoogleCloudPlatform/kubernetes/pkg/api/errors"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/client/cache"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/controller/framework"
@@ -244,7 +245,11 @@ func (factory *ConfigFactory) createServiceLW() *cache.ListWatch {
 func (factory *ConfigFactory) makeDefaultErrorFunc(backoff *podBackoff, podQueue *cache.FIFO) func(pod *api.Pod, err error) {
 	return func(pod *api.Pod, err error) {
-		glog.Errorf("Error scheduling %v %v: %v; retrying", pod.Namespace, pod.Name, err)
+		if err == scheduler.ErrNoNodesAvailable {
+			glog.V(4).Infof("Unable to schedule %v %v: no nodes are registered to the cluster; waiting", pod.Namespace, pod.Name)
+		} else {
+			glog.Errorf("Error scheduling %v %v: %v; retrying", pod.Namespace, pod.Name, err)
+		}
 		backoff.gc()
 		// Retry asynchronously.
 		// Note that this is extremely rudimentary and we need a more real error handling path.
@@ -257,7 +262,9 @@ func (factory *ConfigFactory) makeDefaultErrorFunc(backoff *podBackoff, podQueue
 			pod = &api.Pod{}
 			err := factory.Client.Get().Namespace(podNamespace).Resource("pods").Name(podID).Do().Into(pod)
 			if err != nil {
-				glog.Errorf("Error getting pod %v for retry: %v; abandoning", podID, err)
+				if !errors.IsNotFound(err) {
+					glog.Errorf("Error getting pod %v for retry: %v; abandoning", podID, err)
+				}
 				return
 			}
 			if pod.Spec.Host == "" {
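
The factory change routes two expected situations away from error-level logs: an empty cluster (matched by identity against the scheduler.ErrNoNodesAvailable sentinel and demoted to a V(4) message) and a pod that was deleted before the retry (matched with errors.IsNotFound against the API response, and not logged at all). Below is a minimal, self-contained sketch of the sentinel-plus-verbosity pattern using only the standard library; the names schedule, handleSchedulingError, and the verbosity constant are hypothetical stand-ins, not part of the patch.

package main

import (
	"errors"
	"fmt"
	"log"
)

// A package-level sentinel, comparable by identity just like
// scheduler.ErrNoNodesAvailable in the patch.
var errNoNodesAvailable = errors.New("no nodes available to schedule pods")

// verbosity mimics a glog.V(4)-style gate: chatty messages are only
// emitted when a verbosity threshold is met.
const verbosity = 0

// schedule is a hypothetical stand-in for the real scheduling call.
func schedule(haveNodes bool) error {
	if !haveNodes {
		return errNoNodesAvailable
	}
	return nil
}

func handleSchedulingError(namespace, name string, err error) {
	if err == nil {
		return // nothing to handle; the real error func is only invoked on failure
	}
	if err == errNoNodesAvailable {
		// Expected in an empty (likely test) cluster: keep it at high verbosity.
		if verbosity >= 4 {
			log.Printf("Unable to schedule %v %v: no nodes are registered to the cluster; waiting", namespace, name)
		}
		return
	}
	// Anything else is a real failure and earns an error-level entry.
	log.Printf("Error scheduling %v %v: %v; retrying", namespace, name, err)
}

func main() {
	handleSchedulingError("default", "nginx", schedule(false)) // silent at verbosity 0
	handleSchedulingError("default", "nginx", fmt.Errorf("pod does not fit on any node"))
}

The deleted-pod branch in the second hunk follows the same shape: classify the error first, then decide whether it deserves an error-level entry before falling through to the retry logic.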

--- a/plugin/pkg/scheduler/generic_scheduler.go
+++ b/plugin/pkg/scheduler/generic_scheduler.go
@@ -36,6 +36,8 @@ type FitError struct {
 	FailedPredicates FailedPredicateMap
 }

+var ErrNoNodesAvailable = fmt.Errorf("no nodes available to schedule pods")
+
 // implementation of the error interface
 func (f *FitError) Error() string {
 	output := fmt.Sprintf("failed to find fit for pod: %v", f.Pod)
@@ -59,7 +61,7 @@ func (g *genericScheduler) Schedule(pod *api.Pod, minionLister algorithm.MinionL
 		return "", err
 	}
 	if len(minions.Items) == 0 {
-		return "", fmt.Errorf("no minions available to schedule pods")
+		return "", ErrNoNodesAvailable
 	}
 	filteredNodes, failedPredicateMap, err := findNodesThatFit(pod, g.pods, g.predicates, minions)
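
Hoisting the message into the exported ErrNoNodesAvailable variable is what makes the identity check in the factory possible: fmt.Errorf constructs a fresh error value on every call, so two errors with identical text never compare equal with ==. A short sketch of that distinction, independent of the Kubernetes code:

package main

import (
	"errors"
	"fmt"
)

// A single shared sentinel, as in the patch. errors.New and fmt.Errorf
// are interchangeable here because no formatting verbs are used.
var ErrNoNodesAvailable = errors.New("no nodes available to schedule pods")

// schedule is a hypothetical stand-in returning the sentinel to a caller.
func schedule() error { return ErrNoNodesAvailable }

func main() {
	a := fmt.Errorf("no nodes available to schedule pods")
	b := fmt.Errorf("no nodes available to schedule pods")

	fmt.Println(a == b)                            // false: each call yields a new value
	fmt.Println(schedule() == ErrNoNodesAvailable) // true: the same value flows back to the caller
}

On modern Go (1.13+), errors.Is(err, ErrNoNodesAvailable) would be preferred because it also matches wrapped errors; at the time of this commit, the direct == comparison used in makeDefaultErrorFunc was the idiom.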