Merge pull request #14715 from zhengguoyong/capital

set capital in some glog files
Brendan Burns 2015-09-30 11:28:05 -07:00
commit 2a6c7a1e79
13 changed files with 31 additions and 31 deletions
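All 31 changes follow the same mechanical pattern: the glog call, format verbs, and arguments are untouched, and only the first letter of the log message is capitalized. A minimal before/after sketch of the convention (illustrative only; the main wrapper and the error value are hypothetical, while the message text is taken from the AWS change below):

package main

import (
	"errors"

	"github.com/golang/glog"
)

func main() {
	defer glog.Flush()
	err := errors.New("DuplicateGroup")
	// Before: the message started with a lowercase letter.
	//   glog.Error("error creating security group: ", err)
	// After: same call and arguments, capitalized message.
	glog.Error("Error creating security group: ", err)
}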

@@ -336,7 +336,7 @@ func (ks *kube2sky) generateSRVRecord(subdomain, portSegment, recordName, cName
func (ks *kube2sky) addDNS(subdomain string, service *kapi.Service) error {
if len(service.Spec.Ports) == 0 {
- glog.Fatalf("unexpected service with no ports: %v", service)
+ glog.Fatalf("Unexpected service with no ports: %v", service)
}
// if ClusterIP is not set, a DNS entry should not be created
if !kapi.IsServiceIPSet(service) {

@@ -71,7 +71,7 @@ func (e *etcdMasterElector) run(path, id string) {
Object: Master(m),
}
case e := <-errors:
- glog.Errorf("error in election: %v", e)
+ glog.Errorf("Error in election: %v", e)
}
}
}

@@ -57,7 +57,7 @@ func (t *suicideTracker) Next(d time.Duration, driver bindings.ExecutorDriver, f
func (t *suicideTracker) makeJumper(_ jumper) jumper {
return jumper(func(driver bindings.ExecutorDriver, cancel <-chan struct{}) {
- glog.Warningln("jumping?!")
+ glog.Warningln("Jumping?!")
if t.jumps != nil {
atomic.AddUint32(t.jumps, 1)
}
@@ -103,7 +103,7 @@ func TestSuicide_WithTasks(t *testing.T) {
k.tasks["foo"] = &kuberTask{} // prevent suicide attempts from succeeding
// call reset with a nil timer
- glog.Infoln("resetting suicide watch with 1 task")
+ glog.Infoln("Resetting suicide watch with 1 task")
select {
case <-k.resetSuicideWatch(nil):
tracker = k.suicideWatch.(*suicideTracker)
@@ -125,7 +125,7 @@ func TestSuicide_WithTasks(t *testing.T) {
suicideStart := time.Now()
// reset the suicide watch, which should actually start a timer now
- glog.Infoln("resetting suicide watch with 0 tasks")
+ glog.Infoln("Resetting suicide watch with 0 tasks")
select {
case <-k.resetSuicideWatch(nil):
tracker = k.suicideWatch.(*suicideTracker)
@@ -147,7 +147,7 @@ func TestSuicide_WithTasks(t *testing.T) {
k.lock.Unlock()
// reset the suicide watch, which should stop the existing timer
- glog.Infoln("resetting suicide watch with 1 task")
+ glog.Infoln("Resetting suicide watch with 1 task")
select {
case <-k.resetSuicideWatch(nil):
tracker = k.suicideWatch.(*suicideTracker)
@@ -169,7 +169,7 @@ func TestSuicide_WithTasks(t *testing.T) {
k.lock.Unlock()
// reset the suicide watch, which should reset a stopped timer
- glog.Infoln("resetting suicide watch with 0 tasks")
+ glog.Infoln("Resetting suicide watch with 0 tasks")
select {
case <-k.resetSuicideWatch(nil):
tracker = k.suicideWatch.(*suicideTracker)
@@ -192,6 +192,6 @@ func TestSuicide_WithTasks(t *testing.T) {
if j := atomic.LoadUint32(&jumps); j != 1 {
t.Fatalf("expected 1 jumps instead of %d since stop was called", j)
} else {
- glog.Infoln("jumps verified") // glog so we get a timestamp
+ glog.Infoln("Jumps verified") // glog so we get a timestamp
}
}

@@ -112,7 +112,7 @@ func (m *SchedulerServer) setEndpoints(serviceName string, ip net.IP, port int)
}
if !reflect.DeepEqual(e.Subsets, want) {
e.Subsets = want
- glog.Infof("setting endpoints for master service %q to %#v", serviceName, e)
+ glog.Infof("Setting endpoints for master service %q to %#v", serviceName, e)
_, err = createOrUpdate(e)
return err
}

@@ -350,7 +350,7 @@ func (e *endpointController) syncService(key string) {
}
}
if reflect.DeepEqual(currentEndpoints.Subsets, subsets) && reflect.DeepEqual(currentEndpoints.Labels, service.Labels) {
- glog.V(5).Infof("endpoints are equal for %s/%s, skipping update", service.Namespace, service.Name)
+ glog.V(5).Infof("Endpoints are equal for %s/%s, skipping update", service.Namespace, service.Name)
return
}
newEndpoints := currentEndpoints

@@ -86,7 +86,7 @@ func (config DeferredLoadingClientConfig) ClientConfig() (*client.Config, error)
icc := inClusterClientConfig{}
defaultConfig, err := DefaultClientConfig.ClientConfig()
if icc.Possible() && err == nil && reflect.DeepEqual(mergedConfig, defaultConfig) {
- glog.V(2).Info("no kubeconfig could be created, falling back to service account.")
+ glog.V(2).Info("No kubeconfig could be created, falling back to service account.")
return icc.ClientConfig()
}

@@ -259,7 +259,7 @@ func InClusterConfig() (*Config, error) {
tlsClientConfig := TLSClientConfig{}
rootCAFile := "/var/run/secrets/kubernetes.io/serviceaccount/" + api.ServiceAccountRootCAKey
if _, err := util.CertPoolFromFile(rootCAFile); err != nil {
- glog.Errorf("expected to load root CA config from %s, but got err: %v", rootCAFile, err)
+ glog.Errorf("Expected to load root CA config from %s, but got err: %v", rootCAFile, err)
} else {
tlsClientConfig.CAFile = rootCAFile
}

@@ -309,13 +309,13 @@ type versionToResourceToFieldMapping map[string]resourceTypeToFieldMapping
func (v versionToResourceToFieldMapping) filterField(apiVersion, resourceType, field, value string) (newField, newValue string, err error) {
rMapping, ok := v[apiVersion]
if !ok {
- glog.Warningf("field selector: %v - %v - %v - %v: need to check if this is versioned correctly.", apiVersion, resourceType, field, value)
+ glog.Warningf("Field selector: %v - %v - %v - %v: need to check if this is versioned correctly.", apiVersion, resourceType, field, value)
return field, value, nil
}
newField, newValue, err = rMapping.filterField(resourceType, field, value)
if err != nil {
// This is only a warning until we find and fix all of the client's usages.
- glog.Warningf("field selector: %v - %v - %v - %v: need to check if this is versioned correctly.", apiVersion, resourceType, field, value)
+ glog.Warningf("Field selector: %v - %v - %v - %v: need to check if this is versioned correctly.", apiVersion, resourceType, field, value)
return field, value, nil
}
return newField, newValue, nil

@@ -683,7 +683,7 @@ func isAlive(instance *ec2.Instance) bool {
case "pending", "running":
return true
default:
- glog.Errorf("unknown EC2 instance state: %s", stateName)
+ glog.Errorf("Unknown EC2 instance state: %s", stateName)
return false
}
}
@@ -724,13 +724,13 @@ func (s *AWSCloud) getInstancesByRegex(regex string) ([]string, error) {
// Only return fully-ready instances when listing instances
// (vs a query by name, where we will return it if we find it)
if orEmpty(instance.State.Name) == "pending" {
- glog.V(2).Infof("skipping EC2 instance (pending): %s", *instance.InstanceId)
+ glog.V(2).Infof("Skipping EC2 instance (pending): %s", *instance.InstanceId)
continue
}
privateDNSName := orEmpty(instance.PrivateDnsName)
if privateDNSName == "" {
- glog.V(2).Infof("skipping EC2 instance (no PrivateDNSName): %s",
+ glog.V(2).Infof("Skipping EC2 instance (no PrivateDNSName): %s",
orEmpty(instance.InstanceId))
continue
}
@@ -1514,7 +1514,7 @@ func (s *AWSCloud) ensureSecurityGroup(name string, description string, vpcID st
}
}
if !ignore {
- glog.Error("error creating security group: ", err)
+ glog.Error("Error creating security group: ", err)
return "", err
}
time.Sleep(1 * time.Second)
@@ -1617,7 +1617,7 @@ func (s *AWSCloud) EnsureTCPLoadBalancer(name, region string, publicIP net.IP, p
subnets, err := s.ec2.DescribeSubnets(request)
if err != nil {
- glog.Error("error describing subnets: ", err)
+ glog.Error("Error describing subnets: ", err)
return nil, err
}
@@ -1625,7 +1625,7 @@ func (s *AWSCloud) EnsureTCPLoadBalancer(name, region string, publicIP net.IP, p
for _, subnet := range subnets {
subnetIDs = append(subnetIDs, orEmpty(subnet.SubnetId))
if !strings.HasPrefix(orEmpty(subnet.AvailabilityZone), region) {
- glog.Error("found AZ that did not match region", orEmpty(subnet.AvailabilityZone), " vs ", region)
+ glog.Error("Found AZ that did not match region", orEmpty(subnet.AvailabilityZone), " vs ", region)
return nil, fmt.Errorf("invalid AZ for region")
}
// zones = append(zones, subnet.AvailabilityZone)
@@ -1639,7 +1639,7 @@ func (s *AWSCloud) EnsureTCPLoadBalancer(name, region string, publicIP net.IP, p
sgDescription := "Security group for Kubernetes ELB " + name
securityGroupID, err = s.ensureSecurityGroup(sgName, sgDescription, orEmpty(vpc.VpcId))
if err != nil {
- glog.Error("error creating load balancer security group: ", err)
+ glog.Error("Error creating load balancer security group: ", err)
return nil, err
}
@@ -1814,7 +1814,7 @@ func (s *AWSCloud) updateInstanceSecurityGroupsForLoadBalancer(lb *elb.LoadBalan
for _, instance := range allInstances {
securityGroupId := findSecurityGroupForInstance(instance)
if isNilOrEmpty(securityGroupId) {
- glog.Warning("ignoring instance without security group: ", orEmpty(instance.InstanceId))
+ glog.Warning("Ignoring instance without security group: ", orEmpty(instance.InstanceId))
continue
}
@@ -1824,7 +1824,7 @@ func (s *AWSCloud) updateInstanceSecurityGroupsForLoadBalancer(lb *elb.LoadBalan
// Compare to actual groups
for _, actualGroup := range actualGroups {
if isNilOrEmpty(actualGroup.GroupId) {
- glog.Warning("ignoring group without ID: ", actualGroup)
+ glog.Warning("Ignoring group without ID: ", actualGroup)
continue
}
@@ -1899,7 +1899,7 @@ func (s *AWSCloud) EnsureTCPLoadBalancerDeleted(name, region string) error {
// De-authorize the load balancer security group from the instances security group
err = s.updateInstanceSecurityGroupsForLoadBalancer(lb, nil)
if err != nil {
- glog.Error("error deregistering load balancer from instance security groups: ", err)
+ glog.Error("Error deregistering load balancer from instance security groups: ", err)
return err
}
}
@@ -1912,7 +1912,7 @@ func (s *AWSCloud) EnsureTCPLoadBalancerDeleted(name, region string) error {
_, err = s.elb.DeleteLoadBalancer(request)
if err != nil {
// TODO: Check if error was because load balancer was concurrently deleted
- glog.Error("error deleting load balancer: ", err)
+ glog.Error("Error deleting load balancer: ", err)
return err
}
}

@@ -340,7 +340,7 @@ func translateAffinityType(affinityType api.ServiceAffinity) GCEAffinityType {
case api.ServiceAffinityNone:
return GCEAffinityTypeNone
default:
- glog.Errorf("unexpected affinity type: %v", affinityType)
+ glog.Errorf("Unexpected affinity type: %v", affinityType)
return GCEAffinityTypeNone
}
}

@@ -123,7 +123,7 @@ func (f *ConfigFactory) Create() (*scheduler.Config, error) {
// Creates a scheduler from the name of a registered algorithm provider.
func (f *ConfigFactory) CreateFromProvider(providerName string) (*scheduler.Config, error) {
- glog.V(2).Infof("creating scheduler from algorithm provider '%v'", providerName)
+ glog.V(2).Infof("Creating scheduler from algorithm provider '%v'", providerName)
provider, err := GetAlgorithmProvider(providerName)
if err != nil {
return nil, err
@@ -134,7 +134,7 @@ func (f *ConfigFactory) CreateFromProvider(providerName string) (*scheduler.Conf
// Creates a scheduler from the configuration file
func (f *ConfigFactory) CreateFromConfig(policy schedulerapi.Policy) (*scheduler.Config, error) {
- glog.V(2).Infof("creating scheduler from configuration: %v", policy)
+ glog.V(2).Infof("Creating scheduler from configuration: %v", policy)
// validate the policy configuration
if err := validation.ValidatePolicy(policy); err != nil {

@@ -271,7 +271,7 @@ var validName = regexp.MustCompile("^[a-zA-Z0-9]([-a-zA-Z0-9]*[a-zA-Z0-9])$")
func validateAlgorithmNameOrDie(name string) {
if !validName.MatchString(name) {
- glog.Fatalf("algorithm name %v does not match the name validation regexp \"%v\".", name, validName)
+ glog.Fatalf("Algorithm name %v does not match the name validation regexp \"%v\".", name, validName)
}
}

@@ -44,7 +44,7 @@ func (f *FitError) Error() string {
var reason string
// We iterate over all nodes for logging purposes, even though we only return one reason from one node
for node, predicateList := range f.FailedPredicates {
- glog.Infof("failed to find fit for pod %v on node %s: %s", f.Pod.Name, node, strings.Join(predicateList.List(), ","))
+ glog.Infof("Failed to find fit for pod %v on node %s: %s", f.Pod.Name, node, strings.Join(predicateList.List(), ","))
if len(reason) == 0 {
reason, _ = predicateList.PopAny()
}
@@ -195,7 +195,7 @@ func getBestHosts(list algorithm.HostPriorityList) []string {
func EqualPriority(_ *api.Pod, podLister algorithm.PodLister, nodeLister algorithm.NodeLister) (algorithm.HostPriorityList, error) {
nodes, err := nodeLister.List()
if err != nil {
- glog.Errorf("failed to list nodes: %v", err)
+ glog.Errorf("Failed to list nodes: %v", err)
return []algorithm.HostPriority{}, err
}