Add extra logging for mesh health endpoints. (#18647)

Derek Menteer committed 1 year ago (via GitHub)
parent b56fbc7a62
commit a698142325

@@ -106,6 +106,12 @@ func (h *serverHealthBlocking) Notify(ctx context.Context, args *structs.Service
 			// their data, rather than holding onto the last-known list of healthy nodes indefinitely.
 			if hadResults {
 				hadResults = false
+				h.deps.Logger.Debug("serverHealthBlocking emitting zero check-service-nodes due to insufficient ACL privileges",
+					"serviceName", structs.NewServiceName(args.ServiceName, &args.EnterpriseMeta),
+					"correlationID", correlationID,
+					"connect", args.Connect,
+					"ingress", args.Ingress,
+				)
 				return 0, &structs.IndexedCheckServiceNodes{}, watch.ErrorACLResetData
 			}
 			return 0, nil, acl.ErrPermissionDenied
@@ -132,6 +138,13 @@ func (h *serverHealthBlocking) Notify(ctx context.Context, args *structs.Service
 			}
 			hadResults = true
+			h.deps.Logger.Trace("serverHealthBlocking emitting check-service-nodes",
+				"serviceName", structs.NewServiceName(args.ServiceName, &args.EnterpriseMeta),
+				"correlationID", correlationID,
+				"connect", args.Connect,
+				"ingress", args.Ingress,
+				"nodes", len(thisReply.Nodes),
+			)
 			return thisReply.Index, &thisReply, nil
 		},
 		dispatchBlockingQueryUpdate[*structs.IndexedCheckServiceNodes](ch),
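
Both of the new calls above use Consul's structured logger (hashicorp/go-hclog): a message followed by alternating key/value pairs. A minimal sketch of how such a call renders, assuming a standalone hclog logger; the logger name and the values below are illustrative, not Consul's actual wiring:

package main

import (
	"github.com/hashicorp/go-hclog"
)

func main() {
	// Illustrative logger; the Consul agent wires up its own and exposes it as h.deps.Logger.
	logger := hclog.New(&hclog.LoggerOptions{
		Name:  "rpcclient.health", // assumed name, for illustration only
		Level: hclog.Trace,        // Trace must be enabled for the new Trace line to be emitted
	})

	// Same shape as the new call: message, then key/value pairs.
	logger.Trace("serverHealthBlocking emitting check-service-nodes",
		"serviceName", "web",
		"correlationID", "service-health:web",
		"connect", false,
		"ingress", false,
		"nodes", 3,
	)
	// Output (timestamp elided):
	//   [TRACE] rpcclient.health: serverHealthBlocking emitting check-service-nodes: serviceName=web correlationID=service-health:web connect=false ingress=false nodes=3
}

At runtime these lines only show up when the agent's log level is lowered, e.g. log_level = "trace" in the agent configuration or consul monitor -log-level=trace for a live stream.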

@@ -136,6 +136,10 @@ func (s *handlerUpstreams) handleUpdateUpstreams(ctx context.Context, u UpdateEv
 		uid := UpstreamIDFromString(uidString)
+		s.logger.Debug("upstream-target watch fired",
+			"correlationID", correlationID,
+			"nodes", len(resp.Nodes),
+		)
 		if _, ok := upstreamsSnapshot.WatchedUpstreamEndpoints[uid]; !ok {
 			upstreamsSnapshot.WatchedUpstreamEndpoints[uid] = make(map[string]structs.CheckServiceNodes)
 		}

@@ -750,6 +750,7 @@ func (s *ResourceGenerator) endpointsFromDiscoveryChain(
 		}
 		switch len(groupedTarget.Targets) {
 		case 0:
+			s.Logger.Trace("skipping endpoint generation for zero-length target group", "cluster", clusterName)
 			continue
 		case 1:
 			// We expect one target so this passes through to continue setting the load assignment up.
@@ -757,7 +758,7 @@ func (s *ResourceGenerator) endpointsFromDiscoveryChain(
 			return nil, fmt.Errorf("cannot have more than one target")
 		}
 		ti := groupedTarget.Targets[0]
-		s.Logger.Debug("generating endpoints for", "cluster", clusterName, "targetID", ti.TargetID)
+		s.Logger.Trace("generating endpoints for", "cluster", clusterName, "targetID", ti.TargetID, "gatewayKey", gatewayKey)
 		targetUID := proxycfg.NewUpstreamIDFromTargetID(ti.TargetID)
 		if targetUID.Peer != "" {
 			loadAssignment, err := s.makeUpstreamLoadAssignmentForPeerService(cfgSnap, clusterName, targetUID, mgwMode)
@@ -779,6 +780,7 @@ func (s *ResourceGenerator) endpointsFromDiscoveryChain(
 			forMeshGateway,
 		)
 		if !valid {
+			s.Logger.Trace("skipping endpoint generation for invalid target group", "cluster", clusterName)
 			continue // skip the cluster if we're still populating the snapshot
 		}
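
All of the added lines are at Debug or Trace level, so they are invisible at Consul's default info level, though their arguments (for example len(thisReply.Nodes)) are still evaluated on every call before the logger discards them. For hotter paths, hclog's level checks can guard that work; a minimal sketch, assuming an hclog.Logger in scope as logger (the guard itself is not part of this change):

	// Only build the argument list when Trace output is actually enabled.
	if logger.IsTrace() {
		logger.Trace("generating endpoints for",
			"cluster", clusterName,
			"targetID", ti.TargetID,
			"gatewayKey", gatewayKey,
		)
	}

The change itself uses the simpler unguarded form, which keeps the call sites short and is reasonable when the arguments are cheap to compute.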
