Fix PeerUpstreamEndpoints and UpstreamPeerTrustBundles to only Cancel watch when needed, otherwise keep the watch active (#21871)

* fix to only reset peering watches when no other target needs watching

* remove unused logger

* add changelog

* Update .changelog/21871.txt

Co-authored-by: Nitya Dhanushkodi <nitya@hashicorp.com>

---------

Co-authored-by: Nitya Dhanushkodi <nitya@hashicorp.com>
pull/21957/head
Dhia Ayachi 5 days ago committed by GitHub
parent 6662e48363
commit 21cca2dc5b
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

@ -0,0 +1,3 @@
```release-note:bug
proxycfg: fix a bug where peered upstream watches were canceled even when another target still needed them.
```

@ -476,16 +476,12 @@ func (h *handlerAPIGateway) handleRouteConfigUpdate(ctx context.Context, u Updat
cancelUpstream() cancelUpstream()
delete(snap.APIGateway.WatchedUpstreams[upstreamID], targetID) delete(snap.APIGateway.WatchedUpstreams[upstreamID], targetID)
delete(snap.APIGateway.WatchedUpstreamEndpoints[upstreamID], targetID) delete(snap.APIGateway.WatchedUpstreamEndpoints[upstreamID], targetID)
if targetUID := NewUpstreamIDFromTargetID(targetID); targetUID.Peer != "" {
snap.APIGateway.PeerUpstreamEndpoints.CancelWatch(targetUID)
snap.APIGateway.UpstreamPeerTrustBundles.CancelWatch(targetUID.Peer)
}
} }
cancelDiscoChain() cancelDiscoChain()
delete(snap.APIGateway.WatchedDiscoveryChains, upstreamID) delete(snap.APIGateway.WatchedDiscoveryChains, upstreamID)
} }
reconcilePeeringWatches(snap.APIGateway.DiscoveryChain, snap.APIGateway.UpstreamConfig, snap.APIGateway.PeeredUpstreams, snap.APIGateway.PeerUpstreamEndpoints, snap.APIGateway.UpstreamPeerTrustBundles)
return nil return nil
} }

@ -380,49 +380,7 @@ func (s *handlerConnectProxy) handleUpdate(ctx context.Context, u UpdateEvent, s
// //
// Clean up data // Clean up data
// //
reconcilePeeringWatches(snap.ConnectProxy.DiscoveryChain, snap.ConnectProxy.UpstreamConfig, snap.ConnectProxy.PeeredUpstreams, snap.ConnectProxy.PeerUpstreamEndpoints, snap.ConnectProxy.UpstreamPeerTrustBundles)
peeredChainTargets := make(map[UpstreamID]struct{})
for _, discoChain := range snap.ConnectProxy.DiscoveryChain {
for _, target := range discoChain.Targets {
if target.Peer == "" {
continue
}
uid := NewUpstreamIDFromTargetID(target.ID)
peeredChainTargets[uid] = struct{}{}
}
}
validPeerNames := make(map[string]struct{})
// Iterate through all known endpoints and remove references to upstream IDs that weren't in the update
snap.ConnectProxy.PeerUpstreamEndpoints.ForEachKey(func(uid UpstreamID) bool {
// Peered upstream is explicitly defined in upstream config
if _, ok := snap.ConnectProxy.UpstreamConfig[uid]; ok {
validPeerNames[uid.Peer] = struct{}{}
return true
}
// Peered upstream came from dynamic source of imported services
if _, ok := seenUpstreams[uid]; ok {
validPeerNames[uid.Peer] = struct{}{}
return true
}
// Peered upstream came from a discovery chain target
if _, ok := peeredChainTargets[uid]; ok {
validPeerNames[uid.Peer] = struct{}{}
return true
}
snap.ConnectProxy.PeerUpstreamEndpoints.CancelWatch(uid)
return true
})
// Iterate through all known trust bundles and remove references to any unseen peer names
snap.ConnectProxy.UpstreamPeerTrustBundles.ForEachKey(func(peerName PeerName) bool {
if _, ok := validPeerNames[peerName]; !ok {
snap.ConnectProxy.UpstreamPeerTrustBundles.CancelWatch(peerName)
}
return true
})
case u.CorrelationID == intentionUpstreamsID: case u.CorrelationID == intentionUpstreamsID:
resp, ok := u.Result.(*structs.IndexedServiceList) resp, ok := u.Result.(*structs.IndexedServiceList)
if !ok { if !ok {
@ -490,18 +448,13 @@ func (s *handlerConnectProxy) handleUpdate(ctx context.Context, u UpdateEvent, s
continue continue
} }
if _, ok := seenUpstreams[uid]; !ok { if _, ok := seenUpstreams[uid]; !ok {
for targetID, cancelFn := range targets { for _, cancelFn := range targets {
cancelFn() cancelFn()
targetUID := NewUpstreamIDFromTargetID(targetID)
if targetUID.Peer != "" {
snap.ConnectProxy.PeerUpstreamEndpoints.CancelWatch(targetUID)
snap.ConnectProxy.UpstreamPeerTrustBundles.CancelWatch(targetUID.Peer)
}
} }
delete(snap.ConnectProxy.WatchedUpstreams, uid) delete(snap.ConnectProxy.WatchedUpstreams, uid)
} }
} }
reconcilePeeringWatches(snap.ConnectProxy.DiscoveryChain, snap.ConnectProxy.UpstreamConfig, snap.ConnectProxy.PeeredUpstreams, snap.ConnectProxy.PeerUpstreamEndpoints, snap.ConnectProxy.UpstreamPeerTrustBundles)
for uid := range snap.ConnectProxy.WatchedUpstreamEndpoints { for uid := range snap.ConnectProxy.WatchedUpstreamEndpoints {
if upstream, ok := snap.ConnectProxy.UpstreamConfig[uid]; ok && !upstream.CentrallyConfigured { if upstream, ok := snap.ConnectProxy.UpstreamConfig[uid]; ok && !upstream.CentrallyConfigured {
continue continue

@ -171,18 +171,13 @@ func (s *handlerIngressGateway) handleUpdate(ctx context.Context, u UpdateEvent,
delete(snap.IngressGateway.WatchedUpstreams[uid], targetID) delete(snap.IngressGateway.WatchedUpstreams[uid], targetID)
delete(snap.IngressGateway.WatchedUpstreamEndpoints[uid], targetID) delete(snap.IngressGateway.WatchedUpstreamEndpoints[uid], targetID)
cancelUpstreamFn() cancelUpstreamFn()
targetUID := NewUpstreamIDFromTargetID(targetID)
if targetUID.Peer != "" {
snap.IngressGateway.PeerUpstreamEndpoints.CancelWatch(targetUID)
snap.IngressGateway.UpstreamPeerTrustBundles.CancelWatch(targetUID.Peer)
}
} }
cancelFn() cancelFn()
delete(snap.IngressGateway.WatchedDiscoveryChains, uid) delete(snap.IngressGateway.WatchedDiscoveryChains, uid)
} }
} }
reconcilePeeringWatches(snap.IngressGateway.DiscoveryChain, snap.IngressGateway.UpstreamConfig, snap.IngressGateway.PeeredUpstreams, snap.IngressGateway.PeerUpstreamEndpoints, snap.IngressGateway.UpstreamPeerTrustBundles)
if err := s.watchIngressLeafCert(ctx, snap); err != nil { if err := s.watchIngressLeafCert(ctx, snap); err != nil {
return err return err

@ -13,12 +13,15 @@ import (
"sync/atomic" "sync/atomic"
"time" "time"
"github.com/hashicorp/go-hclog"
"golang.org/x/time/rate" "golang.org/x/time/rate"
"github.com/hashicorp/go-hclog"
cachetype "github.com/hashicorp/consul/agent/cache-types" cachetype "github.com/hashicorp/consul/agent/cache-types"
"github.com/hashicorp/consul/agent/proxycfg/internal/watch"
"github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/logging" "github.com/hashicorp/consul/logging"
"github.com/hashicorp/consul/proto/private/pbpeering"
) )
const ( const (
@ -551,3 +554,48 @@ func watchMeshGateway(ctx context.Context, opts gatewayWatchOpts) error {
EnterpriseMeta: *structs.DefaultEnterpriseMetaInPartition(opts.key.Partition), EnterpriseMeta: *structs.DefaultEnterpriseMetaInPartition(opts.key.Partition),
}, correlationId, opts.notifyCh) }, correlationId, opts.notifyCh)
} }
// reconcilePeeringWatches cancels peered-upstream endpoint watches (and the
// corresponding peer trust-bundle watches) that no longer have any consumer.
// A watch is kept alive if its upstream ID is still referenced by at least one
// of: the explicit upstream config, the dynamically imported (peered)
// upstreams, or a peered target of a compiled discovery chain. Trust-bundle
// watches survive as long as any surviving endpoint watch references the peer.
func reconcilePeeringWatches(compiledDiscoveryChains map[UpstreamID]*structs.CompiledDiscoveryChain, upstreams map[UpstreamID]*structs.Upstream, peeredUpstreams map[UpstreamID]struct{}, peerUpstreamEndpoints watch.Map[UpstreamID, structs.CheckServiceNodes], upstreamPeerTrustBundles watch.Map[PeerName, *pbpeering.PeeringTrustBundle]) {
	// Collect every upstream ID that a peered discovery-chain target refers to.
	chainTargets := make(map[UpstreamID]struct{})
	for _, chain := range compiledDiscoveryChains {
		for _, tgt := range chain.Targets {
			if tgt.Peer == "" {
				continue
			}
			chainTargets[NewUpstreamIDFromTargetID(tgt.ID)] = struct{}{}
		}
	}

	// Walk the endpoint watches: keep any watch still referenced by one of the
	// three sources above (recording its peer as still in use), cancel the rest.
	keepPeers := make(map[string]struct{})
	peerUpstreamEndpoints.ForEachKey(func(uid UpstreamID) bool {
		_, fromConfig := upstreams[uid]
		_, fromImports := peeredUpstreams[uid]
		_, fromChain := chainTargets[uid]
		if fromConfig || fromImports || fromChain {
			keepPeers[uid.Peer] = struct{}{}
		} else {
			peerUpstreamEndpoints.CancelWatch(uid)
		}
		return true
	})

	// Drop trust-bundle watches for peers with no remaining endpoint watch.
	upstreamPeerTrustBundles.ForEachKey(func(peer PeerName) bool {
		if _, inUse := keepPeers[peer]; !inUse {
			upstreamPeerTrustBundles.CancelWatch(peer)
		}
		return true
	})
}

@ -102,6 +102,7 @@ func (s *handlerUpstreams) handleUpdateUpstreams(ctx context.Context, u UpdateEv
if err := s.resetWatchesFromChain(ctx, uid, resp.Chain, upstreamsSnapshot); err != nil { if err := s.resetWatchesFromChain(ctx, uid, resp.Chain, upstreamsSnapshot); err != nil {
return err return err
} }
reconcilePeeringWatches(upstreamsSnapshot.DiscoveryChain, upstreamsSnapshot.UpstreamConfig, upstreamsSnapshot.PeeredUpstreams, upstreamsSnapshot.PeerUpstreamEndpoints, upstreamsSnapshot.UpstreamPeerTrustBundles)
case strings.HasPrefix(u.CorrelationID, upstreamPeerWatchIDPrefix): case strings.HasPrefix(u.CorrelationID, upstreamPeerWatchIDPrefix):
resp, ok := u.Result.(*structs.IndexedCheckServiceNodes) resp, ok := u.Result.(*structs.IndexedCheckServiceNodes)
@ -301,12 +302,6 @@ func (s *handlerUpstreams) resetWatchesFromChain(
delete(snap.WatchedUpstreams[uid], targetID) delete(snap.WatchedUpstreams[uid], targetID)
delete(snap.WatchedUpstreamEndpoints[uid], targetID) delete(snap.WatchedUpstreamEndpoints[uid], targetID)
cancelFn() cancelFn()
targetUID := NewUpstreamIDFromTargetID(targetID)
if targetUID.Peer != "" {
snap.PeerUpstreamEndpoints.CancelWatch(targetUID)
snap.UpstreamPeerTrustBundles.CancelWatch(targetUID.Peer)
}
} }
var ( var (
@ -479,8 +474,8 @@ func (s *handlerUpstreams) watchUpstreamTarget(ctx context.Context, snap *Config
var entMeta acl.EnterpriseMeta var entMeta acl.EnterpriseMeta
entMeta.Merge(opts.entMeta) entMeta.Merge(opts.entMeta)
ctx, cancel := context.WithCancel(ctx) peerCtx, cancel := context.WithCancel(ctx)
err := s.dataSources.Health.Notify(ctx, &structs.ServiceSpecificRequest{ err := s.dataSources.Health.Notify(peerCtx, &structs.ServiceSpecificRequest{
PeerName: opts.peer, PeerName: opts.peer,
Datacenter: opts.datacenter, Datacenter: opts.datacenter,
QueryOptions: structs.QueryOptions{ QueryOptions: structs.QueryOptions{
@ -506,25 +501,25 @@ func (s *handlerUpstreams) watchUpstreamTarget(ctx context.Context, snap *Config
return nil return nil
} }
if ok := snap.PeerUpstreamEndpoints.IsWatched(uid); !ok { if !snap.PeerUpstreamEndpoints.IsWatched(uid) {
snap.PeerUpstreamEndpoints.InitWatch(uid, cancel) snap.PeerUpstreamEndpoints.InitWatch(uid, cancel)
} }
// Check whether a watch for this peer exists to avoid duplicates. // Check whether a watch for this peer exists to avoid duplicates.
if ok := snap.UpstreamPeerTrustBundles.IsWatched(uid.Peer); !ok {
peerCtx, cancel := context.WithCancel(ctx) if !snap.UpstreamPeerTrustBundles.IsWatched(uid.Peer) {
if err := s.dataSources.TrustBundle.Notify(peerCtx, &cachetype.TrustBundleReadRequest{ peerCtx2, cancel2 := context.WithCancel(ctx)
if err := s.dataSources.TrustBundle.Notify(peerCtx2, &cachetype.TrustBundleReadRequest{
Request: &pbpeering.TrustBundleReadRequest{ Request: &pbpeering.TrustBundleReadRequest{
Name: uid.Peer, Name: uid.Peer,
Partition: uid.PartitionOrDefault(), Partition: uid.PartitionOrDefault(),
}, },
QueryOptions: structs.QueryOptions{Token: s.token}, QueryOptions: structs.QueryOptions{Token: s.token},
}, peerTrustBundleIDPrefix+uid.Peer, s.ch); err != nil { }, peerTrustBundleIDPrefix+uid.Peer, s.ch); err != nil {
cancel() cancel2()
return fmt.Errorf("error while watching trust bundle for peer %q: %w", uid.Peer, err) return fmt.Errorf("error while watching trust bundle for peer %q: %w", uid.Peer, err)
} }
snap.UpstreamPeerTrustBundles.InitWatch(uid.Peer, cancel) snap.UpstreamPeerTrustBundles.InitWatch(uid.Peer, cancel2)
} }
return nil return nil

Loading…
Cancel
Save