// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

// Package cache provides caching features for data from a Consul server.
//
// While this is similar in some ways to the "agent/ae" package, a key
// difference is that with anti-entropy, the agent is the authoritative
// source so it resolves differences the server may have. With caching (this
// package), the server is the authoritative source and we do our best to
// balance performance and correctness, depending on the type of data being
// requested.
//
// The types of data that can be cached are configurable via the Type interface.
// This allows specialized behavior for certain types of data. Each type of
// Consul data (CA roots, leaf certs, intentions, KV, catalog, etc.) has to
// be implemented by hand, but this usually is not much work; see the
// "agent/cache-types" package.
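//
// A minimal usage sketch (myType and req are hypothetical placeholders for a
// Type implementation and a Request value defined elsewhere):
//
//	c := cache.New(cache.Options{})
//	c.RegisterType("my-type", myType)
//	val, meta, err := c.Get(context.Background(), "my-type", req)
//	// meta.Hit reports whether the value was served from the cache.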
package cache

import (
	"context"
	"fmt"
	"io"
	"strconv"
	"sync"
	"sync/atomic"
	"time"

	"github.com/armon/go-metrics"
	"github.com/armon/go-metrics/prometheus"
	"github.com/hashicorp/go-hclog"
	"golang.org/x/time/rate"

	"github.com/hashicorp/consul/acl"
	"github.com/hashicorp/consul/lib"
	"github.com/hashicorp/consul/lib/ttlcache"
)

// TODO(kit): remove the namespace from these once the metrics themselves change
var Gauges = []prometheus.GaugeDefinition{
	{
		Name: []string{"consul", "cache", "entries_count"},
		Help: "Deprecated - please use cache_entries_count instead.",
	},
	{
		Name: []string{"cache", "entries_count"},
		Help: "Represents the number of entries in this cache.",
	},
}

// TODO(kit): remove the namespace from these once the metrics themselves change
var Counters = []prometheus.CounterDefinition{
	{
		Name: []string{"consul", "cache", "bypass"},
		Help: "Deprecated - please use cache_bypass instead.",
	},
	{
		Name: []string{"cache", "bypass"},
		Help: "Counts how many times a request bypassed the cache because no cache-key was provided.",
	},
	{
		Name: []string{"consul", "cache", "fetch_success"},
		Help: "Deprecated - please use cache_fetch_success instead.",
	},
	{
		Name: []string{"cache", "fetch_success"},
		Help: "Counts the number of successful fetches by the cache.",
	},
	{
		Name: []string{"consul", "cache", "fetch_error"},
		Help: "Deprecated - please use cache_fetch_error instead.",
	},
	{
		Name: []string{"cache", "fetch_error"},
		Help: "Counts the number of failed fetches by the cache.",
	},
	{
		Name: []string{"consul", "cache", "evict_expired"},
		Help: "Deprecated - please use cache_evict_expired instead.",
	},
	{
		Name: []string{"cache", "evict_expired"},
		Help: "Counts the number of expired entries that are evicted.",
	},
}

// Constants related to refresh backoff. We probably don't ever need to
// make these configurable knobs since they primarily exist to lower load.
const (
	CacheRefreshBackoffMin = 3               // 3 attempts before backing off
	CacheRefreshMaxWait    = 1 * time.Minute // maximum backoff wait time

	// The following constants are default values for the cache entry
	// rate limiter settings.

	// DefaultEntryFetchRate is the default rate at which cache entries can
	// be fetched. This defaults to rate.Inf, i.e. unlimited.
	DefaultEntryFetchRate = rate.Inf

	// DefaultEntryFetchMaxBurst is the number of cache entry fetches that can
	// occur in a burst.
	DefaultEntryFetchMaxBurst = 2
)

// Cache is an agent-local cache of Consul data. Create a Cache using the
// New function. A zero-value Cache is not ready for usage and will result
// in a panic.
//
// The types of data to be cached must be registered via RegisterType. Then,
// calls to Get specify the type and a Request implementation. The
// implementation of Request is usually done directly on the standard RPC
// struct in agent/structs. This API makes cache usage a mostly drop-in
// replacement for non-cached RPC calls.
//
// The cache is partitioned by ACL and datacenter/peer. This allows the cache
// to be safe for multi-DC queries and for queries where the data is modified
// due to ACLs, all without the cache needing any clever logic, at the
// slight expense of a less perfect cache.
//
// The Cache exposes various metrics via go-metrics. Please view the source
// searching for "metrics." to see the various metrics exposed. These can be
// used to explore the performance of the cache.
type Cache struct {
	// types stores the list of data types that the cache knows how to service.
	// These can be dynamically registered with RegisterType.
	typesLock sync.RWMutex
	types     map[string]typeEntry

	// entries contains the actual cache data. Access to entries and
	// entriesExpiryHeap must be protected by entriesLock.
	//
	// entriesExpiryHeap is a heap of *cacheEntry values ordered by
	// expiry, with the soonest to expire being first in the list (index 0).
	//
	// NOTE(mitchellh): The entry map key is currently a string in the format
	// of "<DC>/<ACL token>/<Request key>" in order to properly partition
	// requests to different datacenters and ACL tokens. This format has some
	// big drawbacks: we can't evict by datacenter, ACL token, etc. For an
	// initial implementation this works and the tests are agnostic to the
	// internal storage format so changing this should be possible safely.
	entriesLock       sync.RWMutex
	entries           map[string]cacheEntry
	entriesExpiryHeap *ttlcache.ExpiryHeap

	fetchLock    sync.Mutex
	lastFetchID  uint64
	fetchHandles map[string]fetchHandle

	// stopped is used as an atomic flag to signal that the Cache has been
	// discarded so background fetches and expiry processing should stop.
	stopped uint32
	// stopCh is closed when Close is called
	stopCh chan struct{}
	// options includes a per-Cache rate limiter specification to avoid performing too many queries
	options          Options
	rateLimitContext context.Context
	rateLimitCancel  context.CancelFunc
}

type fetchHandle struct {
	id     uint64
	stopCh chan struct{}
}

// typeEntry is a single type that is registered with a Cache.
type typeEntry struct {
	// Name that was used to register the Type
	Name string
	Type Type
	Opts *RegisterOptions
}

// ResultMeta is returned from Get calls along with the value and can be used
// to expose information about the cache status for debugging or testing.
type ResultMeta struct {
	// Hit indicates whether or not the request was a cache hit
	Hit bool

	// Age identifies how "stale" the result is. Its semantics differ based on
	// whether or not the cache type performs background refresh, as defined
	// in https://www.consul.io/api/index.html#agent-caching.
	//
	// For background refresh types, Age is 0 unless the background blocking query
	// is currently in a failed state and so not keeping up with the server's
	// values. If it is non-zero it represents the time since the first failure to
	// connect during background refresh, and is reset after a background request
	// does manage to reconnect and either return successfully, or block for at
	// least the yamux keepalive timeout of 30 seconds (which indicates the
	// connection is OK but blocked as expected).
	//
	// For simple cache types, Age is the time since the result being returned was
	// fetched from the servers.
	Age time.Duration

	// Index is the internal ModifyIndex for the cache entry. Not all types
	// support blocking and all that do will likely have this in their result type
	// already, but this allows generic code to reason about whether cache values
	// have changed.
	Index uint64
}
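
// A read sketch (ctx, c, and req are hypothetical placeholders for a context,
// a configured *Cache, and a Request value for a registered type):
//
//	val, meta, err := c.Get(ctx, "my-type", req)
//	if err == nil && meta.Hit {
//		// val was served from the cache; meta.Age indicates how stale it may be.
//	}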

// Options are options for the Cache.
type Options struct {
	Logger hclog.Logger

	// EntryFetchMaxBurst is the maximum burst size of the rate limit for a single cache entry
	EntryFetchMaxBurst int
	// EntryFetchRate represents the max calls/sec for a single cache entry
	EntryFetchRate rate.Limit
}
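
// A construction sketch (the values are illustrative assumptions, not
// recommended settings):
//
//	c := New(Options{
//		Logger:             hclog.Default(),
//		EntryFetchRate:     rate.Limit(10), // at most 10 fetches/sec per entry
//		EntryFetchMaxBurst: 5,
//	})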

// Equal returns true if both options are equivalent
func (o Options) Equal(other Options) bool {
	return o.EntryFetchMaxBurst == other.EntryFetchMaxBurst && o.EntryFetchRate == other.EntryFetchRate
}

// applyDefaultValuesOnOptions sets default values on options and returns the updated value
func applyDefaultValuesOnOptions(options Options) Options {
	if options.EntryFetchRate == 0.0 {
		options.EntryFetchRate = DefaultEntryFetchRate
	}
	if options.EntryFetchMaxBurst == 0 {
		options.EntryFetchMaxBurst = DefaultEntryFetchMaxBurst
	}
	if options.Logger == nil {
		options.Logger = hclog.New(nil)
	}
	return options
}

// New creates a new cache with the given options and reasonable defaults.
// Further settings can be tweaked on the returned value.
func New(options Options) *Cache {
	options = applyDefaultValuesOnOptions(options)
	ctx, cancel := context.WithCancel(context.Background())
	c := &Cache{
		types:             make(map[string]typeEntry),
		entries:           make(map[string]cacheEntry),
		entriesExpiryHeap: ttlcache.NewExpiryHeap(),
		fetchHandles:      make(map[string]fetchHandle),
		stopCh:            make(chan struct{}),
		options:           options,
		rateLimitContext:  ctx,
		rateLimitCancel:   cancel,
	}

	// Start the expiry watcher
	go c.runExpiryLoop()

	return c
}

// RegisterOptions are options that can be associated with a type being
// registered for the cache. This changes the behavior of the cache for
// this type.
type RegisterOptions struct {
	// LastGetTTL is the time that the values returned by this type remain
	// in the cache after the last get operation. If a value isn't accessed
	// within this duration, the value is purged from the cache and
	// background refreshing will cease.
	LastGetTTL time.Duration

	// Refresh configures whether the data is actively refreshed or if
	// the data is only refreshed on an explicit Get. The default (false)
	// is to only request data on explicit Get.
	Refresh bool

	// SupportsBlocking should be set to true if the type supports blocking queries.
	// Types that do not support blocking queries will not be able to use
	// background refresh nor will the cache attempt blocking fetches if the
	// client requests them with MinIndex.
	SupportsBlocking bool

	// RefreshTimer is the time to sleep between attempts to refresh data.
	// If this is zero, then data is refreshed immediately when a fetch
	// is returned.
	//
	// Using different values for RefreshTimer and QueryTimeout, various
	// "refresh" mechanisms can be implemented (see the sketch after this
	// struct):
	//
	//   * With a high timer duration and a low timeout, a timer-based
	//     refresh can be set that minimizes load on the Consul servers.
	//
	//   * With a low timer and high timeout duration, a blocking-query-based
	//     refresh can be set so that changes in server data are recognized
	//     within the cache very quickly.
	//
	RefreshTimer time.Duration

	// QueryTimeout is the default value for the maximum query time for a fetch
	// operation. It is set as FetchOptions.Timeout so that cache.Type
	// implementations can use it as the MaxQueryTime.
	QueryTimeout time.Duration
}
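
// A configuration sketch of the two refresh styles described on RefreshTimer
// above (the durations are illustrative assumptions):
//
//	// Blocking-query-based refresh: no sleep between fetches; long blocking
//	// queries pick up server-side changes almost immediately.
//	blockingOpts := RegisterOptions{
//		Refresh:          true,
//		SupportsBlocking: true,
//		RefreshTimer:     0,
//		QueryTimeout:     10 * time.Minute,
//	}
//
//	// Timer-based refresh: poll on a fixed interval with short queries to
//	// minimize load on the Consul servers.
//	polledOpts := RegisterOptions{
//		Refresh:      true,
//		RefreshTimer: 30 * time.Second,
//		QueryTimeout: 1 * time.Second,
//	}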

// RegisterType registers a cacheable type.
//
// This makes the type available for Get but does not automatically perform
// any prefetching. In order to populate the cache, Get must be called.
func (c *Cache) RegisterType(n string, typ Type) {
	opts := typ.RegisterOptions()
	if opts.LastGetTTL == 0 {
		opts.LastGetTTL = 72 * time.Hour // reasonable default is days
	}

	c.typesLock.Lock()
	defer c.typesLock.Unlock()
	c.types[n] = typeEntry{Name: n, Type: typ, Opts: &opts}
}

// ReloadOptions updates the cache with the new options. It returns true if
// the Cache was updated, false if it was already up to date.
func (c *Cache) ReloadOptions(options Options) bool {
	options = applyDefaultValuesOnOptions(options)
	modified := !options.Equal(c.options)
	if modified {
		c.entriesLock.RLock()
		defer c.entriesLock.RUnlock()
		for _, entry := range c.entries {
			if c.options.EntryFetchRate != options.EntryFetchRate {
				entry.FetchRateLimiter.SetLimit(options.EntryFetchRate)
			}
			if c.options.EntryFetchMaxBurst != options.EntryFetchMaxBurst {
				entry.FetchRateLimiter.SetBurst(options.EntryFetchMaxBurst)
			}
		}
		c.options.EntryFetchRate = options.EntryFetchRate
		c.options.EntryFetchMaxBurst = options.EntryFetchMaxBurst
	}
	return modified
}
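
// A reload sketch: tightening the per-entry fetch rate at runtime (the value
// is an illustrative assumption; unset fields fall back to their defaults).
//
//	changed := c.ReloadOptions(Options{EntryFetchRate: rate.Limit(5)})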

// Get loads the data for the given type and request. If data satisfying the
// minimum index is present in the cache, it is returned immediately. Otherwise,
// this will block until the data is available or the request timeout is
// reached.
//
// Multiple Get calls for the same Request (matching CacheKey value) will
// block on a single network request.
//
// The timeout specified by the Request will be the timeout on the cache
// Get, and does not correspond to the timeout of any background data
// fetching. If the timeout is reached before data satisfying the minimum
// index is retrieved, the last known value (maybe nil) is returned. No
// error is returned on timeout. This matches the behavior of Consul blocking
// queries.
func (c *Cache) Get(ctx context.Context, t string, r Request) (interface{}, ResultMeta, error) {
	c.typesLock.RLock()
	tEntry, ok := c.types[t]
	c.typesLock.RUnlock()
	if !ok {
		// Shouldn't happen given that we successfully fetched this at least
		// once. But be robust against panics.
		return nil, ResultMeta{}, fmt.Errorf("unknown type in cache: %s", t)
	}
	return c.getWithIndex(ctx, newGetOptions(tEntry, r))
}

// getOptions contains the arguments for a Get request. It is used in place of
// Request so that internal functions can modify Info without having to extract
// it from the Request each time.
type getOptions struct {
	// Fetch is a closure over tEntry.Type.Fetch which provides the original
	// Request from the caller.
	Fetch     func(opts FetchOptions) (FetchResult, error)
	Info      RequestInfo
	TypeEntry typeEntry
}

func newGetOptions(tEntry typeEntry, r Request) getOptions {
	return getOptions{
		Fetch: func(opts FetchOptions) (FetchResult, error) {
			return tEntry.Type.Fetch(opts, r)
		},
		Info:      r.CacheInfo(),
		TypeEntry: tEntry,
	}
}

// getEntryLocked retrieves a cache entry and checks if it is ready to be
// returned given the other parameters. It reads from entries, so the caller
// must hold at least a read lock on entriesLock.
func (c *Cache) getEntryLocked(
	tEntry typeEntry,
	key string,
	info RequestInfo,
) (entryExists bool, entryValid bool, entry cacheEntry) {
	entry, ok := c.entries[key]
	if !entry.Valid {
		return ok, false, entry
	}

	// Check that the index is not specified or is lower than the entry's
	// value, or that the type doesn't support blocking.
	if tEntry.Opts.SupportsBlocking && info.MinIndex > 0 && info.MinIndex >= entry.Index {
		// MinIndex was given and matches or is higher than the current value, so
		// we ignore the cache and fall through to blocking on a new value below.
		return true, false, entry
	}

	// Check MaxAge is not exceeded if this is not a background refreshing type
	// and MaxAge was specified.
	if !tEntry.Opts.Refresh && info.MaxAge > 0 && entryExceedsMaxAge(info.MaxAge, entry) {
		return true, false, entry
	}

	// Check if re-validation is requested. If so, the first time round the
	// loop is not a hit but subsequent ones should be treated normally.
	if !tEntry.Opts.Refresh && info.MustRevalidate {
		if entry.Fetching {
			// There is an active blocking query for this data, which has not
			// returned. We can logically deduce that the contents of the cache
			// are actually current, and we can simply return this while
			// leaving the blocking query alone.
			return true, true, entry
		}
		return true, false, entry
	}

	return true, true, entry
}

func entryExceedsMaxAge(maxAge time.Duration, entry cacheEntry) bool {
	return !entry.FetchedAt.IsZero() && maxAge < time.Since(entry.FetchedAt)
}

// getWithIndex implements the main Get functionality but allows internal
// callers (Watch) to manipulate the blocking index separately from the actual
// request object.
func (c *Cache) getWithIndex(ctx context.Context, r getOptions) (interface{}, ResultMeta, error) {
	if r.Info.Key == "" {
		metrics.IncrCounter([]string{"consul", "cache", "bypass"}, 1)
		metrics.IncrCounter([]string{"cache", "bypass"}, 1)

		// If no key is specified, then we do not cache this request.
		// Pass directly through to the backend.
		result, err := r.Fetch(FetchOptions{MinIndex: r.Info.MinIndex})
		return result.Value, ResultMeta{}, err
	}

	key := makeEntryKey(r.TypeEntry.Name, r.Info.Datacenter, r.Info.PeerName, r.Info.Token, r.Info.Key)

	// First time through
	first := true

	// timeoutCh for watching our timeout
	var timeoutCh <-chan time.Time

RETRY_GET:
	// Get the current value
	c.entriesLock.RLock()
	_, entryValid, entry := c.getEntryLocked(r.TypeEntry, key, r.Info)
	c.entriesLock.RUnlock()

	if entry.Expiry != nil {
		// The entry already exists in the TTL heap, touch it to keep it alive since
		// this Get is still interested in the value. Note that we used to only do
		// this in the `entryValid` block below but that means that a cache entry
		// will expire after its TTL regardless of how many callers are waiting for
		// updates in this method in a couple of cases:
		// 1. If the agent is disconnected from servers for the TTL then the client
		//    will be in backoff getting errors on each call to Get and since an
		//    errored cache entry has Valid = false it won't be touching the TTL.
		// 2. If the value is just not changing then the client's current index
		//    will be equal to the entry index and entryValid will be false. This
		//    is a common case!
		//
		// But regardless of the state of the entry, assuming it's already in the
		// TTL heap, we should touch it every time around here since this caller at
		// least still cares about the value!
		c.entriesLock.Lock()
		c.entriesExpiryHeap.Update(entry.Expiry.Index(), r.TypeEntry.Opts.LastGetTTL)
		c.entriesLock.Unlock()
	}

	if entryValid {
		meta := ResultMeta{Index: entry.Index}
		if first {
			metrics.IncrCounter([]string{"consul", "cache", r.TypeEntry.Name, "hit"}, 1)
			metrics.IncrCounter([]string{"cache", r.TypeEntry.Name, "hit"}, 1)
			meta.Hit = true
		}

		// If refresh is enabled, calculate age based on whether the background
		// routine is still connected.
		if r.TypeEntry.Opts.Refresh {
			meta.Age = time.Duration(0)
			if !entry.RefreshLostContact.IsZero() {
				meta.Age = time.Since(entry.RefreshLostContact)
			}
		} else {
			// For non-background refresh types, the age is just how long since we
			// fetched it last.
			if !entry.FetchedAt.IsZero() {
				meta.Age = time.Since(entry.FetchedAt)
			}
		}

		// We purposely do not return an error here since the cache only works with
		// fetching values that either have a value or have an error, but not both.
		// The Error may be non-nil in the entry in the case that an error has
		// occurred _since_ the last good value, but we still want to return the
		// good value to clients that are not requesting a specific version. The
		// effect of this is that blocking clients will all see an error immediately
		// without waiting a whole timeout to see it, but clients that just look up
		// cache with an older index than the last valid result will still see the
		// result and not the error here. I.e. the error is not "cached" without a
		// new fetch attempt occurring, but the last good value can still be fetched
		// from cache.
		return entry.Value, meta, nil
	}

	// If this isn't our first time through and our last value has an error, then
	// we return the error. This has the behavior that we don't sit in a retry
	// loop getting the same error for the entire duration of the timeout.
	// Instead, we make one effort to fetch a new value, and if there was an
	// error, we return. Note that the invariant is that if both entry.Value AND
	// entry.Error are non-nil, the error _must_ be more recent than the Value. In
	// other words valid fetches should reset the error. See
	// https://github.com/hashicorp/consul/issues/4480.
	if !first && entry.Error != nil {
		return entry.Value, ResultMeta{Index: entry.Index}, entry.Error
	}

	if first {
		// We increment two different counters for cache misses depending on
		// whether we're missing because we didn't have the data at all,
		// or if we're missing because we're blocking on a set index.
		missKey := "miss_block"
		if r.Info.MinIndex == 0 {
			missKey = "miss_new"
		}
		metrics.IncrCounter([]string{"consul", "cache", r.TypeEntry.Name, missKey}, 1)
		metrics.IncrCounter([]string{"cache", r.TypeEntry.Name, missKey}, 1)
	}

	// Set our timeout channel if we must
	if r.Info.Timeout > 0 && timeoutCh == nil {
		timeoutCh = time.After(r.Info.Timeout)
	}

	// At this point, we know we either don't have a value at all or the
	// value we have is too old. We need to wait for new data.
	waiterCh := c.fetch(key, r, true, 0, false)

	// No longer our first time through
	first = false

	select {
	case <-ctx.Done():
		return nil, ResultMeta{}, ctx.Err()
	case <-waiterCh:
		// Our fetch returned, retry the get from the cache.
		r.Info.MustRevalidate = false
		goto RETRY_GET

	case <-timeoutCh:
		// Timeout on the cache read, just return whatever we have.
		return entry.Value, ResultMeta{Index: entry.Index}, nil
	}
}

func makeEntryKey(t, dc, peerName, token, key string) string {
	// TODO(peering): figure out if this is the desired format
	if peerName != "" {
		return fmt.Sprintf("%s/%s/%s/%s", t, "peer:"+peerName, token, key)
	}
	return fmt.Sprintf("%s/%s/%s/%s", t, dc, token, key)
}
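
// For illustration (hypothetical values): makeEntryKey("catalog-services",
// "dc1", "", "token1", "web") yields "catalog-services/dc1/token1/web",
// while the same request against peer "us-east" yields
// "catalog-services/peer:us-east/token1/web".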

// fetch triggers a new background fetch for the given Request. If a
// background fetch is already running for a matching Request, the waiter
// channel for that request is returned. The effect of this is that there
// is only ever one blocking query for any matching requests.
//
// If allowNew is true then the fetch should create the cache entry
// if it doesn't exist. If this is false, then fetch will do nothing
// if the entry doesn't exist. This latter case is to support refreshing.
func (c *Cache) fetch(key string, r getOptions, allowNew bool, attempt uint, ignoreExisting bool) <-chan struct{} {
	// We acquire a write lock because we may have to set Fetching to true.
	c.entriesLock.Lock()
	defer c.entriesLock.Unlock()
	ok, entryValid, entry := c.getEntryLocked(r.TypeEntry, key, r.Info)

	// This handles the case where a fetch succeeded after checking for its existence in
	// getWithIndex. This ensures that we don't miss updates.
	if ok && entryValid && !ignoreExisting {
		ch := make(chan struct{})
		close(ch)
		return ch
	}

	// If we aren't allowing new values and we don't have an existing value,
	// return immediately. We return an immediately-closed channel so nothing
	// blocks.
	if !ok && !allowNew {
		ch := make(chan struct{})
		close(ch)
		return ch
	}

	// If we already have an entry and it is actively fetching, then return
	// the currently active waiter.
	if ok && entry.Fetching {
		return entry.Waiter
	}

	// If we don't have an entry, then create it. The entry must be marked
	// as invalid so that it isn't returned as a valid value for a zero index.
	if !ok {
		entry = cacheEntry{
			Valid:  false,
			Waiter: make(chan struct{}),
			FetchRateLimiter: rate.NewLimiter(
				c.options.EntryFetchRate,
				c.options.EntryFetchMaxBurst,
			),
		}
	}

	// Set that we're fetching to true, which makes it so that future
	// identical calls to fetch will return the same waiter rather than
	// perform multiple fetches.
	entry.Fetching = true
	c.entries[key] = entry
	metrics.SetGauge([]string{"consul", "cache", "entries_count"}, float32(len(c.entries)))
	metrics.SetGauge([]string{"cache", "entries_count"}, float32(len(c.entries)))

	tEntry := r.TypeEntry

	// The actual Fetch must be performed in a goroutine. Ensure that we only
	// have one in-flight at a time, but don't use a deferred
	// context.WithCancel style termination so that these things outlive their
	// requester.
	//
	// By the time we get here the system WANTS to make a replacement fetcher, so
	// we terminate the prior one and replace it.
	handle := c.getOrReplaceFetchHandle(key)
	go func(handle fetchHandle) {
		defer c.deleteFetchHandle(key, handle.id)

		// If we have background refresh and currently are in "disconnected" state,
		// waiting for a response might mean we mark our results as stale for up to
		// 10 minutes (max blocking timeout) after connection is restored. To reduce
		// that window, we assume that if the fetch takes more than 31 seconds then
		// they are correctly blocking. We choose 31 seconds because yamux
		// keepalives are every 30 seconds so the RPC should fail if the packets are
		// being blackholed for more than 30 seconds.
		var connectedTimer *time.Timer
		if tEntry.Opts.Refresh && entry.Index > 0 && tEntry.Opts.QueryTimeout > 31*time.Second {
			connectedTimer = time.AfterFunc(31*time.Second, func() {
				c.entriesLock.Lock()
				defer c.entriesLock.Unlock()
				entry, ok := c.entries[key]
				if !ok || entry.RefreshLostContact.IsZero() {
					return
				}
				entry.RefreshLostContact = time.Time{}
				c.entries[key] = entry
			})
		}

		fOpts := FetchOptions{}
		if tEntry.Opts.SupportsBlocking {
			fOpts.MinIndex = entry.Index
			fOpts.Timeout = tEntry.Opts.QueryTimeout

			if fOpts.Timeout == 0 {
				fOpts.Timeout = 10 * time.Minute
			}
		}
		if entry.Valid {
			fOpts.LastResult = &FetchResult{
				Value: entry.Value,
				State: entry.State,
				Index: entry.Index,
			}
		}
		if err := entry.FetchRateLimiter.Wait(c.rateLimitContext); err != nil {
			if connectedTimer != nil {
				connectedTimer.Stop()
			}
			entry.Error = fmt.Errorf("rateLimitContext canceled: %s", err.Error())
			return
		}
		// Start building the new entry by blocking on the fetch.
		result, err := r.Fetch(fOpts)
		if connectedTimer != nil {
			connectedTimer.Stop()
		}

		// If we were stopped while waiting on a blocking query, now would be a
		// good time to detect that.
		select {
		case <-handle.stopCh:
			return
		default:
		}

		// Copy the existing entry to start.
		newEntry := entry
		newEntry.Fetching = false

		// Importantly, always reset the Error. Having both Error and a Value that
		// are non-nil is allowed in the cache entry but it indicates that the Error
		// is _newer_ than the last good value. So if the err is nil then we need to
		// reset to replace any _older_ errors and avoid them bubbling up. If the
		// error is non-nil then we need to set it anyway and used to do it in the
		// code below. See https://github.com/hashicorp/consul/issues/4480.
		newEntry.Error = err

		if result.Value != nil {
			// A new value was given, so we create a brand new entry.
			if !result.NotModified {
				newEntry.Value = result.Value
			}
			newEntry.State = result.State
			newEntry.Index = result.Index
			newEntry.FetchedAt = time.Now()
			if newEntry.Index < 1 {
				// Less than one is invalid unless there was an error and in this case
				// there wasn't since a value was returned. If a badly behaved RPC
				// returns 0 when it has no data, we might get into a busy loop here. We
				// set this to a minimum of 1, which is safe because no valid user data
				// can ever be written at raft index 1 due to the bootstrap process for
				// raft. This ensures that any subsequent background refresh request will
				// always block, but allows the initial request to return immediately
				// even if there is no data.
				newEntry.Index = 1
			}

			// This is a valid entry with a result
			newEntry.Valid = true
		} else if result.State != nil && err == nil {
			// Also set state if it's non-nil but Value is nil. This is important in the
			// case we are returning nil due to a timeout or a transient error like rate
			// limiting that we want to mask from the user - there is no result yet but
			// we want to manage retrying internally before we return an error to user.
			// The retrying state is in State so we need to still update that in the
			// entry even if we don't have an actual result yet (e.g. hit a rate limit
			// on first request for a leaf certificate).
			newEntry.State = result.State
		}

		preventRefresh := acl.IsErrNotFound(err)

		// Error handling
		if err == nil {
			labels := []metrics.Label{{Name: "result_not_modified", Value: strconv.FormatBool(result.NotModified)}}
			// TODO(kit): move tEntry.Name to a label on the first write here and deprecate the second write
			metrics.IncrCounterWithLabels([]string{"consul", "cache", "fetch_success"}, 1, labels)
			metrics.IncrCounterWithLabels([]string{"cache", "fetch_success"}, 1, labels)
			metrics.IncrCounterWithLabels([]string{"consul", "cache", tEntry.Name, "fetch_success"}, 1, labels)
			metrics.IncrCounterWithLabels([]string{"cache", tEntry.Name, "fetch_success"}, 1, labels)

			if result.Index > 0 {
				// Reset the attempts counter so we don't have any backoff
				attempt = 0
			} else {
				// A result with a zero index is an implicit error case. There was no
				// actual error, but it implies the RPC found no index (nothing written
				// yet for that type) and didn't take care to return the safe "1" index.
				// We don't want to actually treat it like an error by setting
				// newEntry.Error to something non-nil, but we should guard against 100%
				// CPU burn hot loops caused by that case, which will never block but
				// also won't back off either. So we treat it as a failed attempt so that
				// at least the failure backoff will save our CPU while still
				// periodically refreshing so normal service can resume when the servers
				// actually have something to return from the RPC. If we get in this
				// state it can be considered a bug in the RPC implementation (to ever
				// return a zero index), however since it can happen this is a safety net
				// for the future.
				attempt++
			}

			// If we have refresh active, this successful response means cache is now
			// "connected" and should not be stale. Reset the lost contact timer.
			if tEntry.Opts.Refresh {
				newEntry.RefreshLostContact = time.Time{}
			}
		} else {
			// TODO (mkeeler) maybe change the name of this label to be more indicative of it just
			// stopping the background refresh
			labels := []metrics.Label{{Name: "fatal", Value: strconv.FormatBool(preventRefresh)}}

			// TODO(kit): Add tEntry.Name to label on fetch_error and deprecate second write
			metrics.IncrCounterWithLabels([]string{"consul", "cache", "fetch_error"}, 1, labels)
			metrics.IncrCounterWithLabels([]string{"cache", "fetch_error"}, 1, labels)
			metrics.IncrCounterWithLabels([]string{"consul", "cache", tEntry.Name, "fetch_error"}, 1, labels)
			metrics.IncrCounterWithLabels([]string{"cache", tEntry.Name, "fetch_error"}, 1, labels)

			// Increment attempt counter
			attempt++

			// If we are refreshing and just failed, update the lost contact time as
			// our cache will be stale until we get successfully reconnected. We only
			// set this on the first failure (if it's zero) so we can track how long
			// it's been since we had a valid connection/up-to-date view of the state.
			if tEntry.Opts.Refresh && newEntry.RefreshLostContact.IsZero() {
				newEntry.RefreshLostContact = time.Now()
			}
		}

		// Create a new waiter that will be used for the next fetch.
		newEntry.Waiter = make(chan struct{})

		// Set our entry
		c.entriesLock.Lock()

		if _, ok := c.entries[key]; !ok {
			// This entry was evicted during our fetch. DON'T re-insert it or fall
			// through to the refresh loop below otherwise it will live forever! In
			// theory there should not be any Get calls waiting on entry.Waiter since
			// they would have prevented the eviction, but in practice there may be
			// due to timing and the fact that we don't update the TTL on the entry if
			// errors are being returned for a while. So we do need to unblock them,
			// which will mean they recreate the entry again right away and so "reset"
			// to a good state anyway!
			c.entriesLock.Unlock()

			// Trigger any waiters that are around.
			close(entry.Waiter)
			return
		}

		// If this is a new entry (not in the heap yet), then set up the
		// initial expiry information and insert. If we're already in
		// the heap we do nothing since we're reusing the same entry.
		if newEntry.Expiry == nil || newEntry.Expiry.Index() == ttlcache.NotIndexed {
			newEntry.Expiry = c.entriesExpiryHeap.Add(key, tEntry.Opts.LastGetTTL)
		}

		c.entries[key] = newEntry
		c.entriesLock.Unlock()

		// Trigger the old waiter
		close(entry.Waiter)

		// If refresh is enabled, run the refresh in due time. The refresh
		// below might block, but saves us from spawning another goroutine.
		//
		// We want to have ACL not found errors stop cache refresh for the cases
		// where the token used for the query was deleted. If the request
		// was coming from a cache notification then it will start the
		// request back up again shortly but in the general case this prevents
		// spamming the logs with tons of ACL not found errors for days.
		if tEntry.Opts.Refresh && !preventRefresh {
			// Check if cache was stopped
			if atomic.LoadUint32(&c.stopped) == 1 {
				return
			}

			// If we're over the attempt minimum, start an exponential backoff.
			wait := backOffWait(attempt)

			// If we have a timer, wait for it
			wait += tEntry.Opts.RefreshTimer

			select {
			case <-time.After(wait):
			case <-handle.stopCh:
				return
			}

			// Trigger. The "allowNew" field is false because in the time we were
			// waiting to refresh we may have expired and got evicted. If that
			// happened, we don't want to create a new entry.
			r.Info.MustRevalidate = false
			r.Info.MinIndex = 0
			c.fetch(key, r, false, attempt, true)
		}
	}(handle)

	return entry.Waiter
}

func (c *Cache) getOrReplaceFetchHandle(key string) fetchHandle {
	c.fetchLock.Lock()
	defer c.fetchLock.Unlock()

	if prevHandle, ok := c.fetchHandles[key]; ok {
		close(prevHandle.stopCh)
	}

	c.lastFetchID++

	handle := fetchHandle{
		id:     c.lastFetchID,
		stopCh: make(chan struct{}),
	}

	c.fetchHandles[key] = handle

	return handle
}

func (c *Cache) deleteFetchHandle(key string, fetchID uint64) {
	c.fetchLock.Lock()
	defer c.fetchLock.Unlock()

	// Only remove a fetchHandle if it's YOUR fetchHandle.
	handle, ok := c.fetchHandles[key]
	if !ok {
		return
	}

	if handle.id == fetchID {
		delete(c.fetchHandles, key)
	}
}

func backOffWait(failures uint) time.Duration {
	if failures > CacheRefreshBackoffMin {
		shift := failures - CacheRefreshBackoffMin
		waitTime := CacheRefreshMaxWait
		if shift < 31 {
			waitTime = (1 << shift) * time.Second
		}
		if waitTime > CacheRefreshMaxWait {
			waitTime = CacheRefreshMaxWait
		}
		return waitTime + lib.RandomStagger(waitTime)
	}
	return 0
}
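
// For illustration: with CacheRefreshBackoffMin = 3 and CacheRefreshMaxWait =
// 1 minute, the base wait is 0 for the first three failures, then 2s, 4s, 8s,
// ... for failures 4, 5, 6, ..., capped at one minute, plus a random stagger
// of up to the base wait to avoid synchronized retries.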

// runExpiryLoop is a blocking function that watches the expiration
// heap and invalidates entries that have expired.
func (c *Cache) runExpiryLoop() {
	for {
		c.entriesLock.RLock()
		timer := c.entriesExpiryHeap.Next()
		c.entriesLock.RUnlock()

		select {
		case <-c.stopCh:
			timer.Stop()
			return
		case <-c.entriesExpiryHeap.NotifyCh:
			timer.Stop()
			continue

		case <-timer.Wait():
			c.entriesLock.Lock()

			entry := timer.Entry
			if closer, ok := c.entries[entry.Key()].State.(io.Closer); ok {
				closer.Close()
			}

			// Entry expired! Remove it.
			delete(c.entries, entry.Key())
			c.entriesExpiryHeap.Remove(entry.Index())

			// Set some metrics
			metrics.IncrCounter([]string{"consul", "cache", "evict_expired"}, 1)
			metrics.IncrCounter([]string{"cache", "evict_expired"}, 1)
			metrics.SetGauge([]string{"consul", "cache", "entries_count"}, float32(len(c.entries)))
			metrics.SetGauge([]string{"cache", "entries_count"}, float32(len(c.entries)))

			c.entriesLock.Unlock()
		}
	}
}

// Close stops any background work and frees all resources for the cache.
// Current Fetch requests are allowed to continue to completion and callers may
// still access the current cache values so coordination isn't needed with
// callers, however no background activity will continue. It's intended to close
// the cache at agent shutdown so no further requests should be made, however
// concurrent or in-flight ones won't break.
func (c *Cache) Close() error {
	wasStopped := atomic.SwapUint32(&c.stopped, 1)
	if wasStopped == 0 {
		// First time only, close stop chan
		close(c.stopCh)
		c.rateLimitCancel()
	}
	return nil
}
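
// A shutdown sketch:
//
//	c := New(Options{})
//	defer c.Close() // stops background fetches and the expiry loop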

// Prepopulate puts something in the cache manually. This is useful when the
// correct initial value is known and the cache shouldn't refetch the same thing
// on startup. It is used to set the ConnectRootCA and AgentLeafCert when
// AutoEncrypt.TLS is turned on. The cache itself cannot fetch that the first
// time because it requires a special RPCType. Subsequent runs are fine though.
func (c *Cache) Prepopulate(t string, res FetchResult, dc, peerName, token, k string) error {
	key := makeEntryKey(t, dc, peerName, token, k)
	newEntry := cacheEntry{
		Valid:     true,
		Value:     res.Value,
		State:     res.State,
		Index:     res.Index,
		FetchedAt: time.Now(),
		Waiter:    make(chan struct{}),
		FetchRateLimiter: rate.NewLimiter(
			c.options.EntryFetchRate,
			c.options.EntryFetchMaxBurst,
		),
	}
	c.entriesLock.Lock()
	c.entries[key] = newEntry
	c.entriesLock.Unlock()
	return nil
}
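
// A prepopulation sketch (the type name and values are hypothetical):
//
//	res := FetchResult{Value: roots, Index: 1}
//	_ = c.Prepopulate("connect-ca-root", res, "dc1", "", "token", "")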