@@ -75,12 +75,6 @@ var RPCSummaries = []prometheus.SummaryDefinition{
 }
 
 const (
-	// jitterFraction is a the limit to the amount of jitter we apply
-	// to a user specified MaxQueryTime. We divide the specified time by
-	// the fraction. So 16 == 6.25% limit of jitter. This same fraction
-	// is applied to the RPCHoldTimeout
-	jitterFraction = 16
-
 	// Warn if the Raft command is larger than this.
 	// If it's over 1MB something is probably being abusive.
 	raftWarnSize = 1024 * 1024
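
The removed comment pins down the jitter bound: dividing a duration by 16 caps the added jitter at 1/16, i.e. 6.25%, of the base value. The constant is not gone — it moves to the structs package as structs.JitterFraction, which the later hunks switch to. A quick standalone check of the arithmetic, with an assumed 7-second hold timeout for illustration (not a value taken from this diff):

package main

import (
	"fmt"
	"time"
)

func main() {
	// Assumed for illustration only; the real value comes from the
	// server's RPCHoldTimeout configuration.
	holdTimeout := 7 * time.Second

	// Dividing by 16 bounds the jitter at 6.25% of the base timeout.
	const jitterFraction = 16
	maxJitter := holdTimeout / jitterFraction

	fmt.Println(maxJitter) // 437.5ms
}
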
@@ -526,7 +520,14 @@ func (c *limitedConn) Read(b []byte) (n int, err error) {
 }
 
 // canRetry returns true if the request and error indicate that a retry is safe.
-func canRetry(info structs.RPCInfo, err error) bool {
+func canRetry(info structs.RPCInfo, err error, start time.Time, config *Config) bool {
+	if info != nil && info.HasTimedOut(start, config.RPCHoldTimeout, config.MaxQueryTime, config.DefaultQueryTime) {
+		// RPCInfo timeout may include extra time for MaxQueryTime
+		return false
+	} else if info == nil && time.Since(start) > config.RPCHoldTimeout {
+		// When not RPCInfo, timeout is only RPCHoldTimeout
+		return false
+	}
 	// No leader errors are always safe to retry since no state could have
 	// been changed.
 	if structs.IsErrNoLeader(err) {
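
The widened signature lets canRetry enforce the caller's overall deadline itself instead of leaving that to the call sites. Requests that carry RPCInfo delegate to HasTimedOut, whose window can include a blocking query's MaxQueryTime on top of RPCHoldTimeout; a nil info gets only RPCHoldTimeout. A minimal sketch of the two-path check — deadlinePassed is a hypothetical stand-in for illustration, not part of this change:

package main

import (
	"fmt"
	"time"
)

// deadlinePassed mirrors the branch added above: with request info the
// retry window may include the query timeout, without it only the hold
// timeout applies. (Hypothetical helper, not Consul's.)
func deadlinePassed(hasInfo bool, start time.Time, holdTimeout, queryTimeout time.Duration) bool {
	elapsed := time.Since(start)
	if hasInfo {
		return elapsed > holdTimeout+queryTimeout
	}
	return elapsed > holdTimeout
}

func main() {
	start := time.Now().Add(-10 * time.Second) // pretend the RPC began 10s ago
	fmt.Println(deadlinePassed(false, start, 7*time.Second, 0))            // true: past the hold timeout
	fmt.Println(deadlinePassed(true, start, 7*time.Second, 5*time.Minute)) // false: a blocking query may still wait
}
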
@@ -545,16 +546,16 @@ func canRetry(info structs.RPCInfo, err error) bool {
 // ForwardRPC is used to forward an RPC request to a remote DC or to the local leader
 // Returns a bool of if forwarding was performed, as well as any error
-func (s *Server) ForwardRPC(method string, req structs.RPCInfo, reply interface{}) (bool, error) {
-	var firstCheck time.Time
+func (s *Server) ForwardRPC(method string, info structs.RPCInfo, reply interface{}) (bool, error) {
+	firstCheck := time.Now()
 
 	// Handle DC forwarding
-	dc := req.RequestDatacenter()
+	dc := info.RequestDatacenter()
 	if dc != s.config.Datacenter {
 		// Local tokens only work within the current datacenter. Check to see
 		// if we are attempting to forward one to a remote datacenter and strip
 		// it, falling back on the anonymous token on the other end.
-		if token := req.TokenSecret(); token != "" {
+		if token := info.TokenSecret(); token != "" {
 			done, ident, err := s.ResolveIdentityFromToken(token)
 			if done {
 				if err != nil && !acl.IsErrNotFound(err) {
@@ -562,18 +563,18 @@ func (s *Server) ForwardRPC(method string, req structs.RPCInfo, reply interface{
 					return false, err
 				}
 				if ident != nil && ident.IsLocal() {
 					// Strip it from the request.
-					req.SetTokenSecret("")
-					defer req.SetTokenSecret(token)
+					info.SetTokenSecret("")
+					defer info.SetTokenSecret(token)
 				}
 			}
 		}
 
-		err := s.forwardDC(method, dc, req, reply)
+		err := s.forwardDC(method, dc, info, reply)
 		return true, err
 	}
 
 	// Check if we can allow a stale read, ensure our local DB is initialized
-	if req.IsRead() && req.AllowStaleRead() && !s.raft.LastContact().IsZero() {
+	if info.IsRead() && info.AllowStaleRead() && !s.raft.LastContact().IsZero() {
 		return false, nil
 	}
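
The req-to-info rename here is mechanical, but the pattern around it deserves a note: a datacenter-local token is blanked before the request crosses to the remote DC and restored by the deferred call. Because defer fires at function return rather than block exit, the token is still empty during the forwardDC call and comes back only after ForwardRPC unwinds. A self-contained sketch of that strip-and-restore pattern; the request type is a stand-in, not Consul's:

package main

import "fmt"

// request stands in for the token accessors on structs.RPCInfo.
type request struct{ token string }

func (r *request) TokenSecret() string     { return r.token }
func (r *request) SetTokenSecret(t string) { r.token = t }

// forward blanks a local token for the duration of the call and restores
// it on return, mirroring the defer pair in ForwardRPC.
func forward(r *request) {
	if token := r.TokenSecret(); token != "" {
		r.SetTokenSecret("")
		defer r.SetTokenSecret(token)
	}
	fmt.Printf("sent with token %q\n", r.TokenSecret()) // sent with token ""
}

func main() {
	r := &request{token: "local-secret"}
	forward(r)
	fmt.Printf("restored to %q\n", r.TokenSecret()) // restored to "local-secret"
}
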
@@ -596,20 +597,15 @@ CHECK_LEADER:
 	// Handle the case of a known leader
 	if leader != nil {
 		rpcErr = s.connPool.RPC(s.config.Datacenter, leader.ShortName, leader.Addr,
-			method, req, reply)
-		if rpcErr != nil && canRetry(req, rpcErr) {
-			goto RETRY
+			method, info, reply)
+		if rpcErr == nil {
+			return true, nil
 		}
-		return true, rpcErr
 	}
 
 RETRY:
-	// Gate the request until there is a leader
-	if firstCheck.IsZero() {
-		firstCheck = time.Now()
-	}
-	if time.Since(firstCheck) < s.config.RPCHoldTimeout {
-		jitter := lib.RandomStagger(s.config.RPCHoldTimeout / jitterFraction)
+	if retry := canRetry(info, rpcErr, firstCheck, s.config); retry {
+		// Gate the request until there is a leader
+		jitter := lib.RandomStagger(s.config.RPCHoldTimeout / structs.JitterFraction)
 		select {
 		case <-time.After(jitter):
 			goto CHECK_LEADER
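
The control flow tightens here: a successful RPC returns immediately, every failure falls through to RETRY, and canRetry now owns both the error classification and the deadline check (firstCheck is initialized once at the top of ForwardRPC, so the IsZero dance disappears). When a retry is allowed, the request sleeps a random jitter of at most 1/16 of RPCHoldTimeout before jumping back to CHECK_LEADER. The same gate, restated without goto as a loop; attempt and the shutdown wiring are illustrative, and the error classification is elided:

package main

import (
	"errors"
	"fmt"
	"math/rand"
	"time"
)

// callWithRetry keeps attempting until success, a spent hold window, or
// shutdown, sleeping a bounded random jitter between tries so that
// concurrent callers do not stampede a newly elected leader.
func callWithRetry(attempt func() error, holdTimeout time.Duration, shutdownCh <-chan struct{}) error {
	start := time.Now()
	for {
		err := attempt()
		if err == nil {
			return nil
		}
		if time.Since(start) > holdTimeout {
			return err // hold window exhausted; stop retrying
		}
		// At most 1/16 (6.25%) of the hold timeout, as in the diff.
		jitter := time.Duration(rand.Int63n(int64(holdTimeout / 16)))
		select {
		case <-time.After(jitter):
		case <-shutdownCh:
			return err
		}
	}
}

func main() {
	calls := 0
	err := callWithRetry(func() error {
		calls++
		if calls < 3 {
			return errors.New("no leader")
		}
		return nil
	}, 7*time.Second, make(chan struct{}))
	fmt.Println(calls, err) // 3 <nil>
}
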
@@ -832,7 +828,7 @@ func (s *Server) blockingQuery(queryOpts structs.QueryOptionsCompat, queryMeta s
 	}
 
 	// Apply a small amount of jitter to the request.
-	queryTimeout += lib.RandomStagger(queryTimeout / jitterFraction)
+	queryTimeout += lib.RandomStagger(queryTimeout / structs.JitterFraction)
 
 	// wrap the base context with a deadline
 	ctx, cancel = context.WithDeadline(ctx, time.Now().Add(queryTimeout))
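
Only the constant's home changes in this hunk, but the surrounding lines show the jitter doing its job for blocking queries: up to 6.25% of the query timeout is added before the value becomes a context deadline, so watchers on the same data do not all wake at the same instant. A worked sketch with an assumed 5-minute timeout; rand.Int63n stands in for lib.RandomStagger, which returns a random duration below its argument:

package main

import (
	"context"
	"fmt"
	"math/rand"
	"time"
)

func main() {
	queryTimeout := 5 * time.Minute // assumed MaxQueryTime, for illustration

	// Mirrors: queryTimeout += lib.RandomStagger(queryTimeout / structs.JitterFraction)
	queryTimeout += time.Duration(rand.Int63n(int64(queryTimeout / 16)))

	ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(queryTimeout))
	defer cancel()

	deadline, _ := ctx.Deadline()
	fmt.Println("query unblocks by:", deadline)
}
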
@@ -933,7 +929,7 @@ func (s *Server) consistentRead() error {
 	if s.isReadyForConsistentReads() {
 		return nil
 	}
-	jitter := lib.RandomStagger(s.config.RPCHoldTimeout / jitterFraction)
+	jitter := lib.RandomStagger(s.config.RPCHoldTimeout / structs.JitterFraction)
 	deadline := time.Now().Add(s.config.RPCHoldTimeout)
 
 	for time.Now().Before(deadline) {
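
consistentRead picks up the same rename. The visible shape is: one jittered interval, then a poll loop bounded by RPCHoldTimeout. The loop body sits outside this hunk, so the sleep-and-recheck below is an assumption about its shape, with illustrative names throughout:

package main

import (
	"errors"
	"fmt"
	"time"
)

// waitUntilReady polls a readiness check until a deadline passes, pausing
// a jittered interval between polls. (Sketch; not Consul's implementation.)
func waitUntilReady(ready func() bool, holdTimeout, jitter time.Duration) error {
	if ready() {
		return nil
	}
	deadline := time.Now().Add(holdTimeout)
	for time.Now().Before(deadline) {
		time.Sleep(jitter)
		if ready() {
			return nil
		}
	}
	return errors.New("not ready for consistent reads")
}

func main() {
	readyAt := time.Now().Add(50 * time.Millisecond)
	err := waitUntilReady(func() bool { return time.Now().After(readyAt) },
		time.Second, 10*time.Millisecond)
	fmt.Println(err) // <nil>
}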