Add autopilot server health tracking

This adds two goroutines that perform autopilot tasks on the leader: one
monitors the health of the servers, and another periodically cleans up
dead servers, with a cap on how many can be removed at once. It also adds
a new HTTP endpoint, `/v1/operator/autopilot/health`, which exposes this
information through a new operator RPC endpoint.
pull/2788/head
Kyle Havlovitz 2017-03-01 14:04:40 -08:00
parent ab6c49ab4c
commit 2eefe3ca5b
22 changed files with 1086 additions and 118 deletions


@@ -6,6 +6,7 @@ import (
"io"
"strconv"
"strings"
"time"
)
// Operator can be used to perform low-level operator tasks for Consul.
@@ -79,6 +80,19 @@ type AutopilotConfiguration struct {
// peer list when a new server joins
CleanupDeadServers bool
// LastContactThreshold is the limit on the amount of time a server can go
// without leader contact before being considered unhealthy.
LastContactThreshold time.Duration
// MaxTrailingLogs is the number of entries in the Raft log that a server can
// be behind before being considered unhealthy.
MaxTrailingLogs uint64
// ServerStabilizationTime is the minimum amount of time a server must be
// in a stable, healthy state before it can be added to the cluster. Only
// applicable with Raft protocol version 3 or higher.
ServerStabilizationTime time.Duration
// CreateIndex holds the index corresponding to the creation of this configuration.
// This is a read-only field.
CreateIndex uint64
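
For reference, here is a minimal sketch of reading these fields back through the Go API client. It assumes the operator client's existing `AutopilotGetConfiguration` helper (the same call the `get-config` CLI command builds on) and an agent at the default address:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	// Connect to the local agent (127.0.0.1:8500 by default).
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Read back the autopilot configuration, including the new fields.
	conf, err := client.Operator().AutopilotGetConfiguration(nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("CleanupDeadServers:     ", conf.CleanupDeadServers)
	fmt.Println("LastContactThreshold:   ", conf.LastContactThreshold)
	fmt.Println("MaxTrailingLogs:        ", conf.MaxTrailingLogs)
	fmt.Println("ServerStabilizationTime:", conf.ServerStabilizationTime)
}
```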


@@ -419,6 +419,15 @@ func (a *Agent) consulConfig() *consul.Config {
if a.config.Autopilot.CleanupDeadServers != nil {
base.AutopilotConfig.CleanupDeadServers = *a.config.Autopilot.CleanupDeadServers
}
if a.config.Autopilot.LastContactThreshold != nil {
base.AutopilotConfig.LastContactThreshold = *a.config.Autopilot.LastContactThreshold
}
if a.config.Autopilot.MaxTrailingLogs != nil {
base.AutopilotConfig.MaxTrailingLogs = *a.config.Autopilot.MaxTrailingLogs
}
if a.config.Autopilot.ServerStabilizationTime != nil {
base.AutopilotConfig.ServerStabilizationTime = *a.config.Autopilot.ServerStabilizationTime
}
// Format the build string
revision := a.config.Revision


@@ -135,6 +135,8 @@ func (c *Command) readConfig() *Config {
f.IntVar(&cmdConfig.Protocol, "protocol", -1,
"Sets the protocol version. Defaults to latest.")
f.IntVar(&cmdConfig.RaftProtocol, "raft-protocol", -1,
"Sets the Raft protocol version. Defaults to latest.")
f.BoolVar(&cmdConfig.EnableSyslog, "syslog", false,
"Enables logging to syslog.")


@@ -265,6 +265,21 @@ type Autopilot struct {
// CleanupDeadServers enables the automatic cleanup of dead servers when new ones
// are added to the peer list. Defaults to true.
CleanupDeadServers *bool `mapstructure:"cleanup_dead_servers"`
// LastContactThreshold is the limit on the amount of time a server can go
// without leader contact before being considered unhealthy.
LastContactThreshold *time.Duration `mapstructure:"-" json:"-"`
LastContactThresholdRaw string `mapstructure:"last_contact_threshold"`
// MaxTrailingLogs is the number of entries in the Raft log that a server can
// be behind before being considered unhealthy.
MaxTrailingLogs *uint64 `mapstructure:"max_trailing_logs"`
// ServerStabilizationTime is the minimum amount of time a server must be
// in a stable, healthy state before it can be added to the cluster. Only
// applicable with Raft protocol version 3 or higher.
ServerStabilizationTime *time.Duration `mapstructure:"-" json:"-"`
ServerStabilizationTimeRaw string `mapstructure:"server_stabilization_time"`
}
// Config is the configuration that can be set for an Agent.
@@ -692,6 +707,16 @@ func Bool(b bool) *bool {
return &b
}
// Uint64 is used to initialize uint64 pointers in struct literals.
func Uint64(i uint64) *uint64 {
return &i
}
// Duration is used to initialize time.Duration pointers in struct literals.
func Duration(d time.Duration) *time.Duration {
return &d
}
// UnixSocketPermissions contains information about a unix socket, and
// implements the FilePermissions interface.
type UnixSocketPermissions struct {
@@ -1041,6 +1066,21 @@ func DecodeConfig(r io.Reader) (*Config, error) {
result.ReconnectTimeoutWan = dur
}
if raw := result.Autopilot.LastContactThresholdRaw; raw != "" {
dur, err := time.ParseDuration(raw)
if err != nil {
return nil, fmt.Errorf("LastContactThreshold invalid: %v", err)
}
result.Autopilot.LastContactThreshold = &dur
}
if raw := result.Autopilot.ServerStabilizationTimeRaw; raw != "" {
dur, err := time.ParseDuration(raw)
if err != nil {
return nil, fmt.Errorf("ServerStabilizationTime invalid: %v", err)
}
result.Autopilot.ServerStabilizationTime = &dur
}
// Merge the single recursor
if result.DNSRecursor != "" {
result.DNSRecursors = append(result.DNSRecursors, result.DNSRecursor)
@@ -1293,7 +1333,7 @@ func MergeConfig(a, b *Config) *Config {
if b.Protocol > 0 {
result.Protocol = b.Protocol
}
if b.RaftProtocol != 0 {
if b.RaftProtocol > 0 {
result.RaftProtocol = b.RaftProtocol
}
if b.NodeID != "" {
@@ -1347,6 +1387,15 @@ func MergeConfig(a, b *Config) *Config {
if b.Autopilot.CleanupDeadServers != nil {
result.Autopilot.CleanupDeadServers = b.Autopilot.CleanupDeadServers
}
if b.Autopilot.LastContactThreshold != nil {
result.Autopilot.LastContactThreshold = b.Autopilot.LastContactThreshold
}
if b.Autopilot.MaxTrailingLogs != nil {
result.Autopilot.MaxTrailingLogs = b.Autopilot.MaxTrailingLogs
}
if b.Autopilot.ServerStabilizationTime != nil {
result.Autopilot.ServerStabilizationTime = b.Autopilot.ServerStabilizationTime
}
if b.Telemetry.DisableHostname == true {
result.Telemetry.DisableHostname = true
}


@@ -1103,13 +1103,27 @@ func TestDecodeConfig_Performance(t *testing.T) {
}
func TestDecodeConfig_Autopilot(t *testing.T) {
input := `{"autopilot": { "cleanup_dead_servers": true }}`
input := `{"autopilot": {
"cleanup_dead_servers": true,
"last_contact_threshold": "100ms",
"max_trailing_logs": 10,
"server_stabilization_time": "10s"
}}`
config, err := DecodeConfig(bytes.NewReader([]byte(input)))
if err != nil {
t.Fatalf("err: %s", err)
}
if config.Autopilot.CleanupDeadServers == nil || !*config.Autopilot.CleanupDeadServers {
t.Fatalf("bad: cleanup_dead_servers isn't set: %#v", config)
t.Fatalf("bad: %#v", config)
}
if config.Autopilot.LastContactThreshold == nil || *config.Autopilot.LastContactThreshold != 100*time.Millisecond {
t.Fatalf("bad: %#v", config)
}
if config.Autopilot.MaxTrailingLogs == nil || *config.Autopilot.MaxTrailingLogs != 10 {
t.Fatalf("bad: %#v", config)
}
if config.Autopilot.ServerStabilizationTime == nil || *config.Autopilot.ServerStabilizationTime != 10*time.Second {
t.Fatalf("bad: %#v", config)
}
}
@@ -1629,7 +1643,10 @@ func TestMergeConfig(t *testing.T) {
SkipLeaveOnInt: Bool(true),
RaftProtocol: 3,
Autopilot: Autopilot{
CleanupDeadServers: Bool(true),
CleanupDeadServers: Bool(true),
LastContactThreshold: Duration(time.Duration(10)),
MaxTrailingLogs: Uint64(10),
ServerStabilizationTime: Duration(time.Duration(100)),
},
EnableDebug: true,
VerifyIncoming: true,


@@ -298,6 +298,7 @@ func (s *HTTPServer) registerHandlers(enableDebug bool) {
s.handleFuncMetrics("/v1/operator/raft/peer", s.wrap(s.OperatorRaftPeer))
s.handleFuncMetrics("/v1/operator/keyring", s.wrap(s.OperatorKeyringEndpoint))
s.handleFuncMetrics("/v1/operator/autopilot/configuration", s.wrap(s.OperatorAutopilotConfiguration))
s.handleFuncMetrics("/v1/operator/autopilot/health", s.wrap(s.OperatorServerHealth))
s.handleFuncMetrics("/v1/query", s.wrap(s.PreparedQueryGeneral))
s.handleFuncMetrics("/v1/query/", s.wrap(s.PreparedQuerySpecific))
s.handleFuncMetrics("/v1/session/create", s.wrap(s.SessionCreate))


@@ -224,3 +224,23 @@ func (s *HTTPServer) OperatorAutopilotConfiguration(resp http.ResponseWriter, re
return nil, nil
}
}
// OperatorServerHealth is used to get the health of the servers in the local DC
func (s *HTTPServer) OperatorServerHealth(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
if req.Method != "GET" {
resp.WriteHeader(http.StatusMethodNotAllowed)
return nil, nil
}
var args structs.DCSpecificRequest
if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done {
return nil, nil
}
var reply structs.OperatorHealthReply
if err := s.agent.RPC("Operator.ServerHealth", &args, &reply); err != nil {
return nil, err
}
return reply, nil
}


@@ -8,7 +8,9 @@ import (
"strings"
"testing"
"github.com/hashicorp/consul-enterprise/testutil"
"github.com/hashicorp/consul/consul/structs"
"github.com/hashicorp/serf/serf"
)
func TestOperator_OperatorRaftConfiguration(t *testing.T) {
@ -420,3 +422,40 @@ func TestOperator_AutopilotCASConfiguration(t *testing.T) {
}
})
}
func TestOperator_OperatorServerHealth(t *testing.T) {
httpTest(t, func(srv *HTTPServer) {
body := bytes.NewBuffer(nil)
req, err := http.NewRequest("GET", "/v1/operator/autopilot/health", body)
if err != nil {
t.Fatalf("err: %v", err)
}
testutil.WaitForResult(func() (bool, error) {
resp := httptest.NewRecorder()
obj, err := srv.OperatorServerHealth(resp, req)
if err != nil {
return false, fmt.Errorf("err: %v", err)
}
if resp.Code != 200 {
return false, fmt.Errorf("bad code: %d", resp.Code)
}
out, ok := obj.(structs.OperatorHealthReply)
if !ok {
return false, fmt.Errorf("unexpected: %T", obj)
}
if len(out.Servers) != 1 ||
!out.Servers[0].Healthy ||
out.Servers[0].Name != srv.agent.config.NodeName ||
out.Servers[0].SerfStatusRaw != serf.StatusAlive ||
out.FailureTolerance != 0 {
return false, fmt.Errorf("bad: %v", out)
}
return true, nil
}, func(err error) {
t.Fatal(err)
})
})
}


@@ -56,6 +56,9 @@ func (c *OperatorAutopilotGetCommand) Run(args []string) int {
return 1
}
c.Ui.Output(fmt.Sprintf("CleanupDeadServers = %v", config.CleanupDeadServers))
c.Ui.Output(fmt.Sprintf("LastContactThreshold = %v", config.LastContactThreshold.String()))
c.Ui.Output(fmt.Sprintf("MaxTrailingLogs = %v", config.MaxTrailingLogs))
c.Ui.Output(fmt.Sprintf("ServerStabilizationTime = %v", config.ServerStabilizationTime.String()))
return 0
}


@@ -4,6 +4,7 @@ import (
"flag"
"fmt"
"strings"
"time"
"github.com/hashicorp/consul/command/base"
)
@@ -29,12 +30,27 @@ func (c *OperatorAutopilotSetCommand) Synopsis() string {
func (c *OperatorAutopilotSetCommand) Run(args []string) int {
var cleanupDeadServers base.BoolValue
var lastContactThresholdRaw string
var maxTrailingLogs base.UintValue
var serverStabilizationTimeRaw string
f := c.Command.NewFlagSet(c)
f.Var(&cleanupDeadServers, "cleanup-dead-servers",
"Controls whether Consul will automatically remove dead servers "+
"when new ones are successfully added. Must be one of `true|false`.")
f.Var(&maxTrailingLogs, "max-trailing-logs",
"Controls the maximum number of log entries that a server can trail the "+
"leader by before being considered unhealthy.")
f.StringVar(&lastContactThresholdRaw, "last-contact-threshold", "",
"Controls the maximum amount of time a server can go without contact "+
"from the leader before being considered unhealthy. Must be a duration value "+
"such as `10s`.")
f.StringVar(&serverStabilizationTimeRaw, "server-stabilization-time", "",
"Controls the minimum amount of time a server must be stable in the "+
"'healthy' state before being added to the cluster. Only takes effect if all "+
"servers are running Raft protocol version 3 or higher. Must be a duration "+
"value such as `10s`.")
if err := c.Command.Parse(args); err != nil {
if err == flag.ErrHelp {
@@ -59,8 +75,27 @@ func (c *OperatorAutopilotSetCommand) Run(args []string) int {
return 1
}
// Update the config values.
// Update the config values based on the set flags.
cleanupDeadServers.Merge(&conf.CleanupDeadServers)
trailing := uint(conf.MaxTrailingLogs)
maxTrailingLogs.Merge(&trailing)
conf.MaxTrailingLogs = uint64(trailing)
if lastContactThresholdRaw != "" {
dur, err := time.ParseDuration(lastContactThresholdRaw)
if err != nil {
c.Ui.Error(fmt.Sprintf("invalid value for last-contact-threshold: %v", err))
return 1
}
conf.LastContactThreshold = dur
}
if serverStabilizationTimeRaw != "" {
dur, err := time.ParseDuration(serverStabilizationTimeRaw)
if err != nil {
c.Ui.Error(fmt.Sprintf("invalid value for server-stabilization-time: %v", err))
return 1
}
conf.ServerStabilizationTime = dur
}
// Check-and-set the new configuration.
result, err := operator.AutopilotCASConfiguration(conf, nil)

consul/autopilot.go (new file, 286 lines)

@@ -0,0 +1,286 @@
package consul
import (
"fmt"
"strconv"
"time"
"github.com/hashicorp/consul/consul/agent"
"github.com/hashicorp/consul/consul/structs"
"github.com/hashicorp/raft"
"github.com/hashicorp/serf/serf"
)
func (s *Server) startAutopilot() {
s.autopilotShutdownCh = make(chan struct{})
go s.serverHealthLoop()
go s.removeDeadLoop()
}
func (s *Server) stopAutopilot() {
close(s.autopilotShutdownCh)
}
// serverHealthLoop monitors the health of the servers in the cluster
func (s *Server) serverHealthLoop() {
// Monitor server health until shutdown
ticker := time.NewTicker(s.config.ServerHealthInterval)
for {
select {
case <-s.autopilotShutdownCh:
ticker.Stop()
return
case <-ticker.C:
serverHealths := make(map[string]*structs.ServerHealth)
state := s.fsm.State()
_, autopilotConf, err := state.AutopilotConfig()
if err != nil {
s.logger.Printf("[ERR] consul: error retrieving autopilot config: %s", err)
continue
}
// Build an updated map of server healths
for _, member := range s.LANMembers() {
if member.Status == serf.StatusLeft {
continue
}
valid, parts := agent.IsConsulServer(member)
if valid {
health := s.queryServerHealth(member, parts, autopilotConf)
serverHealths[parts.Addr.String()] = health
}
}
s.autopilotLock.Lock()
s.autopilotHealth = serverHealths
s.autopilotLock.Unlock()
if err := s.promoteNonVoters(autopilotConf); err != nil {
s.logger.Printf("[ERR] consul: error checking for non-voters to promote: %s", err)
}
}
}
}
// removeDeadLoop checks for dead servers periodically, or when receiving on autopilotRemoveDeadCh
func (s *Server) removeDeadLoop() {
ticker := time.NewTicker(s.config.RemoveDeadInterval)
for {
select {
case <-s.autopilotShutdownCh:
ticker.Stop()
return
case <-ticker.C:
if err := s.pruneDeadServers(); err != nil {
s.logger.Printf("[ERR] consul: error checking for dead servers to remove: %s", err)
}
case <-s.autopilotRemoveDeadCh:
if err := s.pruneDeadServers(); err != nil {
s.logger.Printf("[ERR] consul: error checking for dead servers to remove: %s", err)
}
}
}
}
// pruneDeadServers removes up to numPeers/2 failed servers
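// (e.g. with 5 peers at most 2 failed servers are removed, so a majority remains)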
func (s *Server) pruneDeadServers() error {
state := s.fsm.State()
_, autopilotConf, err := state.AutopilotConfig()
if err != nil {
return err
}
// Look for dead servers to clean up
peers, err := s.numPeers()
if err != nil {
return err
}
removed := 0
if autopilotConf.CleanupDeadServers {
for _, member := range s.serfLAN.Members() {
// Exit early if we've already removed the maximum number of servers
if removed == peers/2 {
break
}
valid, _ := agent.IsConsulServer(member)
if valid && member.Status == serf.StatusFailed {
removed++
s.logger.Printf("[INFO] consul: Attempting removal of failed server: %v", member.Name)
go s.serfLAN.RemoveFailedNode(member.Name)
}
}
}
return nil
}
// promoteNonVoters promotes eligible non-voting servers to voters.
func (s *Server) promoteNonVoters(autopilotConf *structs.AutopilotConfig) error {
minRaftProtocol, err := ServerMinRaftProtocol(s.LANMembers())
if err != nil {
return fmt.Errorf("error getting server raft protocol versions: %s", err)
}
if minRaftProtocol >= 3 {
future := s.raft.GetConfiguration()
if err := future.Error(); err != nil {
return fmt.Errorf("failed to get raft configuration: %v", err)
}
var promotions []raft.Server
raftServers := future.Configuration().Servers
voterCount := 0
for _, server := range raftServers {
// If this server has been stable and passing for long enough, promote it to a voter
if server.Suffrage == raft.Nonvoter {
health := s.getServerHealth(string(server.Address))
if health != nil && health.Healthy && time.Now().Sub(health.StableSince) >= autopilotConf.ServerStabilizationTime {
promotions = append(promotions, server)
}
} else {
voterCount++
}
}
// Exit early if there's nothing to promote
if len(promotions) == 0 {
return nil
}
// If there's currently an even number of servers, we can promote the first server in the list
// to get to an odd-sized quorum
newServers := false
if voterCount%2 == 0 {
addFuture := s.raft.AddVoter(promotions[0].ID, promotions[0].Address, 0, 0)
if err := addFuture.Error(); err != nil {
return fmt.Errorf("failed to add raft peer: %v", err)
}
promotions = promotions[1:]
newServers = true
}
// Promote remaining servers in twos to maintain an odd quorum size
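// (e.g. the voter count goes 3 -> 5 -> 7, never pausing at an even number)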
for i := 0; i < len(promotions)-1; i += 2 {
addFirst := s.raft.AddVoter(promotions[i].ID, promotions[i].Address, 0, 0)
if err := addFirst.Error(); err != nil {
return fmt.Errorf("failed to add raft peer: %v", err)
}
addSecond := s.raft.AddVoter(promotions[i+1].ID, promotions[i+1].Address, 0, 0)
if err := addSecond.Error(); err != nil {
return fmt.Errorf("failed to add raft peer: %v", err)
}
newServers = true
}
// If we added a new server, trigger a check to remove dead servers
if newServers {
go func() {
s.autopilotRemoveDeadCh <- struct{}{}
}()
}
}
return nil
}
// queryServerHealth fetches the raft stats for the given server and uses them
// to update its ServerHealth
func (s *Server) queryServerHealth(member serf.Member, server *agent.Server, autopilotConf *structs.AutopilotConfig) *structs.ServerHealth {
health := &structs.ServerHealth{
ID: server.ID,
Name: server.Name,
SerfStatusRaw: member.Status,
SerfStatus: member.Status.String(),
LastContactRaw: -1,
LastContact: "never",
}
stats, err := s.getServerStats(server)
if err != nil {
s.logger.Printf("[DEBUG] consul: error getting server's raft stats: %s", err)
}
if v, ok := stats["last_contact"]; ok && v != "never" {
health.LastContactRaw, err = time.ParseDuration(v)
if err != nil {
s.logger.Printf("[DEBUG] consul: error parsing server's last_contact value: %s", err)
}
health.LastContact = health.LastContactRaw.String()
}
// Set LastContact to 0 if we're the leader
if s.config.NodeName == member.Name {
health.LastContactRaw = 0
health.LastContact = "leader"
}
if v, ok := stats["last_log_index"]; ok {
health.LastIndex, err = strconv.ParseUint(v, 10, 64)
if err != nil {
s.logger.Printf("[DEBUG] consul: error parsing server's last_log_index value: %s", err)
}
}
if v, ok := stats["last_log_term"]; ok {
health.LastTerm, err = strconv.ParseUint(v, 10, 64)
if err != nil {
s.logger.Printf("[DEBUG] consul: error parsing server's last_log_term value: %s", err)
}
}
health.Healthy = s.isServerHealthy(health, autopilotConf)
// If this is a new server or the health changed, reset StableSince
lastHealth := s.getServerHealth(server.Addr.String())
if lastHealth == nil || lastHealth.Healthy != health.Healthy {
health.StableSince = time.Now()
} else {
health.StableSince = lastHealth.StableSince
}
return health
}
func (s *Server) getServerHealth(addr string) *structs.ServerHealth {
s.autopilotLock.RLock()
defer s.autopilotLock.RUnlock()
h, ok := s.autopilotHealth[addr]
if !ok {
return nil
}
return h
}
func (s *Server) getServerStats(server *agent.Server) (map[string]string, error) {
var args struct{}
var reply map[string]string
err := s.connPool.RPC(s.config.Datacenter, server.Addr, server.Version, "Operator.RaftStats", &args, &reply)
return reply, err
}
// isServerHealthy determines whether the given ServerHealth is healthy
// based on the current Autopilot config
func (s *Server) isServerHealthy(health *structs.ServerHealth, autopilotConf *structs.AutopilotConfig) bool {
if health.SerfStatusRaw != serf.StatusAlive {
return false
}
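// A negative value means we never saw a last_contact stat for this server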
if health.LastContactRaw > autopilotConf.LastContactThreshold || health.LastContactRaw < 0 {
return false
}
lastTerm, _ := strconv.ParseUint(s.raft.Stats()["last_log_term"], 10, 64)
if health.LastTerm != lastTerm {
return false
}
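// Check the leader's index first so the uint64 subtraction below can't underflow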
if s.raft.LastIndex() > autopilotConf.MaxTrailingLogs &&
health.LastIndex < s.raft.LastIndex()-autopilotConf.MaxTrailingLogs {
return false
}
return true
}

consul/autopilot_test.go (new file, 235 lines)

@@ -0,0 +1,235 @@
package consul
import (
"fmt"
"os"
"testing"
"time"
"github.com/hashicorp/consul/testutil"
"github.com/hashicorp/raft"
"github.com/hashicorp/serf/serf"
)
func TestAutopilot_CleanupDeadServer(t *testing.T) {
dir1, s1 := testServerDCBootstrap(t, "dc1", true)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
dir2, s2 := testServerDCBootstrap(t, "dc1", false)
defer os.RemoveAll(dir2)
defer s2.Shutdown()
dir3, s3 := testServerDCBootstrap(t, "dc1", false)
defer os.RemoveAll(dir3)
defer s3.Shutdown()
servers := []*Server{s1, s2, s3}
// Try to join
addr := fmt.Sprintf("127.0.0.1:%d",
s1.config.SerfLANConfig.MemberlistConfig.BindPort)
if _, err := s2.JoinLAN([]string{addr}); err != nil {
t.Fatalf("err: %v", err)
}
if _, err := s3.JoinLAN([]string{addr}); err != nil {
t.Fatalf("err: %v", err)
}
for _, s := range servers {
testutil.WaitForResult(func() (bool, error) {
peers, _ := s.numPeers()
return peers == 3, nil
}, func(err error) {
t.Fatalf("should have 3 peers")
})
}
// Kill a non-leader server
s2.Shutdown()
testutil.WaitForResult(func() (bool, error) {
alive := 0
for _, m := range s1.LANMembers() {
if m.Status == serf.StatusAlive {
alive++
}
}
return alive == 2, nil
}, func(err error) {
t.Fatalf("should have 2 alive members")
})
// Bring up and join a new server
dir4, s4 := testServerDCBootstrap(t, "dc1", false)
defer os.RemoveAll(dir4)
defer s4.Shutdown()
if _, err := s4.JoinLAN([]string{addr}); err != nil {
t.Fatalf("err: %v", err)
}
servers[1] = s4
// Make sure the dead server is removed and we're back to 3 total peers
for _, s := range servers {
testutil.WaitForResult(func() (bool, error) {
peers, _ := s.numPeers()
return peers == 3, nil
}, func(err error) {
t.Fatalf("should have 3 peers")
})
}
}
func TestAutopilot_CleanupDeadServerPeriodic(t *testing.T) {
dir1, s1 := testServerWithConfig(t, func(c *Config) {
c.Datacenter = "dc1"
c.Bootstrap = true
c.RemoveDeadInterval = 100 * time.Millisecond
})
defer os.RemoveAll(dir1)
defer s1.Shutdown()
conf := func(c *Config) {
c.Datacenter = "dc1"
c.Bootstrap = false
}
dir2, s2 := testServerWithConfig(t, conf)
defer os.RemoveAll(dir2)
defer s2.Shutdown()
dir3, s3 := testServerWithConfig(t, conf)
defer os.RemoveAll(dir3)
defer s3.Shutdown()
servers := []*Server{s1, s2, s3}
// Join the servers to s1
addr := fmt.Sprintf("127.0.0.1:%d",
s1.config.SerfLANConfig.MemberlistConfig.BindPort)
if _, err := s2.JoinLAN([]string{addr}); err != nil {
t.Fatalf("err: %v", err)
}
if _, err := s3.JoinLAN([]string{addr}); err != nil {
t.Fatalf("err: %v", err)
}
for _, s := range servers {
testutil.WaitForResult(func() (bool, error) {
peers, _ := s.numPeers()
return peers == 3, nil
}, func(err error) {
t.Fatalf("should have 3 peers")
})
}
// Kill a non-leader server
s3.Shutdown()
// Should be removed from the peers automatically
for _, s := range []*Server{s1, s2} {
testutil.WaitForResult(func() (bool, error) {
peers, _ := s.numPeers()
return peers == 2, nil
}, func(err error) {
t.Fatalf("should have 2 peers")
})
}
}
func TestAutopilot_PromoteNonVoter(t *testing.T) {
dir1, s1 := testServerWithConfig(t, func(c *Config) {
c.Datacenter = "dc1"
c.Bootstrap = true
c.RaftConfig.ProtocolVersion = 3
c.AutopilotConfig.ServerStabilizationTime = 200 * time.Millisecond
c.ServerHealthInterval = 100 * time.Millisecond
})
defer os.RemoveAll(dir1)
defer s1.Shutdown()
codec := rpcClient(t, s1)
defer codec.Close()
dir2, s2 := testServerWithConfig(t, func(c *Config) {
c.Datacenter = "dc1"
c.Bootstrap = false
c.RaftConfig.ProtocolVersion = 3
})
defer os.RemoveAll(dir2)
defer s2.Shutdown()
addr := fmt.Sprintf("127.0.0.1:%d",
s1.config.SerfLANConfig.MemberlistConfig.BindPort)
if _, err := s2.JoinLAN([]string{addr}); err != nil {
t.Fatalf("err: %v", err)
}
testutil.WaitForLeader(t, s1.RPC, "dc1")
// Wait for the new server to be added as a non-voter, but make sure
// it doesn't get promoted to a voter even after ServerStabilizationTime,
// because that would result in an even-numbered quorum count.
testutil.WaitForResult(func() (bool, error) {
future := s1.raft.GetConfiguration()
if err := future.Error(); err != nil {
return false, err
}
servers := future.Configuration().Servers
if len(servers) != 2 {
return false, fmt.Errorf("bad: %v", servers)
}
if servers[1].Suffrage != raft.Nonvoter {
return false, fmt.Errorf("bad: %v", servers)
}
health := s1.getServerHealth(string(servers[1].Address))
if health == nil {
return false, fmt.Errorf("nil health")
}
if !health.Healthy {
return false, fmt.Errorf("bad: %v", health)
}
if time.Now().Sub(health.StableSince) < s1.config.AutopilotConfig.ServerStabilizationTime {
return false, fmt.Errorf("stable period not elapsed")
}
return true, nil
}, func(err error) {
t.Fatal(err)
})
// Now add another server and make sure they both get promoted to voters after stabilization
dir3, s3 := testServerWithConfig(t, func(c *Config) {
c.Datacenter = "dc1"
c.Bootstrap = false
c.RaftConfig.ProtocolVersion = 3
})
defer os.RemoveAll(dir3)
defer s3.Shutdown()
if _, err := s3.JoinLAN([]string{addr}); err != nil {
t.Fatalf("err: %v", err)
}
testutil.WaitForResult(func() (bool, error) {
future := s1.raft.GetConfiguration()
if err := future.Error(); err != nil {
return false, err
}
servers := future.Configuration().Servers
if len(servers) != 3 {
return false, fmt.Errorf("bad: %v", servers)
}
if servers[1].Suffrage != raft.Voter {
return false, fmt.Errorf("bad: %v", servers)
}
if servers[2].Suffrage != raft.Voter {
return false, fmt.Errorf("bad: %v", servers)
}
return true, nil
}, func(err error) {
t.Fatal(err)
})
}


@@ -279,6 +279,14 @@ type Config struct {
// AutopilotConfig is used to apply the initial autopilot config when
// bootstrapping.
AutopilotConfig *structs.AutopilotConfig
// ServerHealthInterval is the frequency with which the leader will check
// the health of the servers in the cluster
ServerHealthInterval time.Duration
// RemoveDeadInterval is the frequency with which the leader will look for
// dead servers to remove from the cluster
RemoveDeadInterval time.Duration
}
// CheckVersion is used to check if the ProtocolVersion is valid
@@ -353,8 +361,13 @@ func DefaultConfig() *Config {
TLSMinVersion: "tls10",
AutopilotConfig: &structs.AutopilotConfig{
CleanupDeadServers: true,
CleanupDeadServers: true,
LastContactThreshold: 200 * time.Millisecond,
MaxTrailingLogs: 250,
ServerStabilizationTime: 10 * time.Second,
},
ServerHealthInterval: 1 * time.Second,
RemoveDeadInterval: 30 * time.Second,
}
// Increase our reap interval to 3 days instead of 24h.


@@ -159,6 +159,8 @@ func (s *Server) establishLeadership() error {
return err
}
s.startAutopilot()
return nil
}
@@ -174,6 +176,9 @@ func (s *Server) revokeLeadership() error {
s.logger.Printf("[ERR] consul: Clearing session timers failed: %v", err)
return err
}
s.stopAutopilot()
return nil
}
@@ -597,13 +602,20 @@ func (s *Server) joinConsulServer(m serf.Member, parts *agent.Server) error {
return err
}
if minRaftProtocol >= 2 && parts.RaftVersion >= 3 {
switch {
case minRaftProtocol >= 3:
addFuture := s.raft.AddNonvoter(raft.ServerID(parts.ID), raft.ServerAddress(addr), 0, 0)
if err := addFuture.Error(); err != nil {
s.logger.Printf("[ERR] consul: failed to add raft peer: %v", err)
return err
}
case minRaftProtocol == 2 && parts.RaftVersion >= 3:
addFuture := s.raft.AddVoter(raft.ServerID(parts.ID), raft.ServerAddress(addr), 0, 0)
if err := addFuture.Error(); err != nil {
s.logger.Printf("[ERR] consul: failed to add raft peer: %v", err)
return err
}
} else {
default:
addFuture := s.raft.AddPeer(raft.ServerAddress(addr))
if err := addFuture.Error(); err != nil {
s.logger.Printf("[ERR] consul: failed to add raft peer: %v", err)
@@ -611,22 +623,10 @@ func (s *Server) joinConsulServer(m serf.Member, parts *agent.Server) error {
}
}
state := s.fsm.State()
_, autopilotConf, err := state.AutopilotConfig()
if err != nil {
return err
}
// Look for dead servers to clean up
if autopilotConf.CleanupDeadServers {
for _, member := range s.serfLAN.Members() {
valid, _ := agent.IsConsulServer(member)
if valid && member.Name != m.Name && member.Status == serf.StatusFailed {
s.logger.Printf("[INFO] consul: Attempting removal of failed server: %v", member.Name)
go s.serfLAN.RemoveFailedNode(member.Name)
}
}
}
// Trigger a check to remove dead servers
go func() {
s.autopilotRemoveDeadCh <- struct{}{}
}()
return nil
}


@@ -623,76 +623,6 @@ func TestLeader_ReapTombstones(t *testing.T) {
})
}
func TestLeader_CleanupDeadServers(t *testing.T) {
dir1, s1 := testServerDCBootstrap(t, "dc1", true)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
dir2, s2 := testServerDCBootstrap(t, "dc1", false)
defer os.RemoveAll(dir2)
defer s2.Shutdown()
dir3, s3 := testServerDCBootstrap(t, "dc1", false)
defer os.RemoveAll(dir3)
defer s3.Shutdown()
servers := []*Server{s1, s2, s3}
// Try to join
addr := fmt.Sprintf("127.0.0.1:%d",
s1.config.SerfLANConfig.MemberlistConfig.BindPort)
if _, err := s2.JoinLAN([]string{addr}); err != nil {
t.Fatalf("err: %v", err)
}
if _, err := s3.JoinLAN([]string{addr}); err != nil {
t.Fatalf("err: %v", err)
}
for _, s := range servers {
testutil.WaitForResult(func() (bool, error) {
peers, _ := s.numPeers()
return peers == 3, nil
}, func(err error) {
t.Fatalf("should have 3 peers")
})
}
// Kill a non-leader server
s2.Shutdown()
testutil.WaitForResult(func() (bool, error) {
alive := 0
for _, m := range s1.LANMembers() {
if m.Status == serf.StatusAlive {
alive++
}
}
return alive == 2, nil
}, func(err error) {
t.Fatalf("should have 2 alive members")
})
// Bring up and join a new server
dir4, s4 := testServerDCBootstrap(t, "dc1", false)
defer os.RemoveAll(dir4)
defer s4.Shutdown()
if _, err := s4.JoinLAN([]string{addr}); err != nil {
t.Fatalf("err: %v", err)
}
servers[1] = s4
// Make sure the dead server is removed and we're back to 3 total peers
for _, s := range servers {
testutil.WaitForResult(func() (bool, error) {
peers, _ := s.numPeers()
return peers == 3, nil
}, func(err error) {
t.Fatalf("should have 3 peers")
})
}
}
func TestLeader_RollRaftServer(t *testing.T) {
dir1, s1 := testServerWithConfig(t, func(c *Config) {
c.Bootstrap = true


@@ -8,6 +8,7 @@ import (
"github.com/hashicorp/consul/consul/structs"
"github.com/hashicorp/raft"
"github.com/hashicorp/serf/serf"
"time"
)
// Operator endpoint is used to perform low-level operator tasks for Consul.
@@ -183,3 +184,63 @@ func (op *Operator) AutopilotSetConfiguration(args *structs.AutopilotSetConfigRe
}
return nil
}
// Used by Autopilot to query the raft stats of the local server.
func (op *Operator) RaftStats(args struct{}, reply *map[string]string) error {
*reply = op.srv.raft.Stats()
return nil
}
// ServerHealth is used to get the current health of the servers.
func (op *Operator) ServerHealth(args *structs.DCSpecificRequest, reply *structs.OperatorHealthReply) error {
// This must be sent to the leader, so we fix the args since we are
// re-using a structure where we don't support all the options.
args.RequireConsistent = true
args.AllowStale = false
if done, err := op.srv.forward("Operator.ServerHealth", args, args, reply); done {
return err
}
// This action requires operator read access.
acl, err := op.srv.resolveToken(args.Token)
if err != nil {
return err
}
if acl != nil && !acl.OperatorRead() {
return permissionDeniedErr
}
status := structs.OperatorHealthReply{
Healthy: true,
}
future := op.srv.raft.GetConfiguration()
if err := future.Error(); err != nil {
return err
}
healthyCount := 0
servers := future.Configuration().Servers
for _, s := range servers {
health := op.srv.getServerHealth(string(s.Address))
if health != nil {
// Work on a copy so we don't mutate the shared health entry,
// and round StableSince to be more readable
serverHealth := *health
serverHealth.StableSince = serverHealth.StableSince.Round(time.Second).UTC()
if !serverHealth.Healthy {
status.Healthy = false
} else {
healthyCount++
}
status.Servers = append(status.Servers, serverHealth)
}
}
// If we have extra healthy servers, set FailureTolerance
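// (e.g. 5 of 5 servers healthy leaves a quorum of 5/2+1 = 3, so tolerance is 2)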
if healthyCount > len(servers)/2+1 {
status.FailureTolerance = healthyCount - (len(servers)/2 + 1)
}
*reply = status
return nil
}


@@ -426,3 +426,79 @@ func TestOperator_Autopilot_SetConfiguration_ACLDeny(t *testing.T) {
t.Fatalf("bad: %#v", config)
}
}
func TestOperator_ServerHealth(t *testing.T) {
for i := 1; i <= 3; i++ {
testServerHealth(t, i)
}
}
func testServerHealth(t *testing.T, protocol int) {
dir1, s1 := testServerWithConfig(t, func(c *Config) {
c.Datacenter = "dc1"
c.Bootstrap = true
c.RaftConfig.ProtocolVersion = raft.ProtocolVersion(protocol)
})
defer os.RemoveAll(dir1)
defer s1.Shutdown()
codec := rpcClient(t, s1)
defer codec.Close()
dir2, s2 := testServerWithConfig(t, func(c *Config) {
c.Datacenter = "dc1"
c.Bootstrap = false
c.RaftConfig.ProtocolVersion = raft.ProtocolVersion(protocol)
})
defer os.RemoveAll(dir2)
defer s2.Shutdown()
addr := fmt.Sprintf("127.0.0.1:%d",
s1.config.SerfLANConfig.MemberlistConfig.BindPort)
if _, err := s2.JoinLAN([]string{addr}); err != nil {
t.Fatalf("err: %v", err)
}
dir3, s3 := testServerWithConfig(t, func(c *Config) {
c.Datacenter = "dc1"
c.Bootstrap = false
c.RaftConfig.ProtocolVersion = raft.ProtocolVersion(protocol)
})
defer os.RemoveAll(dir3)
defer s3.Shutdown()
if _, err := s3.JoinLAN([]string{addr}); err != nil {
t.Fatalf("err: %v", err)
}
testutil.WaitForLeader(t, s1.RPC, "dc1")
testutil.WaitForResult(func() (bool, error) {
arg := structs.DCSpecificRequest{
Datacenter: "dc1",
}
var reply structs.OperatorHealthReply
err := msgpackrpc.CallWithCodec(codec, "Operator.ServerHealth", &arg, &reply)
if err != nil {
return false, fmt.Errorf("err: %v", err)
}
if !reply.Healthy {
return false, fmt.Errorf("bad: %v", reply)
}
if reply.FailureTolerance != 1 {
return false, fmt.Errorf("bad: %v", reply)
}
if len(reply.Servers) != 3 {
return false, fmt.Errorf("bad: %v", reply)
}
if reply.Servers[0].LastContact != "leader" {
return false, fmt.Errorf("bad: %v", reply)
}
if reply.Servers[1].LastContactRaw <= 0 {
return false, fmt.Errorf("bad: %v", reply)
}
if reply.Servers[2].LastContactRaw <= 0 {
return false, fmt.Errorf("bad: %v", reply)
}
return true, nil
}, func(err error) {
t.Fatal(err)
})
}


@@ -76,6 +76,12 @@ type Server struct {
// aclCache is the non-authoritative ACL cache.
aclCache *aclCache
// autopilot
autopilotHealth map[string]*structs.ServerHealth
autopilotLock sync.RWMutex
autopilotShutdownCh chan struct{}
autopilotRemoveDeadCh chan struct{}
// Consul configuration
config *Config
@@ -222,18 +228,20 @@ func NewServer(config *Config) (*Server, error) {
// Create server.
s := &Server{
config: config,
connPool: NewPool(config.LogOutput, serverRPCCache, serverMaxStreams, tlsWrap),
eventChLAN: make(chan serf.Event, 256),
eventChWAN: make(chan serf.Event, 256),
localConsuls: make(map[raft.ServerAddress]*agent.Server),
logger: logger,
reconcileCh: make(chan serf.Member, 32),
remoteConsuls: make(map[string][]*agent.Server, 4),
rpcServer: rpc.NewServer(),
rpcTLS: incomingTLS,
tombstoneGC: gc,
shutdownCh: make(chan struct{}),
autopilotRemoveDeadCh: make(chan struct{}),
autopilotShutdownCh: make(chan struct{}),
config: config,
connPool: NewPool(config.LogOutput, serverRPCCache, serverMaxStreams, tlsWrap),
eventChLAN: make(chan serf.Event, 256),
eventChWAN: make(chan serf.Event, 256),
localConsuls: make(map[raft.ServerAddress]*agent.Server),
logger: logger,
reconcileCh: make(chan serf.Member, 32),
remoteConsuls: make(map[string][]*agent.Server, 4),
rpcServer: rpc.NewServer(),
rpcTLS: incomingTLS,
tombstoneGC: gc,
shutdownCh: make(chan struct{}),
}
// Initialize the authoritative ACL cache.


@@ -1,15 +1,31 @@
package structs
import (
"time"
"github.com/hashicorp/raft"
"github.com/hashicorp/serf/serf"
)
type AutopilotConfig struct {
// CleanupDeadServers controls whether to remove dead servers when a new
// server is added to the Raft peers
// server is added to the Raft peers.
CleanupDeadServers bool
// RaftIndex stores the create/modify indexes of this configuration
// LastContactThreshold is the limit on the amount of time a server can go
// without leader contact before being considered unhealthy.
LastContactThreshold time.Duration
// MaxTrailingLogs is the number of entries in the Raft log that a server can
// be behind before being considered unhealthy.
MaxTrailingLogs uint64
// ServerStabilizationTime is the minimum amount of time a server must be
// in a stable, healthy state before it can be added to the cluster. Only
// applicable with Raft protocol version 3 or higher.
ServerStabilizationTime time.Duration
// RaftIndex stores the create/modify indexes of this configuration.
RaftIndex
}
@@ -85,3 +101,46 @@ type AutopilotSetConfigRequest struct {
func (op *AutopilotSetConfigRequest) RequestDatacenter() string {
return op.Datacenter
}
// ServerHealth is the health (from the leader's point of view) of a server.
type ServerHealth struct {
// ID is the raft ID of the server.
ID string
// Name is the node name of the server.
Name string
// The status of the SerfHealth check for the server.
SerfStatusRaw serf.MemberStatus `json:"-"`
SerfStatus string
// LastContact is the time since this node's last contact with the leader.
LastContactRaw time.Duration `json:"-"`
LastContact string
// LastTerm is the highest leader term this server has a record of in its Raft log.
LastTerm uint64
// LastIndex is the last log index this server has a record of in its Raft log.
LastIndex uint64
// Healthy is whether or not the server is healthy according to the current
// Autopilot config.
Healthy bool
// StableSince is the last time this server's Healthy value changed.
StableSince time.Time
}
// OperatorHealthReply is a representation of the overall health of the cluster
type OperatorHealthReply struct {
// Healthy is true if all the servers in the cluster are healthy.
Healthy bool
// FailureTolerance is the number of healthy servers that could be lost without
// an outage occurring.
FailureTolerance int
// Servers holds the health of each server.
Servers []ServerHealth
}


@@ -29,6 +29,7 @@ The following endpoints are supported:
* [`/v1/operator/raft/peer`](#raft-peer): Operates on Raft peers
* [`/v1/operator/keyring`](#keyring): Operates on gossip keyring
* [`/v1/operator/autopilot/configuration`](#autopilot-configuration): Operates on the Autopilot configuration
* [`/v1/operator/autopilot/health`](#autopilot-health): Returns the health of the servers
Not all endpoints support blocking queries and all consistency modes,
see details in the sections below.
@@ -288,13 +289,16 @@ A JSON body is returned that looks like this:
```javascript
{
"CleanupDeadServers": true,
"LastContactThreshold": 200000000,
"MaxTrailingLogs": 250,
"ServerStabilizationTime": 10000000000,
"CreateIndex": 4,
"ModifyIndex": 4
}
```
`CleanupDeadServers` is whether dead servers should be removed automatically when
a new server is added to the cluster.
For more information about the Autopilot configuration options, see the agent configuration section
[here](/docs/agent/options.html#autopilot).
#### PUT Method
@@ -313,11 +317,89 @@ body must look like:
```javascript
{
"CleanupDeadServers": true
"CleanupDeadServers": true,
"LastContactThreshold": 200000000,
"MaxTrailingLogs": 250,
"ServerStabilizationTime": 10000000000,
"CreateIndex": 4,
"ModifyIndex": 4
}
```
`CleanupDeadServers` is whether dead servers should be removed automatically when
a new server is added to the cluster.
For more information about the Autopilot configuration options, see the agent configuration section
[here](/docs/agent/options.html#autopilot).
The return code will indicate success or failure.
### <a name="autopilot-health"></a> /v1/operator/autopilot/health
Available in Consul 0.8.0 and later, the autopilot health endpoint supports the
`GET` method.
This endpoint supports the use of ACL tokens using either the `X-CONSUL-TOKEN`
header or the `?token=` query parameter.
By default, the datacenter of the agent is queried; however, the `dc` can be
provided using the `?dc=` query parameter.
#### GET Method
When using the `GET` method, the request will be forwarded to the cluster
leader to retrieve its latest view of the health of the servers.
If ACLs are enabled, the client will need to supply an ACL Token with
[`operator`](/docs/internals/acl.html#operator) read privileges.
A JSON body is returned that looks like this:
```javascript
{
"Healthy": true,
"FailureTolerance": 0,
"Servers": [
{
"ID": "e349749b-3303-3ddf-959c-b5885a0e1f6e",
"Name": "node1",
"SerfStatus": "alive",
"LastContact": "leader",
"LastTerm": 2,
"LastIndex": 46,
"Healthy": true,
"StableSince": "2017-03-06T22:07:51Z"
},
{
"ID": "e36ee410-cc3c-0a0c-c724-63817ab30303",
"Name": "node2",
"SerfStatus": "alive",
"LastContact": "27.291304ms",
"LastTerm": 2,
"LastIndex": 46,
"Healthy": true,
"StableSince": "2017-03-06T22:18:26Z"
}
]
}
```
`Healthy` is whether all the servers are currently healthy.
`FailureTolerance` is the number of redundant healthy servers that could fail
without causing an outage (this would be 2 in a healthy cluster of 5 servers).
The `Servers` list holds detailed health information on each server:
- `ID` is the Raft ID of the server.
- `Name` is the node name of the server.
- `SerfStatus` is the SerfHealth check status for the server.
- `LastContact` is the time elapsed since this server's last contact with the leader.
- `LastTerm` is the server's last known Raft leader term.
- `LastIndex` is the index of the server's last committed Raft log entry.
- `Healthy` is whether the server is healthy according to the current Autopilot configuration.
- `StableSince` is the time at which this server last entered its current `Healthy` state.
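
As a rough illustration (not part of this change), the endpoint can be consumed from Go using only the standard library. The struct below mirrors a subset of the documented fields and assumes an agent listening on the default `127.0.0.1:8500`:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

// healthReply mirrors the documented response body (a subset of fields).
type healthReply struct {
	Healthy          bool
	FailureTolerance int
	Servers          []struct {
		Name        string
		SerfStatus  string
		LastContact string
		Healthy     bool
	}
}

func main() {
	resp, err := http.Get("http://127.0.0.1:8500/v1/operator/autopilot/health")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	var reply healthReply
	if err := json.NewDecoder(resp.Body).Decode(&reply); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("healthy=%v failure_tolerance=%d\n", reply.Healthy, reply.FailureTolerance)
	for _, s := range reply.Servers {
		fmt.Printf("  %s: serf=%s healthy=%v last_contact=%s\n", s.Name, s.SerfStatus, s.Healthy, s.LastContact)
	}
}
```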


@@ -311,6 +311,11 @@ will exit with an error at startup.
use. This defaults to the latest version. This should be set only when [upgrading](/docs/upgrading.html).
You can view the protocol versions supported by Consul by running `consul -v`.
* <a name="_raft_protocol"></a><a href="#_raft_protocol">`-raft_protocol`</a> - This controls the internal
version of the Raft consensus protocol used for server communications. This defaults to 2 but must
be set to 3 in order to gain access to Autopilot features, with the exception of
[`cleanup_dead_servers`](#cleanup_dead_servers).
* <a name="_recursor"></a><a href="#_recursor">`-recursor`</a> - Specifies the address of an upstream DNS
server. This option may be provided multiple times, and is functionally
equivalent to the [`recursors` configuration option](#recursors).
@@ -556,14 +561,22 @@ Consul will not enable TLS for the HTTP API unless the `https` port has been ass
<br><br>
The following sub-keys are available:
* <a name="raft_protocol"></a><a href="#raft_protocol">`raft_protocol`</a> - This controls the internal
version of the Raft consensus protocol used for server communications. This defaults to 2 but must
be set to 3 in order to gain access to other Autopilot features, with the exception of
[`cleanup_dead_servers`](#cleanup_dead_servers).
* <a name="cleanup_dead_servers"></a><a href="#cleanup_dead_servers">`cleanup_dead_servers`</a> - This controls
the automatic removal of dead server nodes whenever a new server is added to the cluster. Defaults to `true`.
* <a name="last_contact_threshold"></a><a href="#last_contact_threshold">`last_contact_threshold`</a> - Controls
the maximum amount of time a server can go without contact from the leader before being considered unhealthy.
Must be a duration value such as `10s`. Defaults to `200ms`.
* <a name="max_trailing_threshold"></a><a href="#max_trailing_threshold">`max_trailing_threshold`</a> - Controls
the maximum number of log entries that a server can trail the leader by before being considered unhealthy. Defaults
to 250.
* <a name="server_stabilization_time"></a><a href="#server_stabilization_time">`server_stabilization_time`</a> -
Controls the minimum amount of time a server must be stable in the 'healthy' state before being added to the
cluster. Only takes effect if all servers are running Raft protocol version 3 or higher. Must be a duration value
such as `10s`. Defaults to `10s`.
* <a name="bootstrap"></a><a href="#bootstrap">`bootstrap`</a> Equivalent to the
[`-bootstrap` command-line flag](#_bootstrap).
@@ -769,6 +782,9 @@ Consul will not enable TLS for the HTTP API unless the `https` port has been ass
* <a name="protocol"></a><a href="#protocol">`protocol`</a> Equivalent to the
[`-protocol` command-line flag](#_protocol).
* <a name="raft_protocol"></a><a href="#raft_protocol">`raft_protocol`</a> Equivalent to the
[`-raft-protocol` command-line flag](#_raft_protocol).
* <a name="reap"></a><a href="#reap">`reap`</a> This controls Consul's automatic reaping of child processes,
which is useful if Consul is running as PID 1 in a Docker container. If this isn't specified, then Consul will
automatically reap child processes if it detects it is running as PID 1. If this is set to true or false, then


@@ -40,6 +40,9 @@ The output looks like this:
```
CleanupDeadServers = true
LastContactThreshold = 200ms
MaxTrailingLogs = 250
ServerStabilizationTime = 10s
```
## set-config
@@ -58,6 +61,16 @@ Usage: `consul operator autopilot set-config [options]`
* `-cleanup-dead-servers` - Specifies whether to enable automatic removal of dead servers
upon the successful joining of new servers to the cluster. Must be one of `[true|false]`.
* `-last-contact-threshold` - Controls the maximum amount of time a server can go without contact
from the leader before being considered unhealthy. Must be a duration value such as `10s`.
* `-max-trailing-logs` - Controls the maximum number of log entries that a server can trail
the leader by before being considered unhealthy.
* `-server-stabilization-time` - Controls the minimum amount of time a server must be stable in
the 'healthy' state before being added to the cluster. Only takes effect if all servers are
running Raft protocol version 3 or higher. Must be a duration value such as `10s`.
The output looks like this:
```