diff --git a/agent/acl.go b/agent/acl.go
index 4245c53a05..0bf4180eb4 100644
--- a/agent/acl.go
+++ b/agent/acl.go
@@ -143,11 +143,9 @@ func (m *aclManager) lookupACL(a *Agent, id string) (acl.ACL, error) {
 		cached = raw.(*aclCacheEntry)
 	}
 	if cached != nil && time.Now().Before(cached.Expires) {
-		metrics.IncrCounter([]string{"consul", "acl", "cache_hit"}, 1)
 		metrics.IncrCounter([]string{"acl", "cache_hit"}, 1)
 		return cached.ACL, nil
 	}
-	metrics.IncrCounter([]string{"consul", "acl", "cache_miss"}, 1)
 	metrics.IncrCounter([]string{"acl", "cache_miss"}, 1)
 
 	// At this point we might have a stale cached ACL, or none at all, so
diff --git a/agent/config/builder.go b/agent/config/builder.go
index 7318e7ab32..43f3a141d2 100644
--- a/agent/config/builder.go
+++ b/agent/config/builder.go
@@ -511,14 +511,6 @@ func (b *Builder) Build() (rt RuntimeConfig, err error) {
 		}
 	}
 
-	// Add a filter rule if needed for enabling the deprecated metric names
-	enableDeprecatedNames := b.boolVal(c.Telemetry.EnableDeprecatedNames)
-	if enableDeprecatedNames {
-		telemetryAllowedPrefixes = append(telemetryAllowedPrefixes, "consul.consul.")
-	} else {
-		telemetryBlockedPrefixes = append(telemetryBlockedPrefixes, "consul.consul.")
-	}
-
 	// raft performance scaling
 	performanceRaftMultiplier := b.intVal(c.Performance.RaftMultiplier)
 	if performanceRaftMultiplier < 1 || uint(performanceRaftMultiplier) > consul.MaxRaftMultiplier {
diff --git a/agent/config/config.go b/agent/config/config.go
index e92b76a453..cd06235ae7 100644
--- a/agent/config/config.go
+++ b/agent/config/config.go
@@ -394,7 +394,6 @@ type Telemetry struct {
 	PrometheusRetentionTime *string `json:"prometheus_retention_time,omitempty" hcl:"prometheus_retention_time" mapstructure:"prometheus_retention_time"`
 	StatsdAddr              *string `json:"statsd_address,omitempty" hcl:"statsd_address" mapstructure:"statsd_address"`
 	StatsiteAddr            *string `json:"statsite_address,omitempty" hcl:"statsite_address" mapstructure:"statsite_address"`
-	EnableDeprecatedNames   *bool   `json:"enable_deprecated_names" hcl:"enable_deprecated_names" mapstructure:"enable_deprecated_names"`
 }
 
 type Ports struct {
diff --git a/agent/config/runtime_test.go b/agent/config/runtime_test.go
index cc8deca9ba..f7e304141f 100644
--- a/agent/config/runtime_test.go
+++ b/agent/config/runtime_test.go
@@ -1811,28 +1811,10 @@ func TestConfigFlagsAndEdgecases(t *testing.T) {
 			patch: func(rt *RuntimeConfig) {
 				rt.DataDir = dataDir
 				rt.TelemetryAllowedPrefixes = []string{"foo"}
-				rt.TelemetryBlockedPrefixes = []string{"bar", "consul.consul."}
+				rt.TelemetryBlockedPrefixes = []string{"bar"}
 			},
 			warns: []string{`Filter rule must begin with either '+' or '-': "nix"`},
 		},
-		{
-			desc: "telemetry.enable_deprecated_names adds allow rule for whitelist",
-			args: []string{
-				`-data-dir=` + dataDir,
-			},
-			json: []string{`{
-				"telemetry": { "enable_deprecated_names": true, "filter_default": false }
-			}`},
-			hcl: []string{`
-				telemetry = { enable_deprecated_names = true filter_default = false }
-			`},
-			patch: func(rt *RuntimeConfig) {
-				rt.DataDir = dataDir
-				rt.TelemetryFilterDefault = false
-				rt.TelemetryAllowedPrefixes = []string{"consul.consul."}
-				rt.TelemetryBlockedPrefixes = []string{}
-			},
-		},
 		{
 			desc: "encrypt has invalid key",
 			args: []string{
@@ -2620,7 +2602,6 @@ func TestFullConfig(t *testing.T) {
 				"dogstatsd_tags": [ "3N81zSUB","Xtj8AnXZ" ],
 				"filter_default": true,
 				"prefix_filter": [ "+oJotS8XJ","-cazlEhGn" ],
-				"enable_deprecated_names": true,
 				"metrics_prefix": "ftO6DySn",
 				"prometheus_retention_time": "15s",
 				"statsd_address": "drce87cy",
"drce87cy", @@ -3052,7 +3033,6 @@ func TestFullConfig(t *testing.T) { dogstatsd_tags = [ "3N81zSUB","Xtj8AnXZ" ] filter_default = true prefix_filter = [ "+oJotS8XJ","-cazlEhGn" ] - enable_deprecated_names = true metrics_prefix = "ftO6DySn" prometheus_retention_time = "15s" statsd_address = "drce87cy" @@ -3606,7 +3586,7 @@ func TestFullConfig(t *testing.T) { TelemetryDogstatsdAddr: "0wSndumK", TelemetryDogstatsdTags: []string{"3N81zSUB", "Xtj8AnXZ"}, TelemetryFilterDefault: true, - TelemetryAllowedPrefixes: []string{"oJotS8XJ", "consul.consul."}, + TelemetryAllowedPrefixes: []string{"oJotS8XJ"}, TelemetryBlockedPrefixes: []string{"cazlEhGn"}, TelemetryMetricsPrefix: "ftO6DySn", TelemetryPrometheusRetentionTime: 15 * time.Second, diff --git a/agent/consul/acl.go b/agent/consul/acl.go index eb14e1dabc..1e95e62e44 100644 --- a/agent/consul/acl.go +++ b/agent/consul/acl.go @@ -41,7 +41,6 @@ type aclCacheEntry struct { // assumes its running in the ACL datacenter, or in a non-ACL datacenter when // using its replicated ACLs during an outage. func (s *Server) aclLocalFault(id string) (string, string, error) { - defer metrics.MeasureSince([]string{"consul", "acl", "fault"}, time.Now()) defer metrics.MeasureSince([]string{"acl", "fault"}, time.Now()) // Query the state store. @@ -75,7 +74,6 @@ func (s *Server) resolveToken(id string) (acl.ACL, error) { if len(authDC) == 0 { return nil, nil } - defer metrics.MeasureSince([]string{"consul", "acl", "resolveToken"}, time.Now()) defer metrics.MeasureSince([]string{"acl", "resolveToken"}, time.Now()) // Handle the anonymous token @@ -159,11 +157,9 @@ func (c *aclCache) lookupACL(id, authDC string) (acl.ACL, error) { // Check for live cache. if cached != nil && time.Now().Before(cached.Expires) { - metrics.IncrCounter([]string{"consul", "acl", "cache_hit"}, 1) metrics.IncrCounter([]string{"acl", "cache_hit"}, 1) return cached.ACL, nil } - metrics.IncrCounter([]string{"consul", "acl", "cache_miss"}, 1) metrics.IncrCounter([]string{"acl", "cache_miss"}, 1) // Attempt to refresh the policy from the ACL datacenter via an RPC. @@ -226,7 +222,6 @@ func (c *aclCache) lookupACL(id, authDC string) (acl.ACL, error) { // Fake up an ACL datacenter reply and inject it into the cache. // Note we use the local TTL here, so this'll be used for that // amount of time even once the ACL datacenter becomes available. - metrics.IncrCounter([]string{"consul", "acl", "replication_hit"}, 1) metrics.IncrCounter([]string{"acl", "replication_hit"}, 1) reply.ETag = makeACLETag(parent, policy) reply.TTL = c.config.ACLTTL diff --git a/agent/consul/acl_endpoint.go b/agent/consul/acl_endpoint.go index 2a8b4b2340..c4f1d07a70 100644 --- a/agent/consul/acl_endpoint.go +++ b/agent/consul/acl_endpoint.go @@ -145,7 +145,6 @@ func (a *ACL) Apply(args *structs.ACLRequest, reply *string) error { if done, err := a.srv.forward("ACL.Apply", args, args, reply); done { return err } - defer metrics.MeasureSince([]string{"consul", "acl", "apply"}, time.Now()) defer metrics.MeasureSince([]string{"acl", "apply"}, time.Now()) // Verify we are allowed to serve this request diff --git a/agent/consul/acl_replication.go b/agent/consul/acl_replication.go index 38d5f25fc5..b008aa87ac 100644 --- a/agent/consul/acl_replication.go +++ b/agent/consul/acl_replication.go @@ -149,7 +149,6 @@ func (s *Server) fetchLocalACLs() (structs.ACLs, error) { // datacenter. The lastIndex parameter is a hint about which remote index we // have replicated to, so this is expected to block until something changes. 
 func (s *Server) fetchRemoteACLs(lastRemoteIndex uint64) (*structs.IndexedACLs, error) {
-	defer metrics.MeasureSince([]string{"consul", "leader", "fetchRemoteACLs"}, time.Now())
 	defer metrics.MeasureSince([]string{"leader", "fetchRemoteACLs"}, time.Now())
 
 	args := structs.DCSpecificRequest{
@@ -170,7 +169,6 @@ func (s *Server) fetchRemoteACLs(lastRemoteIndex uint64) (*structs.IndexedACLs,
 // UpdateLocalACLs is given a list of changes to apply in order to bring the
 // local ACLs in-line with the remote ACLs from the ACL datacenter.
 func (s *Server) updateLocalACLs(changes structs.ACLRequests) error {
-	defer metrics.MeasureSince([]string{"consul", "leader", "updateLocalACLs"}, time.Now())
 	defer metrics.MeasureSince([]string{"leader", "updateLocalACLs"}, time.Now())
 
 	minTimePerOp := time.Second / time.Duration(s.config.ACLReplicationApplyLimit)
@@ -218,7 +216,6 @@ func (s *Server) replicateACLs(lastRemoteIndex uint64) (uint64, error) {
 	// Measure everything after the remote query, which can block for long
 	// periods of time. This metric is a good measure of how expensive the
 	// replication process is.
-	defer metrics.MeasureSince([]string{"consul", "leader", "replicateACLs"}, time.Now())
 	defer metrics.MeasureSince([]string{"leader", "replicateACLs"}, time.Now())
 
 	local, err := s.fetchLocalACLs()
diff --git a/agent/consul/autopilot.go b/agent/consul/autopilot.go
index 5c5cf6ffed..59741efc91 100644
--- a/agent/consul/autopilot.go
+++ b/agent/consul/autopilot.go
@@ -55,13 +55,10 @@ func (d *AutopilotDelegate) IsServer(m serf.Member) (*autopilot.ServerInfo, erro
 // Heartbeat a metric for monitoring if we're the leader
 func (d *AutopilotDelegate) NotifyHealth(health autopilot.OperatorHealthReply) {
 	if d.server.raft.State() == raft.Leader {
-		metrics.SetGauge([]string{"consul", "autopilot", "failure_tolerance"}, float32(health.FailureTolerance))
 		metrics.SetGauge([]string{"autopilot", "failure_tolerance"}, float32(health.FailureTolerance))
 		if health.Healthy {
-			metrics.SetGauge([]string{"consul", "autopilot", "healthy"}, 1)
 			metrics.SetGauge([]string{"autopilot", "healthy"}, 1)
 		} else {
-			metrics.SetGauge([]string{"consul", "autopilot", "healthy"}, 0)
 			metrics.SetGauge([]string{"autopilot", "healthy"}, 0)
 		}
 	}
diff --git a/agent/consul/catalog_endpoint.go b/agent/consul/catalog_endpoint.go
index 5cfb3ff513..52ba5fb1b7 100644
--- a/agent/consul/catalog_endpoint.go
+++ b/agent/consul/catalog_endpoint.go
@@ -24,7 +24,6 @@ func (c *Catalog) Register(args *structs.RegisterRequest, reply *struct{}) error
 	if done, err := c.srv.forward("Catalog.Register", args, args, reply); done {
 		return err
 	}
-	defer metrics.MeasureSince([]string{"consul", "catalog", "register"}, time.Now())
 	defer metrics.MeasureSince([]string{"catalog", "register"}, time.Now())
 
 	// Verify the args.
@@ -117,7 +116,6 @@ func (c *Catalog) Deregister(args *structs.DeregisterRequest, reply *struct{}) e
 	if done, err := c.srv.forward("Catalog.Deregister", args, args, reply); done {
 		return err
 	}
-	defer metrics.MeasureSince([]string{"consul", "catalog", "deregister"}, time.Now())
 	defer metrics.MeasureSince([]string{"catalog", "deregister"}, time.Now())
 
 	// Verify the args
@@ -279,19 +277,13 @@ func (c *Catalog) ServiceNodes(args *structs.ServiceSpecificRequest, reply *stru
 
 	// Provide some metrics
 	if err == nil {
-		metrics.IncrCounterWithLabels([]string{"consul", "catalog", "service", "query"}, 1,
-			[]metrics.Label{{Name: "service", Value: args.ServiceName}})
 		metrics.IncrCounterWithLabels([]string{"catalog", "service", "query"}, 1,
 			[]metrics.Label{{Name: "service", Value: args.ServiceName}})
 		if args.ServiceTag != "" {
-			metrics.IncrCounterWithLabels([]string{"consul", "catalog", "service", "query-tag"}, 1,
-				[]metrics.Label{{Name: "service", Value: args.ServiceName}, {Name: "tag", Value: args.ServiceTag}})
 			metrics.IncrCounterWithLabels([]string{"catalog", "service", "query-tag"}, 1,
 				[]metrics.Label{{Name: "service", Value: args.ServiceName}, {Name: "tag", Value: args.ServiceTag}})
 		}
 		if len(reply.ServiceNodes) == 0 {
-			metrics.IncrCounterWithLabels([]string{"consul", "catalog", "service", "not-found"}, 1,
-				[]metrics.Label{{Name: "service", Value: args.ServiceName}})
 			metrics.IncrCounterWithLabels([]string{"catalog", "service", "not-found"}, 1,
 				[]metrics.Label{{Name: "service", Value: args.ServiceName}})
 		}
diff --git a/agent/consul/client.go b/agent/consul/client.go
index 96baeb1748..f3d5fc6bbb 100644
--- a/agent/consul/client.go
+++ b/agent/consul/client.go
@@ -249,10 +249,8 @@ TRY:
 	}
 
 	// Enforce the RPC limit.
-	metrics.IncrCounter([]string{"consul", "client", "rpc"}, 1)
 	metrics.IncrCounter([]string{"client", "rpc"}, 1)
 	if !c.rpcLimiter.Allow() {
-		metrics.IncrCounter([]string{"consul", "client", "rpc", "exceeded"}, 1)
 		metrics.IncrCounter([]string{"client", "rpc", "exceeded"}, 1)
 		return structs.ErrRPCRateExceeded
 	}
@@ -293,10 +291,8 @@ func (c *Client) SnapshotRPC(args *structs.SnapshotRequest, in io.Reader, out io
 	}
 
 	// Enforce the RPC limit.
-	metrics.IncrCounter([]string{"consul", "client", "rpc"}, 1)
 	metrics.IncrCounter([]string{"client", "rpc"}, 1)
 	if !c.rpcLimiter.Allow() {
-		metrics.IncrCounter([]string{"consul", "client", "rpc", "exceeded"}, 1)
 		metrics.IncrCounter([]string{"client", "rpc", "exceeded"}, 1)
 		return structs.ErrRPCRateExceeded
 	}
diff --git a/agent/consul/fsm/commands_oss.go b/agent/consul/fsm/commands_oss.go
index 2029f1723e..ede04eef6a 100644
--- a/agent/consul/fsm/commands_oss.go
+++ b/agent/consul/fsm/commands_oss.go
@@ -23,7 +23,6 @@ func init() {
 }
 
 func (c *FSM) applyRegister(buf []byte, index uint64) interface{} {
-	defer metrics.MeasureSince([]string{"consul", "fsm", "register"}, time.Now())
 	defer metrics.MeasureSince([]string{"fsm", "register"}, time.Now())
 	var req structs.RegisterRequest
 	if err := structs.Decode(buf, &req); err != nil {
@@ -39,7 +38,6 @@ func (c *FSM) applyRegister(buf []byte, index uint64) interface{} {
 }
 
 func (c *FSM) applyDeregister(buf []byte, index uint64) interface{} {
-	defer metrics.MeasureSince([]string{"consul", "fsm", "deregister"}, time.Now())
 	defer metrics.MeasureSince([]string{"fsm", "deregister"}, time.Now())
 	var req structs.DeregisterRequest
 	if err := structs.Decode(buf, &req); err != nil {
@@ -73,8 +71,6 @@ func (c *FSM) applyKVSOperation(buf []byte, index uint64) interface{} {
 	if err := structs.Decode(buf, &req); err != nil {
 		panic(fmt.Errorf("failed to decode request: %v", err))
 	}
-	defer metrics.MeasureSinceWithLabels([]string{"consul", "fsm", "kvs"}, time.Now(),
-		[]metrics.Label{{Name: "op", Value: string(req.Op)}})
 	defer metrics.MeasureSinceWithLabels([]string{"fsm", "kvs"}, time.Now(),
 		[]metrics.Label{{Name: "op", Value: string(req.Op)}})
 	switch req.Op {
@@ -120,8 +116,6 @@ func (c *FSM) applySessionOperation(buf []byte, index uint64) interface{} {
 	if err := structs.Decode(buf, &req); err != nil {
 		panic(fmt.Errorf("failed to decode request: %v", err))
 	}
-	defer metrics.MeasureSinceWithLabels([]string{"consul", "fsm", "session"}, time.Now(),
-		[]metrics.Label{{Name: "op", Value: string(req.Op)}})
 	defer metrics.MeasureSinceWithLabels([]string{"fsm", "session"}, time.Now(),
 		[]metrics.Label{{Name: "op", Value: string(req.Op)}})
 	switch req.Op {
@@ -143,8 +137,6 @@ func (c *FSM) applyACLOperation(buf []byte, index uint64) interface{} {
 	if err := structs.Decode(buf, &req); err != nil {
 		panic(fmt.Errorf("failed to decode request: %v", err))
 	}
-	defer metrics.MeasureSinceWithLabels([]string{"consul", "fsm", "acl"}, time.Now(),
-		[]metrics.Label{{Name: "op", Value: string(req.Op)}})
 	defer metrics.MeasureSinceWithLabels([]string{"fsm", "acl"}, time.Now(),
 		[]metrics.Label{{Name: "op", Value: string(req.Op)}})
 	switch req.Op {
@@ -177,8 +169,6 @@ func (c *FSM) applyTombstoneOperation(buf []byte, index uint64) interface{} {
 	if err := structs.Decode(buf, &req); err != nil {
 		panic(fmt.Errorf("failed to decode request: %v", err))
 	}
-	defer metrics.MeasureSinceWithLabels([]string{"consul", "fsm", "tombstone"}, time.Now(),
-		[]metrics.Label{{Name: "op", Value: string(req.Op)}})
 	defer metrics.MeasureSinceWithLabels([]string{"fsm", "tombstone"}, time.Now(),
 		[]metrics.Label{{Name: "op", Value: string(req.Op)}})
 	switch req.Op {
@@ -199,7 +189,6 @@ func (c *FSM) applyCoordinateBatchUpdate(buf []byte, index uint64) interface{} {
 	if err := structs.Decode(buf, &updates); err != nil {
 		panic(fmt.Errorf("failed to decode batch updates: %v", err))
 	}
-	defer metrics.MeasureSince([]string{"consul", "fsm", "coordinate", "batch-update"}, time.Now())
 	defer metrics.MeasureSince([]string{"fsm", "coordinate", "batch-update"}, time.Now())
"coordinate", "batch-update"}, time.Now()) if err := c.state.CoordinateBatchUpdate(index, updates); err != nil { return err @@ -215,8 +204,6 @@ func (c *FSM) applyPreparedQueryOperation(buf []byte, index uint64) interface{} panic(fmt.Errorf("failed to decode request: %v", err)) } - defer metrics.MeasureSinceWithLabels([]string{"consul", "fsm", "prepared-query"}, time.Now(), - []metrics.Label{{Name: "op", Value: string(req.Op)}}) defer metrics.MeasureSinceWithLabels([]string{"fsm", "prepared-query"}, time.Now(), []metrics.Label{{Name: "op", Value: string(req.Op)}}) switch req.Op { @@ -235,7 +222,6 @@ func (c *FSM) applyTxn(buf []byte, index uint64) interface{} { if err := structs.Decode(buf, &req); err != nil { panic(fmt.Errorf("failed to decode request: %v", err)) } - defer metrics.MeasureSince([]string{"consul", "fsm", "txn"}, time.Now()) defer metrics.MeasureSince([]string{"fsm", "txn"}, time.Now()) results, errors := c.state.TxnRW(index, req.Ops) return structs.TxnResponse{ @@ -249,7 +235,6 @@ func (c *FSM) applyAutopilotUpdate(buf []byte, index uint64) interface{} { if err := structs.Decode(buf, &req); err != nil { panic(fmt.Errorf("failed to decode request: %v", err)) } - defer metrics.MeasureSince([]string{"consul", "fsm", "autopilot"}, time.Now()) defer metrics.MeasureSince([]string{"fsm", "autopilot"}, time.Now()) if req.CAS { diff --git a/agent/consul/fsm/snapshot.go b/agent/consul/fsm/snapshot.go index 3721f07569..51b58d2906 100644 --- a/agent/consul/fsm/snapshot.go +++ b/agent/consul/fsm/snapshot.go @@ -57,7 +57,6 @@ func registerRestorer(msg structs.MessageType, fn restorer) { // Persist saves the FSM snapshot out to the given sink. func (s *snapshot) Persist(sink raft.SnapshotSink) error { - defer metrics.MeasureSince([]string{"consul", "fsm", "persist"}, time.Now()) defer metrics.MeasureSince([]string{"fsm", "persist"}, time.Now()) // Write the header diff --git a/agent/consul/health_endpoint.go b/agent/consul/health_endpoint.go index 1cf7460444..db59356c86 100644 --- a/agent/consul/health_endpoint.go +++ b/agent/consul/health_endpoint.go @@ -139,19 +139,13 @@ func (h *Health) ServiceNodes(args *structs.ServiceSpecificRequest, reply *struc // Provide some metrics if err == nil { - metrics.IncrCounterWithLabels([]string{"consul", "health", "service", "query"}, 1, - []metrics.Label{{Name: "service", Value: args.ServiceName}}) metrics.IncrCounterWithLabels([]string{"health", "service", "query"}, 1, []metrics.Label{{Name: "service", Value: args.ServiceName}}) if args.ServiceTag != "" { - metrics.IncrCounterWithLabels([]string{"consul", "health", "service", "query-tag"}, 1, - []metrics.Label{{Name: "service", Value: args.ServiceName}, {Name: "tag", Value: args.ServiceTag}}) metrics.IncrCounterWithLabels([]string{"health", "service", "query-tag"}, 1, []metrics.Label{{Name: "service", Value: args.ServiceName}, {Name: "tag", Value: args.ServiceTag}}) } if len(reply.Nodes) == 0 { - metrics.IncrCounterWithLabels([]string{"consul", "health", "service", "not-found"}, 1, - []metrics.Label{{Name: "service", Value: args.ServiceName}}) metrics.IncrCounterWithLabels([]string{"health", "service", "not-found"}, 1, []metrics.Label{{Name: "service", Value: args.ServiceName}}) } diff --git a/agent/consul/kvs_endpoint.go b/agent/consul/kvs_endpoint.go index ced5c4119b..b5f3fbb574 100644 --- a/agent/consul/kvs_endpoint.go +++ b/agent/consul/kvs_endpoint.go @@ -81,7 +81,6 @@ func (k *KVS) Apply(args *structs.KVSRequest, reply *bool) error { if done, err := k.srv.forward("KVS.Apply", args, args, 
 		return err
 	}
-	defer metrics.MeasureSince([]string{"consul", "kvs", "apply"}, time.Now())
 	defer metrics.MeasureSince([]string{"kvs", "apply"}, time.Now())
 
 	// Perform the pre-apply checks.
diff --git a/agent/consul/leader.go b/agent/consul/leader.go
index f5a1f237e9..d950d71bac 100644
--- a/agent/consul/leader.go
+++ b/agent/consul/leader.go
@@ -116,7 +116,6 @@ RECONCILE:
 		s.logger.Printf("[ERR] consul: failed to wait for barrier: %v", err)
 		goto WAIT
 	}
-	metrics.MeasureSince([]string{"consul", "leader", "barrier"}, start)
 	metrics.MeasureSince([]string{"leader", "barrier"}, start)
 
 	// Check if we need to handle initial leadership actions
@@ -183,7 +182,6 @@ WAIT:
 // previously inflight transactions have been committed and that our
 // state is up-to-date.
 func (s *Server) establishLeadership() error {
-	defer metrics.MeasureSince([]string{"consul", "leader", "establish_leadership"}, time.Now())
 	// This will create the anonymous token and master token (if that is
 	// configured).
 	if err := s.initializeACL(); err != nil {
@@ -219,7 +217,6 @@ func (s *Server) establishLeadership() error {
 // revokeLeadership is invoked once we step down as leader.
 // This is used to cleanup any state that may be specific to a leader.
 func (s *Server) revokeLeadership() error {
-	defer metrics.MeasureSince([]string{"consul", "leader", "revoke_leadership"}, time.Now())
 	// Disable the tombstone GC, since it is only useful as a leader
 	s.tombstoneGC.SetEnabled(false)
 
@@ -444,7 +441,6 @@ func (s *Server) reconcileMember(member serf.Member) error {
 		s.logger.Printf("[WARN] consul: skipping reconcile of node %v", member)
 		return nil
 	}
-	defer metrics.MeasureSince([]string{"consul", "leader", "reconcileMember"}, time.Now())
 	defer metrics.MeasureSince([]string{"leader", "reconcileMember"}, time.Now())
 	var err error
 	switch member.Status {
@@ -805,7 +801,6 @@ func (s *Server) removeConsulServer(m serf.Member, port int) error {
 // through Raft to ensure consistency. We do this outside the leader loop
 // to avoid blocking.
 func (s *Server) reapTombstones(index uint64) {
-	defer metrics.MeasureSince([]string{"consul", "leader", "reapTombstones"}, time.Now())
 	defer metrics.MeasureSince([]string{"leader", "reapTombstones"}, time.Now())
 	req := structs.TombstoneRequest{
 		Datacenter: s.config.Datacenter,
diff --git a/agent/consul/prepared_query_endpoint.go b/agent/consul/prepared_query_endpoint.go
index ee6061a84e..a56ddd35a7 100644
--- a/agent/consul/prepared_query_endpoint.go
+++ b/agent/consul/prepared_query_endpoint.go
@@ -32,7 +32,6 @@ func (p *PreparedQuery) Apply(args *structs.PreparedQueryRequest, reply *string)
 	if done, err := p.srv.forward("PreparedQuery.Apply", args, args, reply); done {
 		return err
 	}
-	defer metrics.MeasureSince([]string{"consul", "prepared-query", "apply"}, time.Now())
 	defer metrics.MeasureSince([]string{"prepared-query", "apply"}, time.Now())
 
 	// Validate the ID. We must create new IDs before applying to the Raft
@@ -287,7 +286,6 @@ func (p *PreparedQuery) Explain(args *structs.PreparedQueryExecuteRequest,
 	if done, err := p.srv.forward("PreparedQuery.Explain", args, args, reply); done {
 		return err
 	}
-	defer metrics.MeasureSince([]string{"consul", "prepared-query", "explain"}, time.Now())
 	defer metrics.MeasureSince([]string{"prepared-query", "explain"}, time.Now())
 
 	// We have to do this ourselves since we are not doing a blocking RPC.
@@ -335,7 +333,6 @@ func (p *PreparedQuery) Execute(args *structs.PreparedQueryExecuteRequest,
 	if done, err := p.srv.forward("PreparedQuery.Execute", args, args, reply); done {
 		return err
 	}
-	defer metrics.MeasureSince([]string{"consul", "prepared-query", "execute"}, time.Now())
 	defer metrics.MeasureSince([]string{"prepared-query", "execute"}, time.Now())
 
 	// We have to do this ourselves since we are not doing a blocking RPC.
@@ -471,7 +468,6 @@ func (p *PreparedQuery) ExecuteRemote(args *structs.PreparedQueryExecuteRemoteRe
 	if done, err := p.srv.forward("PreparedQuery.ExecuteRemote", args, args, reply); done {
 		return err
 	}
-	defer metrics.MeasureSince([]string{"consul", "prepared-query", "execute_remote"}, time.Now())
 	defer metrics.MeasureSince([]string{"prepared-query", "execute_remote"}, time.Now())
 
 	// We have to do this ourselves since we are not doing a blocking RPC.
diff --git a/agent/consul/rpc.go b/agent/consul/rpc.go
index 678b805140..fcde830b0c 100644
--- a/agent/consul/rpc.go
+++ b/agent/consul/rpc.go
@@ -59,7 +59,6 @@ func (s *Server) listen(listener net.Listener) {
 		}
 
 		go s.handleConn(conn, false)
-		metrics.IncrCounter([]string{"consul", "rpc", "accept_conn"}, 1)
 		metrics.IncrCounter([]string{"rpc", "accept_conn"}, 1)
 	}
 }
@@ -97,7 +96,6 @@ func (s *Server) handleConn(conn net.Conn, isTLS bool) {
 		s.handleConsulConn(conn)
 
 	case pool.RPCRaft:
-		metrics.IncrCounter([]string{"consul", "rpc", "raft_handoff"}, 1)
 		metrics.IncrCounter([]string{"rpc", "raft_handoff"}, 1)
 		s.raftLayer.Handoff(conn)
 
@@ -156,12 +154,10 @@ func (s *Server) handleConsulConn(conn net.Conn) {
 		if err := s.rpcServer.ServeRequest(rpcCodec); err != nil {
 			if err != io.EOF && !strings.Contains(err.Error(), "closed") {
 				s.logger.Printf("[ERR] consul.rpc: RPC error: %v %s", err, logConn(conn))
-				metrics.IncrCounter([]string{"consul", "rpc", "request_error"}, 1)
 				metrics.IncrCounter([]string{"rpc", "request_error"}, 1)
 			}
 			return
 		}
-		metrics.IncrCounter([]string{"consul", "rpc", "request"}, 1)
 		metrics.IncrCounter([]string{"rpc", "request"}, 1)
 	}
 }
@@ -288,8 +284,6 @@ func (s *Server) forwardDC(method, dc string, args interface{}, reply interface{
 		return structs.ErrNoDCPath
 	}
 
-	metrics.IncrCounterWithLabels([]string{"consul", "rpc", "cross-dc"}, 1,
-		[]metrics.Label{{Name: "datacenter", Value: dc}})
 	metrics.IncrCounterWithLabels([]string{"rpc", "cross-dc"}, 1,
 		[]metrics.Label{{Name: "datacenter", Value: dc}})
 	if err := s.connPool.RPC(dc, server.Addr, server.Version, method, server.UseTLS, args, reply); err != nil {
@@ -401,7 +395,6 @@ RUN_QUERY:
 	}
 
 	// Run the query.
-	metrics.IncrCounter([]string{"consul", "rpc", "query"}, 1)
 	metrics.IncrCounter([]string{"rpc", "query"}, 1)
 
 	// Operate on a consistent set of state. This makes sure that the
@@ -452,7 +445,6 @@ func (s *Server) setQueryMeta(m *structs.QueryMeta) {
 // consistentRead is used to ensure we do not perform a stale
 // read. This is done by verifying leadership before the read.
 func (s *Server) consistentRead() error {
-	defer metrics.MeasureSince([]string{"consul", "rpc", "consistentRead"}, time.Now())
 	defer metrics.MeasureSince([]string{"rpc", "consistentRead"}, time.Now())
 	future := s.raft.VerifyLeader()
 	if err := future.Error(); err != nil {
diff --git a/agent/consul/segment_oss.go b/agent/consul/segment_oss.go
index 44f447124b..21a89e807b 100644
--- a/agent/consul/segment_oss.go
+++ b/agent/consul/segment_oss.go
@@ -59,7 +59,6 @@ func (s *Server) floodSegments(config *Config) {
 // all live nodes are registered, all failed nodes are marked as such, and all
 // left nodes are de-registered.
 func (s *Server) reconcile() (err error) {
-	defer metrics.MeasureSince([]string{"consul", "leader", "reconcile"}, time.Now())
 	defer metrics.MeasureSince([]string{"leader", "reconcile"}, time.Now())
 	members := s.serfLAN.Members()
 	knownMembers := make(map[string]struct{})
diff --git a/agent/consul/session_endpoint.go b/agent/consul/session_endpoint.go
index 54bb32f81a..3817460b25 100644
--- a/agent/consul/session_endpoint.go
+++ b/agent/consul/session_endpoint.go
@@ -23,7 +23,6 @@ func (s *Session) Apply(args *structs.SessionRequest, reply *string) error {
 	if done, err := s.srv.forward("Session.Apply", args, args, reply); done {
 		return err
 	}
-	defer metrics.MeasureSince([]string{"consul", "session", "apply"}, time.Now())
 	defer metrics.MeasureSince([]string{"session", "apply"}, time.Now())
 
 	// Verify the args
@@ -222,7 +221,6 @@ func (s *Session) Renew(args *structs.SessionSpecificRequest,
 	if done, err := s.srv.forward("Session.Renew", args, args, reply); done {
 		return err
 	}
-	defer metrics.MeasureSince([]string{"consul", "session", "renew"}, time.Now())
 	defer metrics.MeasureSince([]string{"session", "renew"}, time.Now())
 
 	// Get the session, from local state.
diff --git a/agent/consul/session_ttl.go b/agent/consul/session_ttl.go
index 99247366f4..71265f0b1d 100644
--- a/agent/consul/session_ttl.go
+++ b/agent/consul/session_ttl.go
@@ -84,7 +84,6 @@ func (s *Server) createSessionTimer(id string, ttl time.Duration) {
 // invalidateSession is invoked when a session TTL is reached and we
 // need to invalidate the session.
 func (s *Server) invalidateSession(id string) {
-	defer metrics.MeasureSince([]string{"consul", "session_ttl", "invalidate"}, time.Now())
 	defer metrics.MeasureSince([]string{"session_ttl", "invalidate"}, time.Now())
 
 	// Clear the session timer
@@ -134,7 +133,6 @@ func (s *Server) sessionStats() {
 	for {
 		select {
 		case <-time.After(5 * time.Second):
-			metrics.SetGauge([]string{"consul", "session_ttl", "active"}, float32(s.sessionTimers.Len()))
 			metrics.SetGauge([]string{"session_ttl", "active"}, float32(s.sessionTimers.Len()))
 
 		case <-s.shutdownCh:
diff --git a/agent/consul/txn_endpoint.go b/agent/consul/txn_endpoint.go
index 7085548c8a..1822a74974 100644
--- a/agent/consul/txn_endpoint.go
+++ b/agent/consul/txn_endpoint.go
@@ -46,7 +46,6 @@ func (t *Txn) Apply(args *structs.TxnRequest, reply *structs.TxnResponse) error
 	if done, err := t.srv.forward("Txn.Apply", args, args, reply); done {
 		return err
 	}
-	defer metrics.MeasureSince([]string{"consul", "txn", "apply"}, time.Now())
 	defer metrics.MeasureSince([]string{"txn", "apply"}, time.Now())
 
 	// Run the pre-checks before we send the transaction into Raft.
@@ -90,7 +89,6 @@ func (t *Txn) Read(args *structs.TxnReadRequest, reply *structs.TxnReadResponse)
 	if done, err := t.srv.forward("Txn.Read", args, args, reply); done {
 		return err
 	}
-	defer metrics.MeasureSince([]string{"consul", "txn", "read"}, time.Now())
 	defer metrics.MeasureSince([]string{"txn", "read"}, time.Now())
 
 	// We have to do this ourselves since we are not doing a blocking RPC.
diff --git a/agent/dns.go b/agent/dns.go
index 6211e71b95..f1c0d8bda7 100644
--- a/agent/dns.go
+++ b/agent/dns.go
@@ -158,8 +158,6 @@ START:
 func (d *DNSServer) handlePtr(resp dns.ResponseWriter, req *dns.Msg) {
 	q := req.Question[0]
 	defer func(s time.Time) {
-		metrics.MeasureSinceWithLabels([]string{"consul", "dns", "ptr_query"}, s,
-			[]metrics.Label{{Name: "node", Value: d.agent.config.NodeName}})
 		metrics.MeasureSinceWithLabels([]string{"dns", "ptr_query"}, s,
 			[]metrics.Label{{Name: "node", Value: d.agent.config.NodeName}})
 		d.logger.Printf("[DEBUG] dns: request for %v (%v) from client %s (%s)",
@@ -230,8 +228,6 @@ func (d *DNSServer) handlePtr(resp dns.ResponseWriter, req *dns.Msg) {
 func (d *DNSServer) handleQuery(resp dns.ResponseWriter, req *dns.Msg) {
 	q := req.Question[0]
 	defer func(s time.Time) {
-		metrics.MeasureSinceWithLabels([]string{"consul", "dns", "domain_query"}, s,
-			[]metrics.Label{{Name: "node", Value: d.agent.config.NodeName}})
 		metrics.MeasureSinceWithLabels([]string{"dns", "domain_query"}, s,
 			[]metrics.Label{{Name: "node", Value: d.agent.config.NodeName}})
 		d.logger.Printf("[DEBUG] dns: request for name %v type %v class %v (took %v) from client %s (%s)",
@@ -542,7 +538,6 @@ RPC:
 			d.logger.Printf("[WARN] dns: Query results too stale, re-requesting")
 			goto RPC
 		} else if out.LastContact > staleCounterThreshold {
-			metrics.IncrCounter([]string{"consul", "dns", "stale_queries"}, 1)
 			metrics.IncrCounter([]string{"dns", "stale_queries"}, 1)
 		}
 	}
@@ -891,7 +886,6 @@ func (d *DNSServer) lookupServiceNodes(datacenter, service, tag string) (structs
 	}
 
 	if args.AllowStale && out.LastContact > staleCounterThreshold {
-		metrics.IncrCounter([]string{"consul", "dns", "stale_queries"}, 1)
 		metrics.IncrCounter([]string{"dns", "stale_queries"}, 1)
 	}
 
@@ -1042,7 +1036,6 @@ RPC:
 			d.logger.Printf("[WARN] dns: Query results too stale, re-requesting")
 			goto RPC
 		} else if out.LastContact > staleCounterThreshold {
-			metrics.IncrCounter([]string{"consul", "dns", "stale_queries"}, 1)
 			metrics.IncrCounter([]string{"dns", "stale_queries"}, 1)
 		}
 	}
diff --git a/agent/http.go b/agent/http.go
index 9d331a034e..9ef9ab6b0b 100644
--- a/agent/http.go
+++ b/agent/http.go
@@ -110,7 +110,6 @@ func (s *HTTPServer) handler(enableDebug bool) http.Handler {
 		start := time.Now()
 		handler(resp, req)
 		key := append([]string{"http", req.Method}, parts...)
-		metrics.MeasureSince(append([]string{"consul"}, key...), start)
 		metrics.MeasureSince(key, start)
 	}
 
diff --git a/website/source/docs/agent/options.html.md b/website/source/docs/agent/options.html.md
index 9ee1f5e0b6..56545d0eab 100644
--- a/website/source/docs/agent/options.html.md
+++ b/website/source/docs/agent/options.html.md
@@ -1340,10 +1340,6 @@ Consul will not enable TLS for the HTTP API unless the `https` port has been ass
     The format is compatible natively with prometheus. When running in this mode, it is recommended
     to also enable the option `disable_hostname` to avoid having prefixed metrics with hostname.
 
-  * `enable_deprecated_names`
-    Added in Consul 1.0, this enables old metric names of the format `consul.consul...` to be sent alongside
-    other metrics. Defaults to false.
-
   * `statsd_address` This provides the address of a statsd instance in the format `host:port`. If provided,
     Consul will send various telemetry information to that instance for aggregation. This can be used to
     capture runtime information. This sends UDP packets only and can be used with