Merge branch 'main' of github.com:hashicorp/consul into docs-tables-service-discovery-services

pull/10892/head
trujillo-adam 2021-08-24 11:26:32 -07:00
commit 4c7eab88b9
234 changed files with 4227 additions and 2103 deletions

.changelog/10804.txt

@ -0,0 +1,3 @@
```release-note:improvement
debug: rename cluster capture target to members, to be more consistent with the terms used by the API.
```

.changelog/10873.txt

@ -0,0 +1,3 @@
```release-note:bug
connect: ensure SAN validation for prepared queries validates against all possible prepared query targets
```
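The fix described above is about verifying a presented certificate against every target a prepared query may resolve to (including failover targets), not just the first one. As a rough, hedged illustration of the general idea only, and not Consul's actual verification code (the helper name is made up), matching a leaf certificate's URI SANs against a set of acceptable SPIFFE IDs could look like this:

```go
package main

import (
	"crypto/x509"
	"fmt"
)

// matchesAnyTarget reports whether any URI SAN on the presented leaf
// certificate equals one of the SPIFFE IDs the prepared query could
// legitimately resolve to.
func matchesAnyTarget(leaf *x509.Certificate, allowedSpiffeIDs []string) bool {
	for _, uri := range leaf.URIs {
		for _, allowed := range allowedSpiffeIDs {
			if uri.String() == allowed {
				return true
			}
		}
	}
	return false
}

func main() {
	// In real code the certificate comes from the TLS handshake and the
	// allowed IDs from the query's possible targets.
	var leaf x509.Certificate
	fmt.Println(matchesAnyTarget(&leaf, []string{
		"spiffe://example.consul/ns/default/dc/dc1/svc/db",
	}))
}
```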

.changelog/10901.txt

@ -0,0 +1,3 @@
```release-note:bug
ui: Properly encode non-URL safe characters in OIDC responses
```


@ -87,6 +87,8 @@ func (a *Agent) vetServiceUpdateWithAuthorizer(authz acl.Authorizer, serviceID s
}
func (a *Agent) vetCheckRegisterWithAuthorizer(authz acl.Authorizer, check *structs.HealthCheck) error {
// TODO(partitions)
var authzContext acl.AuthorizerContext
check.FillAuthzContext(&authzContext)
// Vet the check itself.
@ -147,7 +149,7 @@ func (a *Agent) filterMembers(token string, members *[]serf.Member) error {
}
var authzContext acl.AuthorizerContext
structs.DefaultEnterpriseMetaInDefaultPartition().FillAuthzContext(&authzContext)
a.agentEnterpriseMeta().FillAuthzContext(&authzContext)
// Filter out members based on the node policy.
m := *members
for i := 0; i < len(m); i++ {
@ -188,7 +190,8 @@ func (a *Agent) filterChecksWithAuthorizer(authz acl.Authorizer, checks *map[str
continue
}
} else {
structs.DefaultEnterpriseMetaInDefaultPartition().FillAuthzContext(&authzContext)
// TODO(partition): should this be a Default or Node flavored entmeta?
check.NodeEnterpriseMetaForPartition().FillAuthzContext(&authzContext)
if authz.NodeRead(a.config.NodeName, &authzContext) == acl.Allow {
continue
}
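These agent ACL hunks repeat one pattern: fill an acl.AuthorizerContext from the relevant enterprise meta (agent-, node-, or check-scoped), then ask the resolved authorizer for a decision. A condensed sketch of that call shape, using the same Consul packages the hunks import; the helper itself is hypothetical and not part of this diff:

```go
package agent

import (
	"github.com/hashicorp/consul/acl"
	"github.com/hashicorp/consul/agent/structs"
)

// vetNodeRead mirrors the vet/filter functions above: scope an authorizer
// context with the entity's enterprise meta, then enforce node:read.
func vetNodeRead(authz acl.Authorizer, entMeta *structs.EnterpriseMeta, nodeName string) error {
	var authzContext acl.AuthorizerContext
	entMeta.FillAuthzContext(&authzContext)
	if authz.NodeRead(nodeName, &authzContext) != acl.Allow {
		return acl.ErrPermissionDenied
	}
	return nil
}
```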


@ -434,6 +434,7 @@ func LocalConfig(cfg *config.RuntimeConfig) local.Config {
DiscardCheckOutput: cfg.DiscardCheckOutput,
NodeID: cfg.NodeID,
NodeName: cfg.NodeName,
Partition: cfg.PartitionOrDefault(),
TaggedAddresses: map[string]string{},
}
for k, v := range cfg.TaggedAddresses {
@ -561,8 +562,9 @@ func (a *Agent) Start(ctx context.Context) error {
State: a.State,
Tokens: a.baseDeps.Tokens,
Source: &structs.QuerySource{
Datacenter: a.config.Datacenter,
Segment: a.config.SegmentName,
Datacenter: a.config.Datacenter,
Segment: a.config.SegmentName,
NodePartition: a.config.PartitionOrEmpty(),
},
DNSConfig: proxycfg.DNSConfig{
Domain: a.config.DNSDomain,
@ -1529,11 +1531,13 @@ func (a *Agent) LocalMember() serf.Member {
// LANMembers is used to retrieve the LAN members
func (a *Agent) LANMembers() []serf.Member {
// TODO(partitions): filter this by the partition?
return a.delegate.LANMembers()
}
// WANMembers is used to retrieve the WAN members
func (a *Agent) WANMembers() []serf.Member {
// TODO(partitions): filter this by the partition by omitting wan results for now?
if srv, ok := a.delegate.(*consul.Server); ok {
return srv.WANMembers()
}
@ -1646,11 +1650,12 @@ OUTER:
for segment, coord := range cs {
agentToken := a.tokens.AgentToken()
req := structs.CoordinateUpdateRequest{
Datacenter: a.config.Datacenter,
Node: a.config.NodeName,
Segment: segment,
Coord: coord,
WriteRequest: structs.WriteRequest{Token: agentToken},
Datacenter: a.config.Datacenter,
Node: a.config.NodeName,
Segment: segment,
Coord: coord,
EnterpriseMeta: *a.agentEnterpriseMeta(),
WriteRequest: structs.WriteRequest{Token: agentToken},
}
var reply struct{}
// todo(kit) port all of these logger calls to hclog w/ loglevel configuration
@ -1674,7 +1679,7 @@ OUTER:
// reapServicesInternal does a single pass, looking for services to reap.
func (a *Agent) reapServicesInternal() {
reaped := make(map[structs.ServiceID]bool)
for checkID, cs := range a.State.CriticalCheckStates(structs.WildcardEnterpriseMetaInDefaultPartition()) {
for checkID, cs := range a.State.AllCriticalCheckStates() {
serviceID := cs.Check.CompoundServiceID()
// There's nothing to do if there's no service.
@ -2004,7 +2009,7 @@ func (a *Agent) addServiceInternal(req addServiceInternalRequest) error {
// Agent.Start does not have a snapshot, and we don't want to query
// State.Checks each time.
if req.checkStateSnapshot == nil {
req.checkStateSnapshot = a.State.Checks(structs.WildcardEnterpriseMetaInDefaultPartition())
req.checkStateSnapshot = a.State.AllChecks()
}
// Create an associated health check
@ -2458,6 +2463,8 @@ func (a *Agent) addCheck(check *structs.HealthCheck, chkType *structs.CheckType,
// Need its config to know whether we should reroute checks to it
var proxy *structs.NodeService
if service != nil {
// NOTE: Both services must live in the same namespace and
// partition so this will correctly scope the results.
for _, svc := range a.State.Services(&service.EnterpriseMeta) {
if svc.Proxy.DestinationServiceID == service.ID {
proxy = svc
@ -2719,6 +2726,7 @@ func (a *Agent) addCheck(check *structs.HealthCheck, chkType *structs.CheckType,
var rpcReq structs.NodeSpecificRequest
rpcReq.Datacenter = a.config.Datacenter
rpcReq.EnterpriseMeta = *a.agentEnterpriseMeta()
// The token to set is really important. The behavior below follows
// the same behavior as anti-entropy: we use the user-specified token
@ -3297,7 +3305,7 @@ func (a *Agent) loadServices(conf *config.RuntimeConfig, snap map[structs.CheckI
// unloadServices will deregister all services.
func (a *Agent) unloadServices() error {
for id := range a.State.Services(structs.WildcardEnterpriseMetaInDefaultPartition()) {
for id := range a.State.AllServices() {
if err := a.removeServiceLocked(id, false); err != nil {
return fmt.Errorf("Failed deregistering service '%s': %v", id, err)
}
@ -3411,7 +3419,7 @@ func (a *Agent) loadChecks(conf *config.RuntimeConfig, snap map[structs.CheckID]
// unloadChecks will deregister all checks known to the local agent.
func (a *Agent) unloadChecks() error {
for id := range a.State.Checks(structs.WildcardEnterpriseMetaInDefaultPartition()) {
for id := range a.State.AllChecks() {
if err := a.removeCheckLocked(id, false); err != nil {
return fmt.Errorf("Failed deregistering check '%s': %s", id, err)
}
@ -3423,7 +3431,7 @@ func (a *Agent) unloadChecks() error {
// checks. This is done before we reload our checks, so that we can properly
// restore into the same state.
func (a *Agent) snapshotCheckState() map[structs.CheckID]*structs.HealthCheck {
return a.State.Checks(structs.WildcardEnterpriseMetaInDefaultPartition())
return a.State.AllChecks()
}
// loadMetadata loads node metadata fields from the agent config and


@ -82,6 +82,7 @@ func (s *HTTPHandlers) AgentSelf(resp http.ResponseWriter, req *http.Request) (i
PrimaryDatacenter string
NodeName string
NodeID string
Partition string `json:",omitempty"`
Revision string
Server bool
Version string
@ -90,6 +91,7 @@ func (s *HTTPHandlers) AgentSelf(resp http.ResponseWriter, req *http.Request) (i
PrimaryDatacenter: s.agent.config.PrimaryDatacenter,
NodeName: s.agent.config.NodeName,
NodeID: string(s.agent.config.NodeID),
Partition: s.agent.config.PartitionOrEmpty(),
Revision: s.agent.config.Revision,
Server: s.agent.config.ServerMode,
Version: s.agent.config.Version,
@ -305,6 +307,12 @@ func (s *HTTPHandlers) AgentServices(resp http.ResponseWriter, req *http.Request
return nil, err
}
if !s.validateRequestPartition(resp, &entMeta) {
return nil, nil
}
// NOTE: we're explicitly fetching things in the requested partition and
// namespace here.
services := s.agent.State.Services(&entMeta)
if err := s.agent.filterServicesWithAuthorizer(authz, &services); err != nil {
return nil, err
@ -368,6 +376,10 @@ func (s *HTTPHandlers) AgentService(resp http.ResponseWriter, req *http.Request)
sid := structs.NewServiceID(id, &entMeta)
if !s.validateRequestPartition(resp, &entMeta) {
return nil, nil
}
dc := s.agent.config.Datacenter
resultHash, service, err := s.agent.LocalBlockingQuery(false, hash, queryOpts.MaxQueryTime,
@ -400,6 +412,7 @@ func (s *HTTPHandlers) AgentService(resp http.ResponseWriter, req *http.Request)
aSvc := buildAgentService(svc, dc)
reply := &aSvc
// TODO(partitions): do we need to do anything here?
rawHash, err := hashstructure.Hash(reply, nil)
if err != nil {
return "", nil, err
@ -432,6 +445,10 @@ func (s *HTTPHandlers) AgentChecks(resp http.ResponseWriter, req *http.Request)
return nil, err
}
if !s.validateRequestPartition(resp, &entMeta) {
return nil, nil
}
var filterExpression string
s.parseFilter(req, &filterExpression)
filter, err := bexpr.CreateFilter(filterExpression, nil, nil)
@ -439,6 +456,7 @@ func (s *HTTPHandlers) AgentChecks(resp http.ResponseWriter, req *http.Request)
return nil, err
}
// NOTE(partitions): this works because nodes exist in ONE partition
checks := s.agent.State.Checks(&entMeta)
if err := s.agent.filterChecksWithAuthorizer(authz, &checks); err != nil {
return nil, err
@ -485,6 +503,8 @@ func (s *HTTPHandlers) AgentMembers(resp http.ResponseWriter, req *http.Request)
}
}
// TODO(partitions): likely partitions+segment integration will take care of this
var members []serf.Member
if wan {
members = s.agent.WANMembers()
@ -521,6 +541,7 @@ func (s *HTTPHandlers) AgentJoin(resp http.ResponseWriter, req *http.Request) (i
wan := false
if other := req.URL.Query().Get("wan"); other != "" {
wan = true
// TODO(partitions) : block wan join
}
// Get the address
@ -616,6 +637,10 @@ func (s *HTTPHandlers) AgentRegisterCheck(resp http.ResponseWriter, req *http.Re
return nil, err
}
if !s.validateRequestPartition(resp, &args.EnterpriseMeta) {
return nil, nil
}
// Construct the health check.
health := args.HealthCheck(s.agent.config.NodeName)
@ -674,6 +699,10 @@ func (s *HTTPHandlers) AgentDeregisterCheck(resp http.ResponseWriter, req *http.
return nil, err
}
if !s.validateRequestPartition(resp, &checkID.EnterpriseMeta) {
return nil, nil
}
if err := s.agent.RemoveCheck(checkID, true); err != nil {
return nil, err
}
@ -740,7 +769,7 @@ func (s *HTTPHandlers) AgentCheckUpdate(resp http.ResponseWriter, req *http.Requ
return s.agentCheckUpdate(resp, req, checkID, update.Status, update.Output)
}
func (s *HTTPHandlers) agentCheckUpdate(_resp http.ResponseWriter, req *http.Request, checkID types.CheckID, status string, output string) (interface{}, error) {
func (s *HTTPHandlers) agentCheckUpdate(resp http.ResponseWriter, req *http.Request, checkID types.CheckID, status string, output string) (interface{}, error) {
cid := structs.NewCheckID(checkID, nil)
// Get the provided token, if any, and vet against any ACL policies.
@ -762,6 +791,10 @@ func (s *HTTPHandlers) agentCheckUpdate(_resp http.ResponseWriter, req *http.Req
return nil, err
}
if !s.validateRequestPartition(resp, &cid.EnterpriseMeta) {
return nil, nil
}
if err := s.agent.updateTTLCheck(cid, status, output); err != nil {
return nil, err
}
@ -833,6 +866,10 @@ func (s *HTTPHandlers) AgentHealthServiceByID(resp http.ResponseWriter, req *htt
return nil, err
}
if !s.validateRequestPartition(resp, &entMeta) {
return nil, nil
}
sid := structs.NewServiceID(serviceID, &entMeta)
dc := s.agent.config.Datacenter
@ -891,35 +928,38 @@ func (s *HTTPHandlers) AgentHealthServiceByName(resp http.ResponseWriter, req *h
return nil, acl.ErrPermissionDenied
}
if !s.validateRequestPartition(resp, &entMeta) {
return nil, nil
}
dc := s.agent.config.Datacenter
code := http.StatusNotFound
status := fmt.Sprintf("ServiceName %s Not Found", serviceName)
services := s.agent.State.Services(&entMeta)
services := s.agent.State.ServicesByName(structs.NewServiceName(serviceName, &entMeta))
result := make([]api.AgentServiceChecksInfo, 0, 16)
for _, service := range services {
if service.Service == serviceName {
sid := structs.NewServiceID(service.ID, &entMeta)
sid := structs.NewServiceID(service.ID, &entMeta)
scode, sstatus, healthChecks := agentHealthService(sid, s)
serviceInfo := buildAgentService(service, dc)
res := api.AgentServiceChecksInfo{
AggregatedStatus: sstatus,
Checks: healthChecks,
Service: &serviceInfo,
}
result = append(result, res)
// When service is not found, we ignore it and keep existing HTTP status
if code == http.StatusNotFound {
code = scode
status = sstatus
}
// We take the worst of all statuses, so we keep iterating
// passing: 200 < warning: 429 < critical: 503
if code < scode {
code = scode
status = sstatus
}
scode, sstatus, healthChecks := agentHealthService(sid, s)
serviceInfo := buildAgentService(service, dc)
res := api.AgentServiceChecksInfo{
AggregatedStatus: sstatus,
Checks: healthChecks,
Service: &serviceInfo,
}
result = append(result, res)
// When service is not found, we ignore it and keep existing HTTP status
if code == http.StatusNotFound {
code = scode
status = sstatus
}
// We take the worst of all statuses, so we keep iterating
// passing: 200 < warning: 429 < critical: 503
if code < scode {
code = scode
status = sstatus
}
}
if returnTextPlain(req) {
@ -965,6 +1005,10 @@ func (s *HTTPHandlers) AgentRegisterService(resp http.ResponseWriter, req *http.
return nil, err
}
if !s.validateRequestPartition(resp, &args.EnterpriseMeta) {
return nil, nil
}
// Get the node service.
ns := args.NodeService()
if ns.Weights != nil {
@ -1104,6 +1148,10 @@ func (s *HTTPHandlers) AgentDeregisterService(resp http.ResponseWriter, req *htt
return nil, err
}
if !s.validateRequestPartition(resp, &sid.EnterpriseMeta) {
return nil, nil
}
if err := s.agent.RemoveService(sid); err != nil {
return nil, err
}
@ -1403,6 +1451,10 @@ func (s *HTTPHandlers) AgentConnectCALeafCert(resp http.ResponseWriter, req *htt
args.MaxQueryTime = qOpts.MaxQueryTime
args.Token = qOpts.Token
if !s.validateRequestPartition(resp, &args.EnterpriseMeta) {
return nil, nil
}
raw, m, err := s.agent.cache.Get(req.Context(), cachetype.ConnectCALeafName, &args)
if err != nil {
return nil, err
@ -1442,6 +1494,10 @@ func (s *HTTPHandlers) AgentConnectAuthorize(resp http.ResponseWriter, req *http
return nil, BadRequestError{fmt.Sprintf("Request decode failed: %v", err)}
}
if !s.validateRequestPartition(resp, &authReq.EnterpriseMeta) {
return nil, nil
}
authz, reason, cacheMeta, err := s.agent.ConnectAuthorize(token, &authReq)
if err != nil {
return nil, err

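The AgentHealthServiceByName hunk above aggregates per-instance health into a single response code and always keeps the worst result: passing (200) < warning (429) < critical (503), with 404 meaning no matching instance was found. A small self-contained sketch of that aggregation logic (the helper is hypothetical, not the handler itself):

```go
package main

import (
	"fmt"
	"net/http"
)

// worstStatus folds per-instance health codes into one HTTP code using the
// ordering from the hunk above: passing 200 < warning 429 < critical 503.
// http.StatusNotFound means "no instance seen yet" and is replaced by the
// first real result.
func worstStatus(perInstance []int) int {
	code := http.StatusNotFound
	for _, scode := range perInstance {
		if code == http.StatusNotFound || scode > code {
			code = scode
		}
	}
	return code
}

func main() {
	fmt.Println(worstStatus([]int{200, 429, 200})) // 429: one instance is warning
	fmt.Println(worstStatus([]int{200, 503}))      // 503: the worst status wins
}
```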

@ -0,0 +1,13 @@
// +build !consulent
package agent
import (
"net/http"
"github.com/hashicorp/consul/agent/structs"
)
func (s *HTTPHandlers) validateRequestPartition(_ http.ResponseWriter, _ *structs.EnterpriseMeta) bool {
return true
}
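The `// +build !consulent` line on this new file is a Go build constraint: the stub is compiled only when the `consulent` tag is absent, so open-source builds get a partition check that always passes while enterprise builds can supply a real implementation behind the same signature. For readers unfamiliar with the syntax, the same constraint in both the legacy comment form used here and the newer `//go:build` form from Go 1.17 (shown for comparison only; this diff uses the legacy form) looks like:

```go
//go:build !consulent
// +build !consulent

package agent
```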


@ -635,7 +635,7 @@ func TestAgent_Service(t *testing.T) {
req, _ := http.NewRequest("GET", tt.url, nil)
// Inject the root token for tests that don't care about ACL
var token = "root"
token := "root"
if tt.tokenRules != "" {
// Create new token and use that.
token = testCreateToken(t, a, tt.tokenRules)
@ -2131,7 +2131,6 @@ func TestAgent_ForceLeave(t *testing.T) {
r.Fatalf("got status %q want %q", got, want)
}
})
}
func TestOpenMetricsMimeTypeHeaders(t *testing.T) {
@ -2185,7 +2184,7 @@ func TestAgent_ForceLeave_ACLDeny(t *testing.T) {
t.Run("operator write token", func(t *testing.T) {
// Create an ACL with operator read permissions.
var rules = `
rules := `
operator = "write"
`
opToken := testCreateToken(t, a, rules)
@ -2226,7 +2225,6 @@ func TestAgent_ForceLeavePrune(t *testing.T) {
if member.Status != serf.StatusFailed {
r.Fatalf("got status %q want %q", member.Status, serf.StatusFailed)
}
}
}
})
@ -2246,7 +2244,6 @@ func TestAgent_ForceLeavePrune(t *testing.T) {
r.Fatalf("want one member, got %v", m)
}
})
}
func TestAgent_RegisterCheck(t *testing.T) {
@ -2646,7 +2643,6 @@ func TestAgent_RegisterCheck_ACLDeny(t *testing.T) {
require.NoError(r, err)
})
})
}
func TestAgent_DeregisterCheck(t *testing.T) {
@ -3335,6 +3331,7 @@ func testAgent_RegisterService_TranslateKeys(t *testing.T, extraHCL string) {
{
"destination_type": "service",
"destination_namespace": "default",
"destination_partition": "default",
"destination_name": "db",
"local_bind_address": "` + tt.ip + `",
"local_bind_port": 1234,
@ -3363,6 +3360,7 @@ func testAgent_RegisterService_TranslateKeys(t *testing.T, extraHCL string) {
{
"destination_type": "service",
"destination_namespace": "default",
"destination_partition": "default",
"destination_name": "db",
"local_bind_address": "` + tt.ip + `",
"local_bind_port": 1234,
@ -3420,6 +3418,7 @@ func testAgent_RegisterService_TranslateKeys(t *testing.T, extraHCL string) {
DestinationType: structs.UpstreamDestTypeService,
DestinationName: "db",
DestinationNamespace: "default",
DestinationPartition: "default",
LocalBindAddress: tt.ip,
LocalBindPort: 1234,
Config: map[string]interface{}{
@ -3463,6 +3462,7 @@ func testAgent_RegisterService_TranslateKeys(t *testing.T, extraHCL string) {
DestinationType: structs.UpstreamDestTypeService,
DestinationName: "db",
DestinationNamespace: "default",
DestinationPartition: "default",
LocalBindAddress: tt.ip,
LocalBindPort: 1234,
Config: map[string]interface{}{
@ -3662,6 +3662,10 @@ func testAgent_RegisterService_UnmanagedConnectProxy(t *testing.T, extraHCL stri
args.Proxy.Upstreams[i].DestinationNamespace =
structs.DefaultEnterpriseMetaInDefaultPartition().NamespaceOrEmpty()
}
if args.Proxy.Upstreams[i].DestinationPartition == "" {
args.Proxy.Upstreams[i].DestinationPartition =
structs.DefaultEnterpriseMetaInDefaultPartition().PartitionOrEmpty()
}
}
require.Equal(t, args.Proxy, svc.Proxy.ToAPI())
@ -4180,7 +4184,7 @@ func testAgent_RegisterServiceDeregisterService_Sidecar(t *testing.T, extraHCL s
resp.Body.String())
// Sanity the target service registration
svcs := a.State.Services(nil)
svcs := a.State.AllServices()
// Parse the expected definition into a ServiceDefinition
var sd structs.ServiceDefinition
@ -4229,7 +4233,7 @@ func testAgent_RegisterServiceDeregisterService_Sidecar(t *testing.T, extraHCL s
require.NoError(err)
require.Nil(obj)
svcs := a.State.Services(nil)
svcs := a.State.AllServices()
_, ok = svcs[structs.NewServiceID(tt.wantNS.ID, nil)]
if tt.wantSidecarIDLeftAfterDereg {
require.True(ok, "removed non-sidecar service at "+tt.wantNS.ID)


@ -50,3 +50,7 @@ func (a *Agent) stopLicenseManager() {}
func (a *Agent) enterpriseStats() map[string]map[string]string {
return nil
}
func (a *Agent) agentEnterpriseMeta() *structs.EnterpriseMeta {
return structs.NodeEnterpriseMetaInDefaultPartition()
}


@ -129,7 +129,7 @@ var CatalogCounters = []prometheus.CounterDefinition{
func (s *HTTPHandlers) CatalogRegister(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
metrics.IncrCounterWithLabels([]string{"client", "api", "catalog_register"}, 1,
[]metrics.Label{{Name: "node", Value: s.nodeName()}})
s.nodeMetricsLabels())
var args structs.RegisterRequest
if err := s.parseEntMetaNoWildcard(req, &args.EnterpriseMeta); err != nil {
@ -152,17 +152,17 @@ func (s *HTTPHandlers) CatalogRegister(resp http.ResponseWriter, req *http.Reque
var out struct{}
if err := s.agent.RPC("Catalog.Register", &args, &out); err != nil {
metrics.IncrCounterWithLabels([]string{"client", "rpc", "error", "catalog_register"}, 1,
[]metrics.Label{{Name: "node", Value: s.nodeName()}})
s.nodeMetricsLabels())
return nil, err
}
metrics.IncrCounterWithLabels([]string{"client", "api", "success", "catalog_register"}, 1,
[]metrics.Label{{Name: "node", Value: s.nodeName()}})
s.nodeMetricsLabels())
return true, nil
}
func (s *HTTPHandlers) CatalogDeregister(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
metrics.IncrCounterWithLabels([]string{"client", "api", "catalog_deregister"}, 1,
[]metrics.Label{{Name: "node", Value: s.nodeName()}})
s.nodeMetricsLabels())
var args structs.DeregisterRequest
if err := s.parseEntMetaNoWildcard(req, &args.EnterpriseMeta); err != nil {
@ -184,17 +184,17 @@ func (s *HTTPHandlers) CatalogDeregister(resp http.ResponseWriter, req *http.Req
var out struct{}
if err := s.agent.RPC("Catalog.Deregister", &args, &out); err != nil {
metrics.IncrCounterWithLabels([]string{"client", "rpc", "error", "catalog_deregister"}, 1,
[]metrics.Label{{Name: "node", Value: s.nodeName()}})
s.nodeMetricsLabels())
return nil, err
}
metrics.IncrCounterWithLabels([]string{"client", "api", "success", "catalog_deregister"}, 1,
[]metrics.Label{{Name: "node", Value: s.nodeName()}})
s.nodeMetricsLabels())
return true, nil
}
func (s *HTTPHandlers) CatalogDatacenters(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
metrics.IncrCounterWithLabels([]string{"client", "api", "catalog_datacenters"}, 1,
[]metrics.Label{{Name: "node", Value: s.nodeName()}})
s.nodeMetricsLabels())
args := structs.DatacentersRequest{}
s.parseConsistency(resp, req, &args.QueryOptions)
@ -205,7 +205,7 @@ func (s *HTTPHandlers) CatalogDatacenters(resp http.ResponseWriter, req *http.Re
raw, m, err := s.agent.cache.Get(req.Context(), cachetype.CatalogDatacentersName, &args)
if err != nil {
metrics.IncrCounterWithLabels([]string{"client", "rpc", "error", "catalog_datacenters"}, 1,
[]metrics.Label{{Name: "node", Value: s.nodeName()}})
s.nodeMetricsLabels())
return nil, err
}
reply, ok := raw.(*[]string)
@ -218,19 +218,19 @@ func (s *HTTPHandlers) CatalogDatacenters(resp http.ResponseWriter, req *http.Re
} else {
if err := s.agent.RPC("Catalog.ListDatacenters", &args, &out); err != nil {
metrics.IncrCounterWithLabels([]string{"client", "rpc", "error", "catalog_datacenters"}, 1,
[]metrics.Label{{Name: "node", Value: s.nodeName()}})
s.nodeMetricsLabels())
return nil, err
}
}
metrics.IncrCounterWithLabels([]string{"client", "api", "success", "catalog_datacenters"}, 1,
[]metrics.Label{{Name: "node", Value: s.nodeName()}})
s.nodeMetricsLabels())
return out, nil
}
func (s *HTTPHandlers) CatalogNodes(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
metrics.IncrCounterWithLabels([]string{"client", "api", "catalog_nodes"}, 1,
[]metrics.Label{{Name: "node", Value: s.nodeName()}})
s.nodeMetricsLabels())
// Setup the request
args := structs.DCSpecificRequest{}
@ -241,7 +241,7 @@ func (s *HTTPHandlers) CatalogNodes(resp http.ResponseWriter, req *http.Request)
args.NodeMetaFilters = s.parseMetaFilter(req)
if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done {
metrics.IncrCounterWithLabels([]string{"client", "rpc", "error", "catalog_nodes"}, 1,
[]metrics.Label{{Name: "node", Value: s.nodeName()}})
s.nodeMetricsLabels())
return nil, nil
}
@ -265,15 +265,14 @@ RETRY_ONCE:
out.Nodes = make(structs.Nodes, 0)
}
metrics.IncrCounterWithLabels([]string{"client", "api", "success", "catalog_nodes"}, 1,
[]metrics.Label{{Name: "node", Value: s.nodeName()}})
s.nodeMetricsLabels())
return out.Nodes, nil
}
func (s *HTTPHandlers) CatalogServices(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
metrics.IncrCounterWithLabels([]string{"client", "api", "catalog_services"}, 1,
[]metrics.Label{{Name: "node", Value: s.nodeName()}})
s.nodeMetricsLabels())
// Set default DC
args := structs.DCSpecificRequest{}
if err := s.parseEntMetaNoWildcard(req, &args.EnterpriseMeta); err != nil {
return nil, err
@ -290,7 +289,7 @@ func (s *HTTPHandlers) CatalogServices(resp http.ResponseWriter, req *http.Reque
raw, m, err := s.agent.cache.Get(req.Context(), cachetype.CatalogListServicesName, &args)
if err != nil {
metrics.IncrCounterWithLabels([]string{"client", "rpc", "error", "catalog_services"}, 1,
[]metrics.Label{{Name: "node", Value: s.nodeName()}})
s.nodeMetricsLabels())
return nil, err
}
reply, ok := raw.(*structs.IndexedServices)
@ -304,7 +303,7 @@ func (s *HTTPHandlers) CatalogServices(resp http.ResponseWriter, req *http.Reque
RETRY_ONCE:
if err := s.agent.RPC("Catalog.ListServices", &args, &out); err != nil {
metrics.IncrCounterWithLabels([]string{"client", "rpc", "error", "catalog_services"}, 1,
[]metrics.Label{{Name: "node", Value: s.nodeName()}})
s.nodeMetricsLabels())
return nil, err
}
if args.QueryOptions.AllowStale && args.MaxStaleDuration > 0 && args.MaxStaleDuration < out.LastContact {
@ -321,7 +320,7 @@ func (s *HTTPHandlers) CatalogServices(resp http.ResponseWriter, req *http.Reque
out.Services = make(structs.Services)
}
metrics.IncrCounterWithLabels([]string{"client", "api", "success", "catalog_services"}, 1,
[]metrics.Label{{Name: "node", Value: s.nodeName()}})
s.nodeMetricsLabels())
return out.Services, nil
}
@ -342,9 +341,8 @@ func (s *HTTPHandlers) catalogServiceNodes(resp http.ResponseWriter, req *http.R
}
metrics.IncrCounterWithLabels([]string{"client", "api", metricsKey}, 1,
[]metrics.Label{{Name: "node", Value: s.nodeName()}})
s.nodeMetricsLabels())
// Set default DC
args := structs.ServiceSpecificRequest{Connect: connect}
if err := s.parseEntMetaNoWildcard(req, &args.EnterpriseMeta); err != nil {
return nil, err
@ -379,7 +377,7 @@ func (s *HTTPHandlers) catalogServiceNodes(resp http.ResponseWriter, req *http.R
raw, m, err := s.agent.cache.Get(req.Context(), cachetype.CatalogServicesName, &args)
if err != nil {
metrics.IncrCounterWithLabels([]string{"client", "rpc", "error", "catalog_service_nodes"}, 1,
[]metrics.Label{{Name: "node", Value: s.nodeName()}})
s.nodeMetricsLabels())
return nil, err
}
defer setCacheMeta(resp, &m)
@ -393,7 +391,7 @@ func (s *HTTPHandlers) catalogServiceNodes(resp http.ResponseWriter, req *http.R
RETRY_ONCE:
if err := s.agent.RPC("Catalog.ServiceNodes", &args, &out); err != nil {
metrics.IncrCounterWithLabels([]string{"client", "rpc", "error", "catalog_service_nodes"}, 1,
[]metrics.Label{{Name: "node", Value: s.nodeName()}})
s.nodeMetricsLabels())
return nil, err
}
if args.QueryOptions.AllowStale && args.MaxStaleDuration > 0 && args.MaxStaleDuration < out.LastContact {
@ -418,13 +416,13 @@ func (s *HTTPHandlers) catalogServiceNodes(resp http.ResponseWriter, req *http.R
}
}
metrics.IncrCounterWithLabels([]string{"client", "api", "success", "catalog_service_nodes"}, 1,
[]metrics.Label{{Name: "node", Value: s.nodeName()}})
s.nodeMetricsLabels())
return out.ServiceNodes, nil
}
func (s *HTTPHandlers) CatalogNodeServices(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
metrics.IncrCounterWithLabels([]string{"client", "api", "catalog_node_services"}, 1,
[]metrics.Label{{Name: "node", Value: s.nodeName()}})
s.nodeMetricsLabels())
// Set default Datacenter
args := structs.NodeSpecificRequest{}
@ -450,7 +448,7 @@ func (s *HTTPHandlers) CatalogNodeServices(resp http.ResponseWriter, req *http.R
RETRY_ONCE:
if err := s.agent.RPC("Catalog.NodeServices", &args, &out); err != nil {
metrics.IncrCounterWithLabels([]string{"client", "rpc", "error", "catalog_node_services"}, 1,
[]metrics.Label{{Name: "node", Value: s.nodeName()}})
s.nodeMetricsLabels())
return nil, err
}
if args.QueryOptions.AllowStale && args.MaxStaleDuration > 0 && args.MaxStaleDuration < out.LastContact {
@ -481,13 +479,13 @@ RETRY_ONCE:
}
}
metrics.IncrCounterWithLabels([]string{"client", "api", "success", "catalog_node_services"}, 1,
[]metrics.Label{{Name: "node", Value: s.nodeName()}})
s.nodeMetricsLabels())
return out.NodeServices, nil
}
func (s *HTTPHandlers) CatalogNodeServiceList(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
metrics.IncrCounterWithLabels([]string{"client", "api", "catalog_node_service_list"}, 1,
[]metrics.Label{{Name: "node", Value: s.nodeName()}})
s.nodeMetricsLabels())
// Set default Datacenter
args := structs.NodeSpecificRequest{}
@ -513,7 +511,7 @@ func (s *HTTPHandlers) CatalogNodeServiceList(resp http.ResponseWriter, req *htt
RETRY_ONCE:
if err := s.agent.RPC("Catalog.NodeServiceList", &args, &out); err != nil {
metrics.IncrCounterWithLabels([]string{"client", "rpc", "error", "catalog_node_service_list"}, 1,
[]metrics.Label{{Name: "node", Value: s.nodeName()}})
s.nodeMetricsLabels())
return nil, err
}
if args.QueryOptions.AllowStale && args.MaxStaleDuration > 0 && args.MaxStaleDuration < out.LastContact {
@ -531,13 +529,13 @@ RETRY_ONCE:
}
}
metrics.IncrCounterWithLabels([]string{"client", "api", "success", "catalog_node_service_list"}, 1,
[]metrics.Label{{Name: "node", Value: s.nodeName()}})
s.nodeMetricsLabels())
return &out.NodeServices, nil
}
func (s *HTTPHandlers) CatalogGatewayServices(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
metrics.IncrCounterWithLabels([]string{"client", "api", "catalog_gateway_services"}, 1,
[]metrics.Label{{Name: "node", Value: s.nodeName()}})
s.nodeMetricsLabels())
var args structs.ServiceSpecificRequest
@ -562,7 +560,7 @@ func (s *HTTPHandlers) CatalogGatewayServices(resp http.ResponseWriter, req *htt
RETRY_ONCE:
if err := s.agent.RPC("Catalog.GatewayServices", &args, &out); err != nil {
metrics.IncrCounterWithLabels([]string{"client", "rpc", "error", "catalog_gateway_services"}, 1,
[]metrics.Label{{Name: "node", Value: s.nodeName()}})
s.nodeMetricsLabels())
return nil, err
}
if args.QueryOptions.AllowStale && args.MaxStaleDuration > 0 && args.MaxStaleDuration < out.LastContact {
@ -573,6 +571,6 @@ RETRY_ONCE:
out.ConsistencyLevel = args.QueryOptions.ConsistencyLevel()
metrics.IncrCounterWithLabels([]string{"client", "api", "success", "catalog_gateway_services"}, 1,
[]metrics.Label{{Name: "node", Value: s.nodeName()}})
s.nodeMetricsLabels())
return out.Services, nil
}


@ -0,0 +1,9 @@
// +build !consulent
package agent
import "github.com/armon/go-metrics"
func (s *HTTPHandlers) nodeMetricsLabels() []metrics.Label {
return []metrics.Label{{Name: "node", Value: s.nodeName()}}
}
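This helper centralizes the metric labels that the catalog handlers previously built inline; every `[]metrics.Label{{Name: "node", Value: s.nodeName()}}` literal in the catalog endpoint hunks above becomes `s.nodeMetricsLabels()`, giving enterprise builds a single place to extend the label set (adding a partition label, for instance, is an assumption about the motivation rather than something this OSS stub does). A hedged sketch of the same call shape with go-metrics:

```go
package main

import "github.com/armon/go-metrics"

// nodeLabels plays the role of nodeMetricsLabels above: one place to build
// the labels attached to every client API metric.
func nodeLabels(nodeName string) []metrics.Label {
	return []metrics.Label{{Name: "node", Value: nodeName}}
}

func main() {
	// Same shape as the catalog handlers in the diff.
	metrics.IncrCounterWithLabels([]string{"client", "api", "catalog_register"}, 1,
		nodeLabels("node-1"))
}
```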


@ -108,7 +108,7 @@ func (c *CheckAlias) runLocal(stopCh chan struct{}) {
}
updateStatus := func() {
checks := c.Notify.Checks(structs.WildcardEnterpriseMetaInDefaultPartition())
checks := c.Notify.Checks(c.WildcardEnterpriseMetaForPartition())
checksList := make([]*structs.HealthCheck, 0, len(checks))
for _, chk := range checks {
checksList = append(checksList, chk)


@ -366,7 +366,7 @@ func (b *builder) build() (rt RuntimeConfig, err error) {
// process/merge some complex values
//
var dnsServiceTTL = map[string]time.Duration{}
dnsServiceTTL := map[string]time.Duration{}
for k, v := range c.DNS.ServiceTTL {
dnsServiceTTL[k] = b.durationVal(fmt.Sprintf("dns_config.service_ttl[%q]", k), &v)
}
@ -688,7 +688,7 @@ func (b *builder) build() (rt RuntimeConfig, err error) {
}
autoEncryptAllowTLS := boolVal(c.AutoEncrypt.AllowTLS)
autoConfig := b.autoConfigVal(c.AutoConfig)
autoConfig := b.autoConfigVal(c.AutoConfig, stringVal(c.Partition))
if autoEncryptAllowTLS || autoConfig.Enabled {
connectEnabled = true
}
@ -1181,10 +1181,9 @@ func validateBasicName(field, value string, allowEmpty bool) error {
// validate performs semantic validation of the runtime configuration.
func (b *builder) validate(rt RuntimeConfig) error {
// validContentPath defines a regexp for a valid content path name.
var validContentPath = regexp.MustCompile(`^[A-Za-z0-9/_-]+$`)
var hasVersion = regexp.MustCompile(`^/v\d+/$`)
validContentPath := regexp.MustCompile(`^[A-Za-z0-9/_-]+$`)
hasVersion := regexp.MustCompile(`^/v\d+/$`)
// ----------------------------------------------------------------
// check required params we cannot recover from first
//
@ -1651,7 +1650,6 @@ func (b *builder) serviceVal(v *ServiceDefinition) *structs.ServiceDefinition {
b.err = multierror.Append(
fmt.Errorf("service %s cannot have both socket path %s and address/port",
stringVal(v.Name), stringVal(v.SocketPath)), b.err)
}
return &structs.ServiceDefinition{
@ -1718,6 +1716,7 @@ func (b *builder) upstreamsVal(v []Upstream) structs.Upstreams {
ups[i] = structs.Upstream{
DestinationType: stringVal(u.DestinationType),
DestinationNamespace: stringVal(u.DestinationNamespace),
DestinationPartition: stringVal(u.DestinationPartition),
DestinationName: stringVal(u.DestinationName),
Datacenter: stringVal(u.Datacenter),
LocalBindAddress: stringVal(u.LocalBindAddress),
@ -2231,7 +2230,7 @@ func (b *builder) makeAddrs(pri []net.Addr, sec []*net.IPAddr, port int) []net.A
return x
}
func (b *builder) autoConfigVal(raw AutoConfigRaw) AutoConfig {
func (b *builder) autoConfigVal(raw AutoConfigRaw, agentPartition string) AutoConfig {
var val AutoConfig
val.Enabled = boolValWithDefault(raw.Enabled, false)
@ -2259,12 +2258,12 @@ func (b *builder) autoConfigVal(raw AutoConfigRaw) AutoConfig {
val.IPSANs = append(val.IPSANs, ip)
}
val.Authorizer = b.autoConfigAuthorizerVal(raw.Authorization)
val.Authorizer = b.autoConfigAuthorizerVal(raw.Authorization, agentPartition)
return val
}
func (b *builder) autoConfigAuthorizerVal(raw AutoConfigAuthorizationRaw) AutoConfigAuthorizer {
func (b *builder) autoConfigAuthorizerVal(raw AutoConfigAuthorizationRaw, agentPartition string) AutoConfigAuthorizer {
// Our config file syntax wraps the static authorizer configuration in a "static" stanza. However
// internally we do not support multiple configured authorization types so the RuntimeConfig just
// inlines the static one. While we can and probably should extend the authorization types in the
@ -2272,13 +2271,16 @@ func (b *builder) autoConfigAuthorizerVal(raw AutoConfigAuthorizationRaw) AutoCo
// needed right now so the configuration types will remain simplistic until they need to be otherwise.
var val AutoConfigAuthorizer
entMeta := structs.DefaultEnterpriseMetaInPartition(agentPartition)
entMeta.Normalize()
val.Enabled = boolValWithDefault(raw.Enabled, false)
val.ClaimAssertions = raw.Static.ClaimAssertions
val.AllowReuse = boolValWithDefault(raw.Static.AllowReuse, false)
val.AuthMethod = structs.ACLAuthMethod{
Name: "Auto Config Authorizer",
Type: "jwt",
EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
EnterpriseMeta: *entMeta,
Config: map[string]interface{}{
"JWTSupportedAlgs": raw.Static.JWTSupportedAlgs,
"BoundAudiences": raw.Static.BoundAudiences,
@ -2366,7 +2368,6 @@ func validateAutoConfigAuthorizer(rt RuntimeConfig) error {
// build out the validator to ensure that the given configuration was valid
null := hclog.NewNullLogger()
validator, err := ssoauth.NewValidator(null, &authz.AuthMethod)
if err != nil {
return fmt.Errorf("auto_config.authorization.static has invalid configuration: %v", err)
}


@ -511,6 +511,7 @@ type Upstream struct {
// on service definitions in various places.
DestinationType *string `mapstructure:"destination_type"`
DestinationNamespace *string `mapstructure:"destination_namespace"`
DestinationPartition *string `mapstructure:"destination_partition"`
DestinationName *string `mapstructure:"destination_name"`
// Datacenter that the service discovery request should be run against. Note


@ -4,4 +4,5 @@ package config
type EnterpriseRuntimeConfig struct{}
func (c *RuntimeConfig) PartitionOrEmpty() string { return "" }
func (c *RuntimeConfig) PartitionOrEmpty() string { return "" }
func (c *RuntimeConfig) PartitionOrDefault() string { return "" }


@ -5656,6 +5656,7 @@ func TestLoad_FullConfig(t *testing.T) {
{
DestinationType: "service", // Default should be explicitly filled
DestinationName: "KPtAj2cb",
DestinationPartition: defaultEntMeta.PartitionOrEmpty(),
DestinationNamespace: defaultEntMeta.NamespaceOrEmpty(),
LocalBindPort: 4051,
Config: map[string]interface{}{
@ -5665,6 +5666,7 @@ func TestLoad_FullConfig(t *testing.T) {
{
DestinationType: "prepared_query",
DestinationNamespace: "9nakw0td",
DestinationPartition: "part-9nakw0td",
DestinationName: "KSd8HsRl",
LocalBindPort: 11884,
LocalBindAddress: "127.24.88.0",
@ -5672,6 +5674,7 @@ func TestLoad_FullConfig(t *testing.T) {
{
DestinationType: "prepared_query",
DestinationNamespace: "9nakw0td",
DestinationPartition: "part-9nakw0td",
DestinationName: "placeholder",
LocalBindSocketPath: "/foo/bar/upstream",
LocalBindSocketMode: "0600",


@ -576,6 +576,7 @@ services = [
{
destination_type = "prepared_query"
destination_namespace = "9nakw0td"
destination_partition = "part-9nakw0td"
destination_name = "KSd8HsRl"
local_bind_port = 11884
local_bind_address = "127.24.88.0"
@ -583,6 +584,7 @@ services = [
{
destination_type = "prepared_query"
destination_namespace = "9nakw0td"
destination_partition = "part-9nakw0td"
destination_name = "placeholder"
local_bind_socket_path = "/foo/bar/upstream"
local_bind_socket_mode = "0600"


@ -589,6 +589,7 @@
{
"destination_name": "KSd8HsRl",
"destination_namespace": "9nakw0td",
"destination_partition": "part-9nakw0td",
"destination_type": "prepared_query",
"local_bind_address": "127.24.88.0",
"local_bind_port": 11884
@ -596,6 +597,7 @@
{
"destination_name": "placeholder",
"destination_namespace": "9nakw0td",
"destination_partition": "part-9nakw0td",
"destination_type": "prepared_query",
"local_bind_socket_path": "/foo/bar/upstream",
"local_bind_socket_mode": "0600"


@ -1457,6 +1457,7 @@ func (f *aclFilter) filterNodeServices(services **structs.NodeServices) {
}
var authzContext acl.AuthorizerContext
// TODO(partitions): put partition into this wildcard?
structs.WildcardEnterpriseMetaInDefaultPartition().FillAuthzContext(&authzContext)
if !f.allowNode((*services).Node.Node, &authzContext) {
*services = nil
@ -1481,6 +1482,7 @@ func (f *aclFilter) filterNodeServiceList(services **structs.NodeServiceList) {
}
var authzContext acl.AuthorizerContext
// TODO(partitions): put partition into this wildcard?
structs.WildcardEnterpriseMetaInDefaultPartition().FillAuthzContext(&authzContext)
if !f.allowNode((*services).Node.Node, &authzContext) {
*services = nil
@ -1578,6 +1580,7 @@ func (f *aclFilter) filterSessions(sessions *structs.Sessions) {
func (f *aclFilter) filterCoordinates(coords *structs.Coordinates) {
c := *coords
var authzContext acl.AuthorizerContext
// TODO(partitions): put partition into this wildcard?
structs.WildcardEnterpriseMetaInDefaultPartition().FillAuthzContext(&authzContext)
for i := 0; i < len(c); i++ {
@ -1619,6 +1622,7 @@ func (f *aclFilter) filterNodeDump(dump *structs.NodeDump) {
info := nd[i]
// Filter nodes
// TODO(partitions): put partition into this wildcard?
structs.WildcardEnterpriseMetaInDefaultPartition().FillAuthzContext(&authzContext)
if node := info.Node; !f.allowNode(node, &authzContext) {
f.logger.Debug("dropping node from result due to ACLs", "node", node)
@ -1687,6 +1691,7 @@ func (f *aclFilter) filterNodes(nodes *structs.Nodes) {
n := *nodes
var authzContext acl.AuthorizerContext
// TODO(partitions): put partition into this wildcard?
structs.WildcardEnterpriseMetaInDefaultPartition().FillAuthzContext(&authzContext)
for i := 0; i < len(n); i++ {


@ -6,11 +6,13 @@ import (
"github.com/armon/go-metrics"
"github.com/armon/go-metrics/prometheus"
"github.com/hashicorp/consul/agent/metadata"
"github.com/hashicorp/consul/types"
"github.com/hashicorp/raft"
autopilot "github.com/hashicorp/raft-autopilot"
"github.com/hashicorp/serf/serf"
"github.com/hashicorp/consul/agent/metadata"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/types"
)
var AutopilotGauges = []prometheus.GaugeDefinition{
@ -127,7 +129,7 @@ func (s *Server) autopilotServerFromMetadata(srv *metadata.Server) (*autopilot.S
// populate the node meta if there is any. When a node first joins or if
// there are ACL issues then this could be empty if the server has not
// yet been able to register itself in the catalog
_, node, err := s.fsm.State().GetNodeID(types.NodeID(srv.ID))
_, node, err := s.fsm.State().GetNodeID(types.NodeID(srv.ID), structs.NodeEnterpriseMetaInDefaultPartition())
if err != nil {
return nil, fmt.Errorf("error retrieving node from state store: %w", err)
}


@ -474,11 +474,10 @@ func (c *Catalog) ListNodes(args *structs.DCSpecificRequest, reply *structs.Inde
&reply.QueryMeta,
func(ws memdb.WatchSet, state *state.Store) error {
var err error
// TODO(partitions)
if len(args.NodeMetaFilters) > 0 {
reply.Index, reply.Nodes, err = state.NodesByMeta(ws, args.NodeMetaFilters, nil)
reply.Index, reply.Nodes, err = state.NodesByMeta(ws, args.NodeMetaFilters, &args.EnterpriseMeta)
} else {
reply.Index, reply.Nodes, err = state.Nodes(ws, nil)
reply.Index, reply.Nodes, err = state.Nodes(ws, &args.EnterpriseMeta)
}
if err != nil {
return err


@ -61,6 +61,7 @@ func (c *Client) setupSerf(conf *serf.Config, ch chan serf.Event, path string) (
nodeID: c.config.NodeID,
nodeName: c.config.NodeName,
segment: c.config.Segment,
server: false,
}
conf.SnapshotPath = filepath.Join(c.config.DataDir, path)
@ -68,7 +69,7 @@ func (c *Client) setupSerf(conf *serf.Config, ch chan serf.Event, path string) (
return nil, err
}
addEnterpriseSerfTags(conf.Tags)
addEnterpriseSerfTags(conf.Tags, c.config.agentEnterpriseMeta())
conf.ReconnectTimeoutOverride = libserf.NewReconnectOverride(c.logger)


@ -479,7 +479,7 @@ func (c *ConfigEntry) ResolveServiceConfig(args *structs.ServiceConfigRequest, r
cfgMap := make(map[string]interface{})
upstreamDefaults.MergeInto(cfgMap)
wildcard := structs.NewServiceID(structs.WildcardSpecifier, structs.WildcardEnterpriseMetaInDefaultPartition())
wildcard := structs.NewServiceID(structs.WildcardSpecifier, args.WildcardEnterpriseMetaForPartition())
usConfigs[wildcard] = cfgMap
}
}


@ -0,0 +1,9 @@
// +build !consulent
package consul
import "github.com/hashicorp/consul/agent/structs"
func (c *Config) agentEnterpriseMeta() *structs.EnterpriseMeta {
return structs.NodeEnterpriseMetaInDefaultPartition()
}


@ -86,10 +86,13 @@ func (c *Coordinate) batchApplyUpdates() error {
break
}
update.EnterpriseMeta.Normalize()
updates[i] = &structs.Coordinate{
Node: update.Node,
Segment: update.Segment,
Coord: update.Coord,
Node: update.Node,
Segment: update.Segment,
Coord: update.Coord,
Partition: update.PartitionOrEmpty(),
}
i++
}
@ -138,12 +141,17 @@ func (c *Coordinate) Update(args *structs.CoordinateUpdateRequest, reply *struct
}
// Fetch the ACL token, if any, and enforce the node policy if enabled.
authz, err := c.srv.ResolveToken(args.Token)
authz, err := c.srv.ResolveTokenAndDefaultMeta(args.Token, &args.EnterpriseMeta, nil)
if err != nil {
return err
}
if err := c.srv.validateEnterpriseRequest(&args.EnterpriseMeta, false); err != nil {
return err
}
var authzContext acl.AuthorizerContext
structs.DefaultEnterpriseMetaInDefaultPartition().FillAuthzContext(&authzContext)
args.DefaultEnterpriseMetaForPartition().FillAuthzContext(&authzContext)
if authz.NodeWrite(args.Node, &authzContext) != acl.Allow {
return acl.ErrPermissionDenied
}
@ -166,6 +174,8 @@ func (c *Coordinate) ListDatacenters(args *struct{}, reply *[]structs.Datacenter
return err
}
// TODO(partitions):
var out []structs.DatacenterMap
// Strip the datacenter suffixes from all the node names.
@ -194,11 +204,19 @@ func (c *Coordinate) ListNodes(args *structs.DCSpecificRequest, reply *structs.I
return err
}
_, err := c.srv.ResolveTokenAndDefaultMeta(args.Token, &args.EnterpriseMeta, nil)
if err != nil {
return err
}
if err := c.srv.validateEnterpriseRequest(&args.EnterpriseMeta, false); err != nil {
return err
}
return c.srv.blockingQuery(&args.QueryOptions,
&reply.QueryMeta,
func(ws memdb.WatchSet, state *state.Store) error {
// TODO(partitions)
index, coords, err := state.Coordinates(ws, nil)
index, coords, err := state.Coordinates(ws, &args.EnterpriseMeta)
if err != nil {
return err
}
@ -220,21 +238,27 @@ func (c *Coordinate) Node(args *structs.NodeSpecificRequest, reply *structs.Inde
// Fetch the ACL token, if any, and enforce the node policy if enabled.
authz, err := c.srv.ResolveToken(args.Token)
authz, err := c.srv.ResolveTokenAndDefaultMeta(args.Token, &args.EnterpriseMeta, nil)
if err != nil {
return err
}
if err := c.srv.validateEnterpriseRequest(&args.EnterpriseMeta, false); err != nil {
return err
}
var authzContext acl.AuthorizerContext
structs.WildcardEnterpriseMetaInDefaultPartition().FillAuthzContext(&authzContext)
args.WildcardEnterpriseMetaForPartition().FillAuthzContext(&authzContext)
if authz.NodeRead(args.Node, &authzContext) != acl.Allow {
return acl.ErrPermissionDenied
}
// TODO(partitions): do we have to add EnterpriseMeta to the reply like in Catalog.ListServices?
return c.srv.blockingQuery(&args.QueryOptions,
&reply.QueryMeta,
func(ws memdb.WatchSet, state *state.Store) error {
// TODO(partitions)
index, nodeCoords, err := state.Coordinate(ws, args.Node, nil)
index, nodeCoords, err := state.Coordinate(ws, args.Node, &args.EnterpriseMeta)
if err != nil {
return err
}
@ -242,9 +266,10 @@ func (c *Coordinate) Node(args *structs.NodeSpecificRequest, reply *structs.Inde
var coords structs.Coordinates
for segment, coord := range nodeCoords {
coords = append(coords, &structs.Coordinate{
Node: args.Node,
Segment: segment,
Coord: coord,
Node: args.Node,
Segment: segment,
Partition: args.PartitionOrEmpty(),
Coord: coord,
})
}
reply.Index, reply.Coordinates = index, coords
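The Coordinate endpoints above all adopt the same access-control preamble: resolve the token while defaulting the request's enterprise meta, validate that this server can serve the requested partition/namespace, then scope the authorizer context before enforcing node policy. A condensed sketch of that sequence, assuming (as the hunks suggest) that ResolveTokenAndDefaultMeta returns an acl.Authorizer; the helper itself is not in the diff:

```go
package consul

import (
	"github.com/hashicorp/consul/acl"
	"github.com/hashicorp/consul/agent/structs"
)

// nodeReadPreamble condenses the checks added to Coordinate.Node above.
func (c *Coordinate) nodeReadPreamble(args *structs.NodeSpecificRequest) (acl.Authorizer, error) {
	// Resolve the token and default the request's enterprise meta in one step.
	authz, err := c.srv.ResolveTokenAndDefaultMeta(args.Token, &args.EnterpriseMeta, nil)
	if err != nil {
		return nil, err
	}
	// Reject requests for partitions/namespaces this server cannot serve.
	if err := c.srv.validateEnterpriseRequest(&args.EnterpriseMeta, false); err != nil {
		return nil, err
	}
	var authzContext acl.AuthorizerContext
	args.WildcardEnterpriseMetaForPartition().FillAuthzContext(&authzContext)
	if authz.NodeRead(args.Node, &authzContext) != acl.Allow {
		return nil, acl.ErrPermissionDenied
	}
	return authz, nil
}
```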


@ -84,14 +84,12 @@ func TestCoordinate_Update(t *testing.T) {
// Make sure the updates did not yet apply because the update period
// hasn't expired.
state := s1.fsm.State()
// TODO(partitions)
_, c, err := state.Coordinate(nil, "node1", nil)
if err != nil {
t.Fatalf("err: %v", err)
}
require.Equal(t, lib.CoordinateSet{}, c)
// TODO(partitions)
_, c, err = state.Coordinate(nil, "node2", nil)
if err != nil {
t.Fatalf("err: %v", err)
@ -107,7 +105,6 @@ func TestCoordinate_Update(t *testing.T) {
// Wait a while and the updates should get picked up.
time.Sleep(3 * s1.config.CoordinateUpdatePeriod)
// TODO(partitions)
_, c, err = state.Coordinate(nil, "node1", nil)
if err != nil {
t.Fatalf("err: %v", err)
@ -117,7 +114,6 @@ func TestCoordinate_Update(t *testing.T) {
}
require.Equal(t, expected, c)
// TODO(partitions)
_, c, err = state.Coordinate(nil, "node2", nil)
if err != nil {
t.Fatalf("err: %v", err)
@ -157,7 +153,6 @@ func TestCoordinate_Update(t *testing.T) {
time.Sleep(3 * s1.config.CoordinateUpdatePeriod)
numDropped := 0
for i := 0; i < spamLen; i++ {
// TODO(partitions)
_, c, err = state.Coordinate(nil, fmt.Sprintf("bogusnode%d", i), nil)
if err != nil {
t.Fatalf("err: %v", err)


@ -55,6 +55,7 @@ func (c *DiscoveryChain) Get(args *structs.DiscoveryChainRequest, reply *structs
req := discoverychain.CompileRequest{
ServiceName: args.Name,
EvaluateInNamespace: entMeta.NamespaceOrDefault(),
EvaluateInPartition: entMeta.PartitionOrDefault(),
EvaluateInDatacenter: evalDC,
UseInDatacenter: c.srv.config.Datacenter,
OverrideMeshGateway: args.OverrideMeshGateway,


@ -15,6 +15,7 @@ import (
type CompileRequest struct {
ServiceName string
EvaluateInNamespace string
EvaluateInPartition string
EvaluateInDatacenter string
EvaluateInTrustDomain string
UseInDatacenter string // where the results will be used from
@ -906,11 +907,9 @@ RESOLVE_AGAIN:
// TODO (mesh-gateway)- maybe allow using a gateway within a datacenter at some point
if target.Datacenter == c.useInDatacenter {
target.MeshGateway.Mode = structs.MeshGatewayModeDefault
} else if target.External {
// Bypass mesh gateways if it is an external service.
target.MeshGateway.Mode = structs.MeshGatewayModeDefault
} else {
// Default mesh gateway settings
if serviceDefault := c.entries.GetService(targetID); serviceDefault != nil {


@ -71,7 +71,7 @@ func (s *Server) validateEnterpriseIntentionNamespace(ns string, _ bool) error {
return errors.New("Namespaces is a Consul Enterprise feature")
}
func addEnterpriseSerfTags(_ map[string]string) {
func addEnterpriseSerfTags(_ map[string]string, _ *structs.EnterpriseMeta) {
// do nothing
}


@ -47,6 +47,7 @@ func (t *txnResultsFilter) Filter(i int) bool {
result.KV.EnterpriseMeta.FillAuthzContext(&authzContext)
return t.authorizer.KeyRead(result.KV.Key, &authzContext) != acl.Allow
case result.Node != nil:
// TODO(partitions): put partition into this wildcard?
structs.WildcardEnterpriseMetaInDefaultPartition().FillAuthzContext(&authzContext)
return t.authorizer.NodeRead(result.Node.Node, &authzContext) != acl.Allow
case result.Service != nil:


@ -692,9 +692,9 @@ func TestFSM_SnapshotRestore_OSS(t *testing.T) {
require.Equal(t, fedState2, fedStateLoaded2)
// Verify usage data is correctly updated
idx, nodeCount, err := fsm2.state.NodeCount()
idx, nodeUsage, err := fsm2.state.NodeUsage()
require.NoError(t, err)
require.Equal(t, len(nodes), nodeCount)
require.Equal(t, len(nodes), nodeUsage.Nodes)
require.NotZero(t, idx)
// Verify system metadata is restored.


@ -532,6 +532,8 @@ func (s *Server) initializeACLs(ctx context.Context, upgrade bool) error {
s.logger.Info("initializing acls")
// TODO(partitions): initialize acls in all of the partitions?
// Create/Upgrade the builtin global-management policy
_, policy, err := s.fsm.State().ACLPolicyGetByID(nil, structs.ACLPolicyGlobalManagementID, structs.DefaultEnterpriseMetaInDefaultPartition())
if err != nil {
@ -1110,9 +1112,13 @@ func (s *Server) bootstrapConfigEntries(entries []structs.ConfigEntry) error {
// reconcileReaped is used to reconcile nodes that have failed and been reaped
// from Serf but remain in the catalog. This is done by looking for unknown nodes with serfHealth checks registered.
// We generate a "reap" event to cause the node to be cleaned up.
func (s *Server) reconcileReaped(known map[string]struct{}) error {
func (s *Server) reconcileReaped(known map[string]struct{}, nodeEntMeta *structs.EnterpriseMeta) error {
if nodeEntMeta == nil {
nodeEntMeta = structs.NodeEnterpriseMetaInDefaultPartition()
}
state := s.fsm.State()
_, checks, err := state.ChecksInState(nil, api.HealthAny, structs.DefaultEnterpriseMetaInDefaultPartition())
_, checks, err := state.ChecksInState(nil, api.HealthAny, nodeEntMeta)
if err != nil {
return err
}
@ -1128,7 +1134,7 @@ func (s *Server) reconcileReaped(known map[string]struct{}) error {
}
// Get the node services, look for ConsulServiceID
_, services, err := state.NodeServices(nil, check.Node, structs.DefaultEnterpriseMetaInDefaultPartition())
_, services, err := state.NodeServices(nil, check.Node, nodeEntMeta)
if err != nil {
return err
}
@ -1139,8 +1145,7 @@ func (s *Server) reconcileReaped(known map[string]struct{}) error {
CHECKS:
for _, service := range services.Services {
if service.ID == structs.ConsulServiceID {
// TODO(partitions)
_, node, err := state.GetNode(check.Node, nil)
_, node, err := state.GetNode(check.Node, nodeEntMeta)
if err != nil {
s.logger.Error("Unable to look up node with name", "name", check.Node, "error", err)
continue CHECKS
@ -1165,6 +1170,7 @@ func (s *Server) reconcileReaped(known map[string]struct{}) error {
"role": "node",
},
}
addEnterpriseSerfTags(member.Tags, nodeEntMeta)
// Create the appropriate tags if this was a server node
if serverPort > 0 {
@ -1175,7 +1181,7 @@ func (s *Server) reconcileReaped(known map[string]struct{}) error {
}
// Attempt to reap this member
if err := s.handleReapMember(member); err != nil {
if err := s.handleReapMember(member, nodeEntMeta); err != nil {
return err
}
}
@ -1187,23 +1193,28 @@ func (s *Server) reconcileReaped(known map[string]struct{}) error {
func (s *Server) reconcileMember(member serf.Member) error {
// Check if this is a member we should handle
if !s.shouldHandleMember(member) {
// TODO(partition): log the partition name
s.logger.Warn("skipping reconcile of node", "member", member)
return nil
}
defer metrics.MeasureSince([]string{"leader", "reconcileMember"}, time.Now())
nodeEntMeta := getSerfMemberEnterpriseMeta(member)
var err error
switch member.Status {
case serf.StatusAlive:
err = s.handleAliveMember(member)
err = s.handleAliveMember(member, nodeEntMeta)
case serf.StatusFailed:
err = s.handleFailedMember(member)
err = s.handleFailedMember(member, nodeEntMeta)
case serf.StatusLeft:
err = s.handleLeftMember(member)
err = s.handleLeftMember(member, nodeEntMeta)
case StatusReap:
err = s.handleReapMember(member)
err = s.handleReapMember(member, nodeEntMeta)
}
if err != nil {
s.logger.Error("failed to reconcile member",
// TODO(partition): log the partition name
"member", member,
"error", err,
)
@ -1231,7 +1242,11 @@ func (s *Server) shouldHandleMember(member serf.Member) bool {
// handleAliveMember is used to ensure the node
// is registered, with a passing health check.
func (s *Server) handleAliveMember(member serf.Member) error {
func (s *Server) handleAliveMember(member serf.Member, nodeEntMeta *structs.EnterpriseMeta) error {
if nodeEntMeta == nil {
nodeEntMeta = structs.NodeEnterpriseMetaInDefaultPartition()
}
// Register consul service if a server
var service *structs.NodeService
if valid, parts := metadata.IsConsulServer(member); valid {
@ -1243,6 +1258,7 @@ func (s *Server) handleAliveMember(member serf.Member) error {
Passing: 1,
Warning: 1,
},
EnterpriseMeta: *nodeEntMeta,
Meta: map[string]string{
// DEPRECATED - remove nonvoter in favor of read_replica in a future version of consul
"non_voter": strconv.FormatBool(member.Tags["nonvoter"] == "1"),
@ -1263,8 +1279,7 @@ func (s *Server) handleAliveMember(member serf.Member) error {
// Check if the node exists
state := s.fsm.State()
// TODO(partitions)
_, node, err := state.GetNode(member.Name, nil)
_, node, err := state.GetNode(member.Name, nodeEntMeta)
if err != nil {
return err
}
@ -1272,7 +1287,7 @@ func (s *Server) handleAliveMember(member serf.Member) error {
// Check if the associated service is available
if service != nil {
match := false
_, services, err := state.NodeServices(nil, member.Name, structs.DefaultEnterpriseMetaInDefaultPartition())
_, services, err := state.NodeServices(nil, member.Name, nodeEntMeta)
if err != nil {
return err
}
@ -1290,7 +1305,7 @@ func (s *Server) handleAliveMember(member serf.Member) error {
}
// Check if the serfCheck is in the passing state
_, checks, err := state.NodeChecks(nil, member.Name, structs.DefaultEnterpriseMetaInDefaultPartition())
_, checks, err := state.NodeChecks(nil, member.Name, nodeEntMeta)
if err != nil {
return err
}
@ -1317,6 +1332,7 @@ AFTER_CHECK:
Status: api.HealthPassing,
Output: structs.SerfCheckAliveOutput,
},
EnterpriseMeta: *nodeEntMeta,
}
if node != nil {
req.TaggedAddresses = node.TaggedAddresses
@ -1329,11 +1345,14 @@ AFTER_CHECK:
// handleFailedMember is used to mark the node's status
// as being critical, along with all checks as unknown.
func (s *Server) handleFailedMember(member serf.Member) error {
func (s *Server) handleFailedMember(member serf.Member, nodeEntMeta *structs.EnterpriseMeta) error {
if nodeEntMeta == nil {
nodeEntMeta = structs.NodeEnterpriseMetaInDefaultPartition()
}
// Check if the node exists
state := s.fsm.State()
// TODO(partitions)
_, node, err := state.GetNode(member.Name, nil)
_, node, err := state.GetNode(member.Name, nodeEntMeta)
if err != nil {
return err
}
@ -1343,9 +1362,11 @@ func (s *Server) handleFailedMember(member serf.Member) error {
return nil
}
// TODO(partitions): get the ent meta by parsing serf tags
if node.Address == member.Addr.String() {
// Check if the serfCheck is in the critical state
_, checks, err := state.NodeChecks(nil, member.Name, structs.DefaultEnterpriseMetaInDefaultPartition())
_, checks, err := state.NodeChecks(nil, member.Name, nodeEntMeta)
if err != nil {
return err
}
@ -1359,10 +1380,11 @@ func (s *Server) handleFailedMember(member serf.Member) error {
// Register with the catalog
req := structs.RegisterRequest{
Datacenter: s.config.Datacenter,
Node: member.Name,
ID: types.NodeID(member.Tags["id"]),
Address: member.Addr.String(),
Datacenter: s.config.Datacenter,
Node: member.Name,
EnterpriseMeta: *nodeEntMeta,
ID: types.NodeID(member.Tags["id"]),
Address: member.Addr.String(),
Check: &structs.HealthCheck{
Node: member.Name,
CheckID: structs.SerfCheckID,
@ -1381,18 +1403,22 @@ func (s *Server) handleFailedMember(member serf.Member) error {
// handleLeftMember is used to handle members that gracefully
// left. They are deregistered if necessary.
func (s *Server) handleLeftMember(member serf.Member) error {
return s.handleDeregisterMember("left", member)
func (s *Server) handleLeftMember(member serf.Member, nodeEntMeta *structs.EnterpriseMeta) error {
return s.handleDeregisterMember("left", member, nodeEntMeta)
}
// handleReapMember is used to handle members that have been
// reaped after a prolonged failure. They are deregistered.
func (s *Server) handleReapMember(member serf.Member) error {
return s.handleDeregisterMember("reaped", member)
func (s *Server) handleReapMember(member serf.Member, nodeEntMeta *structs.EnterpriseMeta) error {
return s.handleDeregisterMember("reaped", member, nodeEntMeta)
}
// handleDeregisterMember is used to deregister a member of a given reason
func (s *Server) handleDeregisterMember(reason string, member serf.Member) error {
func (s *Server) handleDeregisterMember(reason string, member serf.Member, nodeEntMeta *structs.EnterpriseMeta) error {
if nodeEntMeta == nil {
nodeEntMeta = structs.NodeEnterpriseMetaInDefaultPartition()
}
// Do not deregister ourself. This can only happen if the current leader
// is leaving. Instead, we should allow a follower to take-over and
// deregister us later.
@ -1410,8 +1436,7 @@ func (s *Server) handleDeregisterMember(reason string, member serf.Member) error
// Check if the node does not exist
state := s.fsm.State()
// TODO(partitions)
_, node, err := state.GetNode(member.Name, nil)
_, node, err := state.GetNode(member.Name, nodeEntMeta)
if err != nil {
return err
}
@ -1422,8 +1447,9 @@ func (s *Server) handleDeregisterMember(reason string, member serf.Member) error
// Deregister the node
s.logger.Info("deregistering member", "member", member.Name, "reason", reason)
req := structs.DeregisterRequest{
Datacenter: s.config.Datacenter,
Node: member.Name,
Datacenter: s.config.Datacenter,
Node: member.Name,
EnterpriseMeta: *nodeEntMeta,
}
_, err = s.raftApply(structs.DeregisterRequestType, &req)
return err
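
The nil-handling at the top of these handlers is a shared convention: call sites that predate partitions can keep passing nil and the handler falls back to the node's default partition. A minimal stand-alone sketch of that convention, with simplified stand-in types rather than the real Consul structs:

```go
package main

import "fmt"

// entMeta is a simplified stand-in for structs.EnterpriseMeta.
type entMeta struct{ Partition string }

// defaultNodeMeta plays the role of structs.NodeEnterpriseMetaInDefaultPartition.
func defaultNodeMeta() *entMeta { return &entMeta{Partition: "default"} }

// deregister sketches the nil-defaulting convention shared by the
// handleFailedMember/handleLeftMember/handleReapMember/handleDeregisterMember
// family: callers may pass nil and still operate on the default partition.
func deregister(node string, meta *entMeta) string {
	if meta == nil {
		meta = defaultNodeMeta()
	}
	return fmt.Sprintf("deregister %s in partition %q", node, meta.Partition)
}

func main() {
	fmt.Println(deregister("node1", nil))                            // default partition
	fmt.Println(deregister("node2", &entMeta{Partition: "team-a"})) // explicit partition
}
```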

View File

@ -49,7 +49,6 @@ func TestLeader_RegisterMember(t *testing.T) {
// Client should be registered
state := s1.fsm.State()
retry.Run(t, func(r *retry.R) {
// TODO(partitions)
_, node, err := state.GetNode(c1.config.NodeName, nil)
if err != nil {
r.Fatalf("err: %v", err)
@ -79,7 +78,6 @@ func TestLeader_RegisterMember(t *testing.T) {
// Server should be registered
retry.Run(t, func(r *retry.R) {
// TODO(partitions)
_, node, err := state.GetNode(s1.config.NodeName, nil)
if err != nil {
r.Fatalf("err: %v", err)
@ -129,7 +127,6 @@ func TestLeader_FailedMember(t *testing.T) {
// Should be registered
state := s1.fsm.State()
retry.Run(t, func(r *retry.R) {
// TODO(partitions)
_, node, err := state.GetNode(c1.config.NodeName, nil)
if err != nil {
r.Fatalf("err: %v", err)
@ -191,7 +188,6 @@ func TestLeader_LeftMember(t *testing.T) {
// Should be registered
retry.Run(t, func(r *retry.R) {
// TODO(partitions)
_, node, err := state.GetNode(c1.config.NodeName, nil)
if err != nil {
r.Fatalf("err: %v", err)
@ -207,7 +203,6 @@ func TestLeader_LeftMember(t *testing.T) {
// Should be deregistered
retry.Run(t, func(r *retry.R) {
// TODO(partitions)
_, node, err := state.GetNode(c1.config.NodeName, nil)
if err != nil {
r.Fatalf("err: %v", err)
@ -243,7 +238,6 @@ func TestLeader_ReapMember(t *testing.T) {
// Should be registered
retry.Run(t, func(r *retry.R) {
// TODO(partitions)
_, node, err := state.GetNode(c1.config.NodeName, nil)
if err != nil {
r.Fatalf("err: %v", err)
@ -269,7 +263,6 @@ func TestLeader_ReapMember(t *testing.T) {
// anti-entropy will put it back.
reaped := false
for start := time.Now(); time.Since(start) < 5*time.Second; {
// TODO(partitions)
_, node, err := state.GetNode(c1.config.NodeName, nil)
if err != nil {
t.Fatalf("err: %v", err)
@ -367,7 +360,7 @@ func TestLeader_CheckServersMeta(t *testing.T) {
member.Tags["nonvoter"] = "1"
member.Tags["read_replica"] = "1"
member.Tags["build"] = versionToExpect
err := s1.handleAliveMember(member)
err := s1.handleAliveMember(member, nil)
if err != nil {
r.Fatalf("Unexpected error :%v", err)
}
@ -439,7 +432,6 @@ func TestLeader_ReapServer(t *testing.T) {
// s3 should be registered
retry.Run(t, func(r *retry.R) {
// TODO(partitions)
_, node, err := state.GetNode(s3.config.NodeName, nil)
if err != nil {
r.Fatalf("err: %v", err)
@ -454,14 +446,13 @@ func TestLeader_ReapServer(t *testing.T) {
knownMembers[s1.config.NodeName] = struct{}{}
knownMembers[s2.config.NodeName] = struct{}{}
err := s1.reconcileReaped(knownMembers)
err := s1.reconcileReaped(knownMembers, nil)
if err != nil {
t.Fatalf("Unexpected error :%v", err)
}
// s3 should be deregistered
retry.Run(t, func(r *retry.R) {
// TODO(partitions)
_, node, err := state.GetNode(s3.config.NodeName, nil)
if err != nil {
r.Fatalf("err: %v", err)
@ -517,7 +508,6 @@ func TestLeader_Reconcile_ReapMember(t *testing.T) {
// Node should be gone
state := s1.fsm.State()
// TODO(partitions)
_, node, err := state.GetNode("no-longer-around", nil)
if err != nil {
t.Fatalf("err: %v", err)
@ -551,7 +541,6 @@ func TestLeader_Reconcile(t *testing.T) {
// Should not be registered
state := s1.fsm.State()
// TODO(partitions)
_, node, err := state.GetNode(c1.config.NodeName, nil)
if err != nil {
t.Fatalf("err: %v", err)
@ -562,7 +551,6 @@ func TestLeader_Reconcile(t *testing.T) {
// Should be registered
retry.Run(t, func(r *retry.R) {
// TODO(partitions)
_, node, err := state.GetNode(c1.config.NodeName, nil)
if err != nil {
r.Fatalf("err: %v", err)
@ -595,7 +583,6 @@ func TestLeader_Reconcile_Races(t *testing.T) {
state := s1.fsm.State()
var nodeAddr string
retry.Run(t, func(r *retry.R) {
// TODO(partitions)
_, node, err := state.GetNode(c1.config.NodeName, nil)
if err != nil {
r.Fatalf("err: %v", err)
@ -632,7 +619,6 @@ func TestLeader_Reconcile_Races(t *testing.T) {
if err := s1.reconcile(); err != nil {
t.Fatalf("err: %v", err)
}
// TODO(partitions)
_, node, err := state.GetNode(c1.config.NodeName, nil)
if err != nil {
t.Fatalf("err: %v", err)
@ -657,7 +643,6 @@ func TestLeader_Reconcile_Races(t *testing.T) {
})
// Make sure the metadata didn't get clobbered.
// TODO(partitions)
_, node, err = state.GetNode(c1.config.NodeName, nil)
if err != nil {
t.Fatalf("err: %v", err)
@ -773,7 +758,6 @@ func TestLeader_LeftLeader(t *testing.T) {
// Verify the old leader is deregistered
state := remain.fsm.State()
retry.Run(t, func(r *retry.R) {
// TODO(partitions)
_, node, err := state.GetNode(leader.config.NodeName, nil)
if err != nil {
r.Fatalf("err: %v", err)

View File

@ -3,10 +3,11 @@ package consul
import (
"fmt"
"github.com/hashicorp/consul/agent/metadata"
"github.com/hashicorp/consul/types"
"github.com/hashicorp/go-version"
"github.com/hashicorp/serf/serf"
"github.com/hashicorp/consul/agent/metadata"
"github.com/hashicorp/consul/types"
)
// lanMergeDelegate is used to handle a cluster merge on the LAN gossip
@ -17,6 +18,14 @@ type lanMergeDelegate struct {
nodeID types.NodeID
nodeName string
segment string
// TODO(partitions): use server and partition to reject gossip messages
// from nodes in the wrong partition depending upon the role the node is
// playing. For example servers will always be in the default partition,
// but all clients in all partitions should be aware of the servers so that
// general RPC routing works.
server bool
partition string
}
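
One way the rejection described in the TODO could look, reduced to plain booleans; how the partition and server role would actually be read off a member (for example from serf tags) is an assumption left out of this sketch:

```go
package main

import "fmt"

// acceptMember sketches the rule in the TODO: remote servers are always
// accepted (every client must see the servers for RPC routing to work),
// servers accept peers from every partition, and a client otherwise only
// accepts peers from its own partition.
func acceptMember(localIsServer bool, localPartition string, remoteIsServer bool, remotePartition string) bool {
	if localIsServer || remoteIsServer {
		return true
	}
	return remotePartition == localPartition
}

func main() {
	fmt.Println(acceptMember(false, "team-a", true, "default")) // client accepts a server
	fmt.Println(acceptMember(false, "team-a", false, "team-b")) // client rejects a foreign client
}
```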
// uniqueIDMinVersion is the lowest version where we insist that nodes

View File

@ -6,10 +6,11 @@ import (
"regexp"
"strings"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/hil"
"github.com/hashicorp/hil/ast"
"github.com/mitchellh/copystructure"
"github.com/hashicorp/consul/agent/structs"
)
// IsTemplate returns true if the given query is a template.
@ -89,6 +90,7 @@ func Compile(query *structs.PreparedQuery) (*CompiledTemplate, error) {
// prefix it will be expected to run with. The results might not make
// sense and create a valid service to lookup, but it should render
// without any errors.
// TODO(partitions) should this have a partition on it?
if _, err = ct.Render(ct.query.Name, structs.QuerySource{}); err != nil {
return nil, err
}
@ -156,6 +158,7 @@ func (ct *CompiledTemplate) Render(name string, source structs.QuerySource) (*st
Type: ast.TypeString,
Value: source.Segment,
},
// TODO(partitions): should NodePartition be projected here?
},
FuncMap: map[string]ast.Function{
"match": match,

View File

@ -402,8 +402,7 @@ func (p *PreparedQuery) Execute(args *structs.PreparedQueryExecuteRequest,
qs.Node = args.Agent.Node
} else if qs.Node == "_ip" {
if args.Source.Ip != "" {
// TODO(partitions)
_, nodes, err := state.Nodes(nil, nil)
_, nodes, err := state.Nodes(nil, structs.NodeEnterpriseMetaInDefaultPartition())
if err != nil {
return err
}

View File

@ -22,8 +22,7 @@ func (s *Server) newNodeSorter(cs lib.CoordinateSet, nodes structs.Nodes) (sort.
state := s.fsm.State()
vec := make([]float64, len(nodes))
for i, node := range nodes {
// TODO(partitions)
_, other, err := state.Coordinate(nil, node.Node, nil)
_, other, err := state.Coordinate(nil, node.Node, node.GetEnterpriseMeta())
if err != nil {
return nil, err
}
@ -63,8 +62,7 @@ func (s *Server) newServiceNodeSorter(cs lib.CoordinateSet, nodes structs.Servic
state := s.fsm.State()
vec := make([]float64, len(nodes))
for i, node := range nodes {
// TODO(partitions)
_, other, err := state.Coordinate(nil, node.Node, nil)
_, other, err := state.Coordinate(nil, node.Node, &node.EnterpriseMeta)
if err != nil {
return nil, err
}
@ -104,8 +102,7 @@ func (s *Server) newHealthCheckSorter(cs lib.CoordinateSet, checks structs.Healt
state := s.fsm.State()
vec := make([]float64, len(checks))
for i, check := range checks {
// TODO(partitions)
_, other, err := state.Coordinate(nil, check.Node, nil)
_, other, err := state.Coordinate(nil, check.Node, &check.EnterpriseMeta)
if err != nil {
return nil, err
}
@ -145,8 +142,7 @@ func (s *Server) newCheckServiceNodeSorter(cs lib.CoordinateSet, nodes structs.C
state := s.fsm.State()
vec := make([]float64, len(nodes))
for i, node := range nodes {
// TODO(partitions)
_, other, err := state.Coordinate(nil, node.Node.Node, nil)
_, other, err := state.Coordinate(nil, node.Node.Node, node.Node.GetEnterpriseMeta())
if err != nil {
return nil, err
}

View File

@ -8,8 +8,9 @@ import (
"github.com/armon/go-metrics"
"github.com/armon/go-metrics/prometheus"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/serf/serf"
"github.com/hashicorp/consul/agent/structs"
)
var SegmentOSSSummaries = []prometheus.SummaryDefinition{
@ -62,12 +63,17 @@ func (s *Server) setupSegments(config *Config, port int, rpcListeners map[string
func (s *Server) floodSegments(config *Config) {
}
func getSerfMemberEnterpriseMeta(member serf.Member) *structs.EnterpriseMeta {
return structs.NodeEnterpriseMetaInDefaultPartition()
}
// reconcile is used to reconcile the differences between Serf membership and
// what is reflected in our strongly consistent store. Mainly we need to ensure
// all live nodes are registered, all failed nodes are marked as such, and all
// left nodes are deregistered.
func (s *Server) reconcile() (err error) {
defer metrics.MeasureSince([]string{"leader", "reconcile"}, time.Now())
members := s.serfLAN.Members()
knownMembers := make(map[string]struct{})
for _, member := range members {
@ -79,5 +85,5 @@ func (s *Server) reconcile() (err error) {
// Reconcile any members that have been reaped while we were not the
// leader.
return s.reconcileReaped(knownMembers)
return s.reconcileReaped(knownMembers, nil)
}
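
The per-member dispatch hidden by the hunk above presumably threads the result of getSerfMemberEnterpriseMeta into the handlers that now accept it. A hedged, stand-alone sketch of that shape, with plain types standing in for serf.Member and the real handler signatures:

```go
package main

import "fmt"

type member struct {
	Name   string
	Status string // "alive", "failed", "left" or "reaped"
}

type entMeta struct{ Partition string }

// memberEntMeta mirrors getSerfMemberEnterpriseMeta above: the OSS build
// always places members in the default partition.
func memberEntMeta(_ member) *entMeta { return &entMeta{Partition: "default"} }

// reconcileMember sketches how each member could be dispatched to the
// handleAliveMember/handleFailedMember/handleLeftMember/handleReapMember
// handlers, each of which now receives the member's enterprise metadata.
func reconcileMember(m member) string {
	meta := memberEntMeta(m)
	switch m.Status {
	case "alive":
		return fmt.Sprintf("handleAliveMember(%s, %q)", m.Name, meta.Partition)
	case "failed":
		return fmt.Sprintf("handleFailedMember(%s, %q)", m.Name, meta.Partition)
	case "left":
		return fmt.Sprintf("handleLeftMember(%s, %q)", m.Name, meta.Partition)
	default:
		return fmt.Sprintf("handleReapMember(%s, %q)", m.Name, meta.Partition)
	}
}

func main() {
	fmt.Println(reconcileMember(member{Name: "node1", Status: "alive"}))
}
```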

View File

@ -117,6 +117,7 @@ func (s *Server) setupSerf(conf *serf.Config, ch chan serf.Event, path string, w
nodeID: s.config.NodeID,
nodeName: s.config.NodeName,
segment: segment,
server: true,
}
}
@ -175,7 +176,7 @@ func (s *Server) setupSerf(conf *serf.Config, ch chan serf.Event, path string, w
conf.ReconnectTimeoutOverride = libserf.NewReconnectOverride(s.logger)
addEnterpriseSerfTags(conf.Tags)
addEnterpriseSerfTags(conf.Tags, s.config.agentEnterpriseMeta())
if s.config.OverrideInitialSerfTags != nil {
s.config.OverrideInitialSerfTags(conf.Tags)

View File

@ -6,6 +6,7 @@ import (
"github.com/armon/go-metrics"
"github.com/armon/go-metrics/prometheus"
"github.com/hashicorp/consul/agent/structs"
)
@ -46,6 +47,7 @@ func (s *Server) initializeSessionTimers() error {
// Scan all sessions and reset their timer
state := s.fsm.State()
// TODO(partitions): track all session timers in all partitions
_, sessions, err := state.SessionList(nil, structs.WildcardEnterpriseMetaInDefaultPartition())
if err != nil {
return err

View File

@ -1015,7 +1015,7 @@ func aclTokenDeleteTxn(tx WriteTxn, idx uint64, value, index string, entMeta *st
func aclTokenDeleteAllForAuthMethodTxn(tx WriteTxn, idx uint64, methodName string, methodGlobalLocality bool, methodMeta *structs.EnterpriseMeta) error {
// collect all the tokens linked with the given auth method.
iter, err := aclTokenListByAuthMethod(tx, methodName, methodMeta, structs.WildcardEnterpriseMetaInDefaultPartition())
iter, err := aclTokenListByAuthMethod(tx, methodName, methodMeta, methodMeta.WildcardEnterpriseMetaForPartition())
if err != nil {
return fmt.Errorf("failed acl token lookup: %v", err)
}

View File

@ -212,6 +212,19 @@ func indexFromUUIDQuery(raw interface{}) ([]byte, error) {
return uuidStringToBytes(q.Value)
}
func prefixIndexFromUUIDQuery(arg interface{}) ([]byte, error) {
switch v := arg.(type) {
case *structs.EnterpriseMeta:
return nil, nil
case structs.EnterpriseMeta:
return nil, nil
case Query:
return variableLengthUUIDStringToBytes(v.Value)
}
return nil, fmt.Errorf("unexpected type %T for Query prefix index", arg)
}
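
An *structs.EnterpriseMeta argument yields no prefix bytes, so ent-meta-only lookups stay unconstrained in OSS, while a Query value goes through the variable-length UUID conversion. The byte math behind that conversion, mirrored by the uuid[:6] → uuidBuf[:3] case in the schema test further down, looks roughly like this stand-alone sketch (odd-length prefixes are ignored here):

```go
package main

import (
	"encoding/hex"
	"fmt"
	"strings"
)

// uuidPrefixBytes sketches the variable-length conversion used by the uuid
// prefix index: strip hyphens and hex-decode, so a 6-character prefix becomes
// 3 index bytes.
func uuidPrefixBytes(prefix string) ([]byte, error) {
	h := strings.ReplaceAll(prefix, "-", "")
	if len(h)%2 != 0 {
		return nil, fmt.Errorf("odd-length prefix %q not handled in this sketch", prefix)
	}
	return hex.DecodeString(h)
}

func main() {
	full := "00a916bc-a357-4a19-b886-59419fceeaaa"
	b, err := uuidPrefixBytes(full[:6])
	fmt.Printf("%x %v\n", b, err) // 00a916 <nil> — half the prefix length in bytes
}
```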
func multiIndexPolicyFromACLRole(raw interface{}) ([][]byte, error) {
role, ok := raw.(*structs.ACLRole)
if !ok {

View File

@ -7,7 +7,6 @@ import (
"strings"
memdb "github.com/hashicorp/go-memdb"
"github.com/hashicorp/go-uuid"
"github.com/mitchellh/copystructure"
"github.com/hashicorp/consul/acl"
@ -275,8 +274,7 @@ func (s *Store) ensureNodeTxn(tx WriteTxn, idx uint64, preserveIndexes bool, nod
// name is the same.
var n *structs.Node
if node.ID != "" {
// TODO(partitions): should this take a node ent-meta?
existing, err := getNodeIDTxn(tx, node.ID)
existing, err := getNodeIDTxn(tx, node.ID, node.GetEnterpriseMeta())
if err != nil {
return fmt.Errorf("node lookup failed: %s", err)
}
@ -382,14 +380,11 @@ func getNodeTxn(tx ReadTxn, nodeNameOrID string, entMeta *structs.EnterpriseMeta
return nil, nil
}
func getNodeIDTxn(tx ReadTxn, id types.NodeID) (*structs.Node, error) {
strnode := string(id)
uuidValue, err := uuid.ParseUUID(strnode)
if err != nil {
return nil, fmt.Errorf("node lookup by ID failed, wrong UUID: %v for '%s'", err, strnode)
}
node, err := tx.First(tableNodes, "uuid", uuidValue)
func getNodeIDTxn(tx ReadTxn, id types.NodeID, entMeta *structs.EnterpriseMeta) (*structs.Node, error) {
node, err := tx.First(tableNodes, indexUUID+"_prefix", Query{
Value: string(id),
EnterpriseMeta: *entMeta,
})
if err != nil {
return nil, fmt.Errorf("node lookup by ID failed: %s", err)
}
@ -400,17 +395,20 @@ func getNodeIDTxn(tx ReadTxn, id types.NodeID) (*structs.Node, error) {
}
// GetNodeID is used to retrieve a node registration by node ID.
func (s *Store) GetNodeID(id types.NodeID) (uint64, *structs.Node, error) {
func (s *Store) GetNodeID(id types.NodeID, entMeta *structs.EnterpriseMeta) (uint64, *structs.Node, error) {
tx := s.db.Txn(false)
defer tx.Abort()
// TODO: accept non-pointer value
if entMeta == nil {
entMeta = structs.NodeEnterpriseMetaInDefaultPartition()
}
// Get the table index.
///
// NOTE: nodeIDs aren't partitioned so don't use the convenience function.
idx := maxIndexTxn(tx, tableNodes)
idx := catalogNodesMaxIndex(tx, entMeta)
// Retrieve the node from the state store
node, err := getNodeIDTxn(tx, id)
node, err := getNodeIDTxn(tx, id, entMeta)
return idx, node, err
}
@ -453,19 +451,25 @@ func (s *Store) NodesByMeta(ws memdb.WatchSet, filters map[string]string, entMet
}
// Get the table index.
idx := maxIndexTxn(tx, tableNodes)
// TODO:(partitions) use the partitioned meta index
// idx := catalogNodesMaxIndex(tx, entMeta)
_ = entMeta
idx := catalogNodesMaxIndex(tx, entMeta)
// Retrieve all of the nodes
var args []interface{}
for key, value := range filters {
args = append(args, key, value)
if len(filters) == 0 {
return idx, nil, nil // NodesByMeta is never called with an empty map, but just in case make it return no results.
}
// Retrieve all of the nodes. We'll do a lookup of just ONE KV pair, which
// over-matches if multiple pairs are requested, but then in the loop below
// we'll finish filtering.
var firstKey, firstValue string
for firstKey, firstValue = range filters {
break
}
nodes, err := tx.Get(tableNodes, "meta", args...)
nodes, err := tx.Get(tableNodes, indexMeta, KeyValueQuery{
Key: firstKey,
Value: firstValue,
EnterpriseMeta: *entMeta,
})
if err != nil {
return 0, nil, fmt.Errorf("failed nodes lookup: %s", err)
}
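
The comment above captures the technique: narrow with a single key/value pair via the index, then re-check every requested pair while iterating so multi-pair filters never over-match. The same two-phase filtering, sketched with plain maps standing in for the memdb lookup:

```go
package main

import "fmt"

type node struct {
	Name string
	Meta map[string]string
}

// filterByMeta narrows by the first filter pair (standing in for the index
// lookup) and then verifies all pairs, so multi-pair filters never over-match.
func filterByMeta(nodes []node, filters map[string]string) []node {
	if len(filters) == 0 {
		return nil
	}
	var firstKey, firstValue string
	for firstKey, firstValue = range filters {
		break
	}
	var out []node
	for _, n := range nodes {
		if n.Meta[firstKey] != firstValue {
			continue // would not have been returned by the index lookup
		}
		match := true
		for k, v := range filters {
			if n.Meta[k] != v {
				match = false
				break
			}
		}
		if match {
			out = append(out, n)
		}
	}
	return out
}

func main() {
	nodes := []node{
		{Name: "node1", Meta: map[string]string{"role": "server", "zone": "a"}},
		{Name: "node2", Meta: map[string]string{"role": "server", "zone": "b"}},
	}
	fmt.Println(filterByMeta(nodes, map[string]string{"role": "server", "zone": "b"}))
}
```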
@ -829,20 +833,34 @@ func (s *Store) ServicesByNodeMeta(ws memdb.WatchSet, filters map[string]string,
tx := s.db.Txn(false)
defer tx.Abort()
// TODO: accept non-pointer value
if entMeta == nil {
entMeta = structs.NodeEnterpriseMetaInDefaultPartition()
}
// Get the table index.
idx := catalogServicesMaxIndex(tx, entMeta)
if nodeIdx := catalogNodesMaxIndex(tx, entMeta); nodeIdx > idx {
idx = nodeIdx
}
// Retrieve all of the nodes with the meta k/v pair
var args []interface{}
for key, value := range filters {
args = append(args, key, value)
if len(filters) == 0 {
return idx, nil, nil // ServicesByNodeMeta is never called with an empty map, but just in case make it return no results.
}
// Retrieve all of the nodes. We'll do a lookup of just ONE KV pair, which
// over-matches if multiple pairs are requested, but then in the loop below
// we'll finish filtering.
var firstKey, firstValue string
for firstKey, firstValue = range filters {
break
}
// TODO(partitions): scope the meta index to a partition
nodes, err := tx.Get(tableNodes, "meta", args...)
nodes, err := tx.Get(tableNodes, indexMeta, KeyValueQuery{
Key: firstKey,
Value: firstValue,
EnterpriseMeta: *entMeta,
})
if err != nil {
return 0, nil, fmt.Errorf("failed nodes lookup: %s", err)
}
@ -1274,7 +1292,10 @@ func (s *Store) nodeServices(ws memdb.WatchSet, nodeNameOrID string, entMeta *st
}
// Attempt to lookup the node by its node ID
iter, err := tx.Get(tableNodes, "uuid_prefix", resizeNodeLookupKey(nodeNameOrID))
iter, err := tx.Get(tableNodes, indexUUID+"_prefix", Query{
Value: resizeNodeLookupKey(nodeNameOrID),
EnterpriseMeta: *entMeta,
})
if err != nil {
ws.Add(watchCh)
// TODO(sean@): We could/should log an error re: the uuid_prefix lookup

View File

@ -25,14 +25,15 @@ type EventPayloadCheckServiceNode struct {
// when the change event is for a sidecar or gateway.
overrideKey string
overrideNamespace string
overridePartition string
}
func (e EventPayloadCheckServiceNode) HasReadPermission(authz acl.Authorizer) bool {
return e.Value.CanRead(authz) == acl.Allow
}
func (e EventPayloadCheckServiceNode) MatchesKey(key, namespace string) bool {
if key == "" && namespace == "" {
func (e EventPayloadCheckServiceNode) MatchesKey(key, namespace, partition string) bool {
if key == "" && namespace == "" && partition == "" {
return true
}
@ -48,8 +49,14 @@ func (e EventPayloadCheckServiceNode) MatchesKey(key, namespace string) bool {
if e.overrideNamespace != "" {
ns = e.overrideNamespace
}
ap := e.Value.Service.EnterpriseMeta.PartitionOrDefault()
if e.overridePartition != "" {
ap = e.overridePartition
}
return (key == "" || strings.EqualFold(key, name)) &&
(namespace == "" || strings.EqualFold(namespace, ns))
(namespace == "" || strings.EqualFold(namespace, ns)) &&
(partition == "" || strings.EqualFold(partition, ap))
}
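
The matching rules here are: all-empty subscription fields match everything, the override fields win when set (sidecar and gateway events), and each non-empty field is compared case-insensitively with the empty string acting as a wildcard. A stand-alone illustration of those rules on plain strings:

```go
package main

import (
	"fmt"
	"strings"
)

// matches mirrors the shape of MatchesKey: an all-empty subscription matches
// every payload, and each non-empty field is compared case-insensitively.
func matches(key, namespace, partition, name, ns, ap string) bool {
	if key == "" && namespace == "" && partition == "" {
		return true
	}
	return (key == "" || strings.EqualFold(key, name)) &&
		(namespace == "" || strings.EqualFold(namespace, ns)) &&
		(partition == "" || strings.EqualFold(partition, ap))
}

func main() {
	// Subscription for "web" in any namespace of partition "team-a".
	fmt.Println(matches("WEB", "", "team-a", "web", "default", "team-a")) // true
	fmt.Println(matches("web", "", "team-b", "web", "default", "team-a")) // false
}
```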
// serviceHealthSnapshot returns a stream.SnapshotFunc that provides a snapshot
@ -60,7 +67,7 @@ func serviceHealthSnapshot(db ReadDB, topic stream.Topic) stream.SnapshotFunc {
defer tx.Abort()
connect := topic == topicServiceHealthConnect
entMeta := structs.NewEnterpriseMetaInDefaultPartition(req.Namespace)
entMeta := structs.NewEnterpriseMetaWithPartition(req.Partition, req.Namespace)
idx, nodes, err := checkServiceNodesTxn(tx, nil, req.Key, connect, &entMeta)
if err != nil {
return 0, err
@ -123,6 +130,11 @@ type serviceChange struct {
change memdb.Change
}
type nodeTuple struct {
Node string
Partition string
}
var serviceChangeIndirect = serviceChange{changeType: changeIndirect}
// ServiceHealthEventsFromChanges returns all the service and Connect health
@ -130,13 +142,13 @@ var serviceChangeIndirect = serviceChange{changeType: changeIndirect}
func ServiceHealthEventsFromChanges(tx ReadTxn, changes Changes) ([]stream.Event, error) {
var events []stream.Event
var nodeChanges map[string]changeType
var nodeChanges map[nodeTuple]changeType
var serviceChanges map[nodeServiceTuple]serviceChange
var termGatewayChanges map[structs.ServiceName]map[structs.ServiceName]serviceChange
markNode := func(node string, typ changeType) {
markNode := func(node nodeTuple, typ changeType) {
if nodeChanges == nil {
nodeChanges = make(map[string]changeType)
nodeChanges = make(map[nodeTuple]changeType)
}
// If the caller has an actual node mutation ensure we store it even if the
// node is already marked. If the caller is just marking the node dirty
@ -161,14 +173,15 @@ func ServiceHealthEventsFromChanges(tx ReadTxn, changes Changes) ([]stream.Event
for _, change := range changes.Changes {
switch change.Table {
case "nodes":
case tableNodes:
// Node changed in some way, if it's not a delete, we'll need to
// re-deliver CheckServiceNode results for all services on that node but
// we mark it anyway because if it _is_ a delete then we need to know that
// later to avoid trying to deliver events when node level checks mark the
// node as "changed".
n := changeObject(change).(*structs.Node)
markNode(n.Node, changeTypeFromChange(change))
tuple := newNodeTupleFromNode(n)
markNode(tuple, changeTypeFromChange(change))
case tableServices:
sn := changeObject(change).(*structs.ServiceNode)
@ -187,7 +200,8 @@ func ServiceHealthEventsFromChanges(tx ReadTxn, changes Changes) ([]stream.Event
after := change.After.(*structs.HealthCheck)
if after.ServiceID == "" || before.ServiceID == "" {
// check before and/or after is node-scoped
markNode(after.Node, changeIndirect)
nt := newNodeTupleFromHealthCheck(after)
markNode(nt, changeIndirect)
} else {
// Check changed which means we just need to emit for the linked
// service.
@ -206,7 +220,8 @@ func ServiceHealthEventsFromChanges(tx ReadTxn, changes Changes) ([]stream.Event
obj := changeObject(change).(*structs.HealthCheck)
if obj.ServiceID == "" {
// Node level check
markNode(obj.Node, changeIndirect)
nt := newNodeTupleFromHealthCheck(obj)
markNode(nt, changeIndirect)
} else {
markService(newNodeServiceTupleFromServiceHealthCheck(obj), serviceChangeIndirect)
}
@ -250,7 +265,8 @@ func ServiceHealthEventsFromChanges(tx ReadTxn, changes Changes) ([]stream.Event
continue
}
// Rebuild events for all services on this node
es, err := newServiceHealthEventsForNode(tx, changes.Index, node)
es, err := newServiceHealthEventsForNode(tx, changes.Index, node.Node,
structs.WildcardEnterpriseMetaInPartition(node.Partition))
if err != nil {
return nil, err
}
@ -286,7 +302,7 @@ func ServiceHealthEventsFromChanges(tx ReadTxn, changes Changes) ([]stream.Event
}
}
if _, ok := nodeChanges[tuple.Node]; ok {
if _, ok := nodeChanges[tuple.nodeTuple()]; ok {
// We already rebuilt events for everything on this node, no need to send
// a duplicate.
continue
@ -303,7 +319,10 @@ func ServiceHealthEventsFromChanges(tx ReadTxn, changes Changes) ([]stream.Event
for serviceName, gsChange := range serviceChanges {
gs := changeObject(gsChange.change).(*structs.GatewayService)
q := Query{Value: gs.Gateway.Name, EnterpriseMeta: gatewayName.EnterpriseMeta}
q := Query{
Value: gs.Gateway.Name,
EnterpriseMeta: gatewayName.EnterpriseMeta,
}
_, nodes, err := serviceNodesTxn(tx, nil, indexService, q)
if err != nil {
return nil, err
@ -320,6 +339,9 @@ func ServiceHealthEventsFromChanges(tx ReadTxn, changes Changes) ([]stream.Event
if gatewayName.EnterpriseMeta.NamespaceOrDefault() != serviceName.EnterpriseMeta.NamespaceOrDefault() {
payload.overrideNamespace = serviceName.EnterpriseMeta.NamespaceOrDefault()
}
if gatewayName.EnterpriseMeta.PartitionOrDefault() != serviceName.EnterpriseMeta.PartitionOrDefault() {
payload.overridePartition = serviceName.EnterpriseMeta.PartitionOrDefault()
}
e.Payload = payload
events = append(events, e)
@ -344,6 +366,9 @@ func ServiceHealthEventsFromChanges(tx ReadTxn, changes Changes) ([]stream.Event
if gatewayName.EnterpriseMeta.NamespaceOrDefault() != serviceName.EnterpriseMeta.NamespaceOrDefault() {
payload.overrideNamespace = serviceName.EnterpriseMeta.NamespaceOrDefault()
}
if gatewayName.EnterpriseMeta.PartitionOrDefault() != serviceName.EnterpriseMeta.PartitionOrDefault() {
payload.overridePartition = serviceName.EnterpriseMeta.PartitionOrDefault()
}
e.Payload = payload
events = append(events, e)
@ -480,6 +505,9 @@ func copyEventForService(event stream.Event, service structs.ServiceName) stream
if payload.Value.Service.EnterpriseMeta.NamespaceOrDefault() != service.EnterpriseMeta.NamespaceOrDefault() {
payload.overrideNamespace = service.EnterpriseMeta.NamespaceOrDefault()
}
if payload.Value.Service.EnterpriseMeta.PartitionOrDefault() != service.EnterpriseMeta.PartitionOrDefault() {
payload.overridePartition = service.EnterpriseMeta.PartitionOrDefault()
}
event.Payload = payload
return event
@ -497,13 +525,16 @@ func getPayloadCheckServiceNode(payload stream.Payload) *structs.CheckServiceNod
// given node. This mirrors some of the logic in the oddly-named
// parseCheckServiceNodes but is more efficient since we know they are all on
// the same node.
func newServiceHealthEventsForNode(tx ReadTxn, idx uint64, node string) ([]stream.Event, error) {
services, err := tx.Get(tableServices, indexNode, Query{Value: node})
func newServiceHealthEventsForNode(tx ReadTxn, idx uint64, node string, entMeta *structs.EnterpriseMeta) ([]stream.Event, error) {
services, err := tx.Get(tableServices, indexNode, Query{
Value: node,
EnterpriseMeta: *entMeta,
})
if err != nil {
return nil, err
}
n, checksFunc, err := getNodeAndChecks(tx, node)
n, checksFunc, err := getNodeAndChecks(tx, node, entMeta)
if err != nil {
return nil, err
}
@ -521,9 +552,12 @@ func newServiceHealthEventsForNode(tx ReadTxn, idx uint64, node string) ([]strea
// getNodeAndChecks returns the node structure and a function that returns
// the full list of checks for a specific service on that node.
func getNodeAndChecks(tx ReadTxn, node string) (*structs.Node, serviceChecksFunc, error) {
func getNodeAndChecks(tx ReadTxn, node string, entMeta *structs.EnterpriseMeta) (*structs.Node, serviceChecksFunc, error) {
// Fetch the node
nodeRaw, err := tx.First(tableNodes, indexID, Query{Value: node})
nodeRaw, err := tx.First(tableNodes, indexID, Query{
Value: node,
EnterpriseMeta: *entMeta,
})
if err != nil {
return nil, nil, err
}
@ -532,7 +566,10 @@ func getNodeAndChecks(tx ReadTxn, node string) (*structs.Node, serviceChecksFunc
}
n := nodeRaw.(*structs.Node)
iter, err := tx.Get(tableChecks, indexNode, Query{Value: node})
iter, err := tx.Get(tableChecks, indexNode, Query{
Value: node,
EnterpriseMeta: *entMeta,
})
if err != nil {
return nil, nil, err
}
@ -566,12 +603,16 @@ func getNodeAndChecks(tx ReadTxn, node string) (*structs.Node, serviceChecksFunc
type serviceChecksFunc func(serviceID string) structs.HealthChecks
func newServiceHealthEventForService(tx ReadTxn, idx uint64, tuple nodeServiceTuple) (stream.Event, error) {
n, checksFunc, err := getNodeAndChecks(tx, tuple.Node)
n, checksFunc, err := getNodeAndChecks(tx, tuple.Node, &tuple.EntMeta)
if err != nil {
return stream.Event{}, err
}
svc, err := tx.Get(tableServices, indexID, NodeServiceQuery{EnterpriseMeta: tuple.EntMeta, Node: tuple.Node, Service: tuple.ServiceID})
svc, err := tx.Get(tableServices, indexID, NodeServiceQuery{
EnterpriseMeta: tuple.EntMeta,
Node: tuple.Node,
Service: tuple.ServiceID,
})
if err != nil {
return stream.Event{}, err
}
@ -615,9 +656,14 @@ func newServiceHealthEventDeregister(idx uint64, sn *structs.ServiceNode) stream
// This is also important because if the service was deleted as part of a
// whole node deregistering then the node record won't actually exist now
// anyway and we'd have to plumb it through from the changeset above.
entMeta := sn.EnterpriseMeta
entMeta.Normalize()
csn := &structs.CheckServiceNode{
Node: &structs.Node{
Node: sn.Node,
Node: sn.Node,
Partition: entMeta.PartitionOrEmpty(),
},
Service: sn.ToNodeService(),
}

View File

@ -0,0 +1,23 @@
// +build !consulent
package state
import "github.com/hashicorp/consul/agent/structs"
func (nst nodeServiceTuple) nodeTuple() nodeTuple {
return nodeTuple{Node: nst.Node, Partition: ""}
}
func newNodeTupleFromNode(node *structs.Node) nodeTuple {
return nodeTuple{
Node: node.Node,
Partition: "",
}
}
func newNodeTupleFromHealthCheck(hc *structs.HealthCheck) nodeTuple {
return nodeTuple{
Node: hc.Node,
Partition: "",
}
}
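
Keying the change maps by this tuple rather than the bare node name is what keeps node-level changes distinct across partitions once an enterprise build fills in Partition (the OSS constructors above always leave it empty). A small sketch of that bookkeeping with stand-in types:

```go
package main

import "fmt"

type nodeTuple struct {
	Node      string
	Partition string
}

// changeType is a simplified stand-in for the event-change classification.
type changeType int

const (
	changeIndirect changeType = iota
	changeUpdate
)

func main() {
	nodeChanges := make(map[nodeTuple]changeType)

	// The same node name in two partitions stays distinct in the map, so a
	// rebuild of events for one does not suppress events for the other.
	nodeChanges[nodeTuple{Node: "node1", Partition: "default"}] = changeUpdate
	nodeChanges[nodeTuple{Node: "node1", Partition: "team-a"}] = changeIndirect

	fmt.Println(len(nodeChanges)) // 2
}
```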

View File

@ -1605,9 +1605,9 @@ func (tc eventsTestCase) run(t *testing.T) {
assertDeepEqual(t, tc.WantEvents, got, cmpPartialOrderEvents, cmpopts.EquateEmpty())
}
func runCase(t *testing.T, name string, fn func(t *testing.T)) {
func runCase(t *testing.T, name string, fn func(t *testing.T)) bool {
t.Helper()
t.Run(name, func(t *testing.T) {
return t.Run(name, func(t *testing.T) {
t.Helper()
t.Log("case:", name)
fn(t)
@ -1680,7 +1680,11 @@ var cmpPartialOrderEvents = cmp.Options{
if payload.overrideNamespace != "" {
ns = payload.overrideNamespace
}
return fmt.Sprintf("%s/%s/%s/%s", e.Topic, csn.Node.Node, ns, name)
ap := csn.Service.EnterpriseMeta.PartitionOrDefault()
if payload.overridePartition != "" {
ap = payload.overridePartition
}
return fmt.Sprintf("%s/%s/%s/%s/%s", e.Topic, ap, csn.Node.Node, ns, name)
}
return key(i) < key(j)
}),
@ -2172,6 +2176,7 @@ func newTestEventServiceHealthRegister(index uint64, nodeNum int, svc string) st
Node: node,
Address: addr,
Datacenter: "dc1",
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
RaftIndex: structs.RaftIndex{
CreateIndex: index,
ModifyIndex: index,
@ -2238,7 +2243,8 @@ func newTestEventServiceHealthDeregister(index uint64, nodeNum int, svc string)
Op: pbsubscribe.CatalogOp_Deregister,
Value: &structs.CheckServiceNode{
Node: &structs.Node{
Node: fmt.Sprintf("node%d", nodeNum),
Node: fmt.Sprintf("node%d", nodeNum),
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
},
Service: &structs.NodeService{
ID: svc,
@ -2270,6 +2276,7 @@ func TestEventPayloadCheckServiceNode_FilterByKey(t *testing.T) {
payload EventPayloadCheckServiceNode
key string
namespace string
partition string // TODO(partitions): create test cases for this being set
expected bool
}
@ -2278,7 +2285,7 @@ func TestEventPayloadCheckServiceNode_FilterByKey(t *testing.T) {
t.Skip("cant test namespace matching without namespace support")
}
require.Equal(t, tc.expected, tc.payload.MatchesKey(tc.key, tc.namespace))
require.Equal(t, tc.expected, tc.payload.MatchesKey(tc.key, tc.namespace, tc.partition))
}
var testCases = []testCase{

View File

@ -4,6 +4,7 @@ package state
import (
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/types"
)
func testIndexerTableChecks() map[string]indexerTestCase {
@ -175,6 +176,8 @@ func testIndexerTableGatewayServices() map[string]indexerTestCase {
}
func testIndexerTableNodes() map[string]indexerTestCase {
uuidBuf, uuid := generateUUID()
return map[string]indexerTestCase{
indexID: {
read: indexValue{
@ -200,8 +203,59 @@ func testIndexerTableNodes() map[string]indexerTestCase {
},
},
},
// TODO: uuid
// TODO: meta
indexUUID: {
read: indexValue{
source: Query{Value: uuid},
expected: uuidBuf,
},
write: indexValue{
source: &structs.Node{
ID: types.NodeID(uuid),
Node: "NoDeId",
},
expected: uuidBuf,
},
prefix: []indexValue{
{
source: (*structs.EnterpriseMeta)(nil),
expected: nil,
},
{
source: structs.EnterpriseMeta{},
expected: nil,
},
{ // partial length
source: Query{Value: uuid[:6]},
expected: uuidBuf[:3],
},
{ // full length
source: Query{Value: uuid},
expected: uuidBuf,
},
},
},
indexMeta: {
read: indexValue{
source: KeyValueQuery{
Key: "KeY",
Value: "VaLuE",
},
expected: []byte("KeY\x00VaLuE\x00"),
},
writeMulti: indexValueMulti{
source: &structs.Node{
Node: "NoDeId",
Meta: map[string]string{
"MaP-kEy-1": "mAp-VaL-1",
"mAp-KeY-2": "MaP-vAl-2",
},
},
expected: [][]byte{
[]byte("MaP-kEy-1\x00mAp-VaL-1\x00"),
[]byte("mAp-KeY-2\x00MaP-vAl-2\x00"),
},
},
},
// TODO(partitions): fix schema tests for tables that reference nodes too
}

View File

@ -27,6 +27,8 @@ const (
indexUpstream = "upstream"
indexDownstream = "downstream"
indexGateway = "gateway"
indexUUID = "uuid"
indexMeta = "meta"
)
// nodesTableSchema returns a new table schema used for storing struct.Node.
@ -44,19 +46,23 @@ func nodesTableSchema() *memdb.TableSchema {
prefixIndex: prefixIndexFromQueryNoNamespace,
},
},
"uuid": {
Name: "uuid",
indexUUID: {
Name: indexUUID,
AllowMissing: true,
Unique: true,
Indexer: &memdb.UUIDFieldIndex{Field: "ID"},
Indexer: indexerSingleWithPrefix{
readIndex: indexFromUUIDQuery,
writeIndex: indexIDFromNode,
prefixIndex: prefixIndexFromUUIDQuery,
},
},
"meta": {
Name: "meta",
indexMeta: {
Name: indexMeta,
AllowMissing: true,
Unique: false,
Indexer: &memdb.StringMapFieldIndex{
Field: "Meta",
Lowercase: false,
Indexer: indexerMulti{
readIndex: indexFromKeyValueQuery,
writeIndexMulti: indexMetaFromNode,
},
},
},
@ -78,6 +84,50 @@ func indexFromNode(raw interface{}) ([]byte, error) {
return b.Bytes(), nil
}
func indexIDFromNode(raw interface{}) ([]byte, error) {
n, ok := raw.(*structs.Node)
if !ok {
return nil, fmt.Errorf("unexpected type %T for structs.Node index", raw)
}
if n.ID == "" {
return nil, errMissingValueForIndex
}
v, err := uuidStringToBytes(string(n.ID))
if err != nil {
return nil, err
}
return v, nil
}
func indexMetaFromNode(raw interface{}) ([][]byte, error) {
n, ok := raw.(*structs.Node)
if !ok {
return nil, fmt.Errorf("unexpected type %T for structs.Node index", raw)
}
// NOTE: this is case-sensitive!
vals := make([][]byte, 0, len(n.Meta))
for key, val := range n.Meta {
if key == "" {
continue
}
var b indexBuilder
b.String(key)
b.String(val)
vals = append(vals, b.Bytes())
}
if len(vals) == 0 {
return nil, errMissingValueForIndex
}
return vals, nil
}
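
Each meta pair becomes its own NUL-terminated index key ("key\x00value\x00", matching the expected bytes in the schema test), so a node is reachable from any single pair. A self-contained sketch of that write-side encoding, with bytes.Buffer standing in for indexBuilder:

```go
package main

import (
	"bytes"
	"fmt"
)

// metaIndexKeys sketches indexMetaFromNode: one "key\x00value\x00" entry per
// meta pair, with the original case preserved.
func metaIndexKeys(meta map[string]string) [][]byte {
	var keys [][]byte
	for k, v := range meta {
		if k == "" {
			continue
		}
		var b bytes.Buffer
		b.WriteString(k)
		b.WriteByte(0)
		b.WriteString(v)
		b.WriteByte(0)
		keys = append(keys, b.Bytes())
	}
	return keys
}

func main() {
	for _, k := range metaIndexKeys(map[string]string{"role": "server"}) {
		fmt.Printf("%q\n", k) // "role\x00server\x00"
	}
}
```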
// servicesTableSchema returns a new table schema used to store information
// about services.
func servicesTableSchema() *memdb.TableSchema {

View File

@ -1,6 +1,7 @@
package state
import (
crand "crypto/rand"
"fmt"
"reflect"
"sort"
@ -28,23 +29,24 @@ func makeRandomNodeID(t *testing.T) types.NodeID {
func TestStateStore_GetNodeID(t *testing.T) {
s := testStateStore(t)
_, out, err := s.GetNodeID(types.NodeID("wrongId"))
if err == nil || out != nil || !strings.Contains(err.Error(), "node lookup by ID failed, wrong UUID") {
t.Fatalf("want an error, nil value, err:=%q ; out:=%q", err.Error(), out)
_, out, err := s.GetNodeID(types.NodeID("wrongId"), nil)
if err == nil || out != nil || !strings.Contains(err.Error(), "node lookup by ID failed: index error: UUID (without hyphens) must be") {
t.Errorf("want an error, nil value, err:=%q ; out:=%q", err.Error(), out)
}
_, out, err = s.GetNodeID(types.NodeID("0123456789abcdefghijklmnopqrstuvwxyz"))
if err == nil || out != nil || !strings.Contains(err.Error(), "node lookup by ID failed, wrong UUID") {
t.Fatalf("want an error, nil value, err:=%q ; out:=%q", err, out)
_, out, err = s.GetNodeID(types.NodeID("0123456789abcdefghijklmnopqrstuvwxyz"), nil)
if err == nil || out != nil || !strings.Contains(err.Error(), "node lookup by ID failed: index error: invalid UUID") {
t.Errorf("want an error, nil value, err:=%q ; out:=%q", err, out)
}
_, out, err = s.GetNodeID(types.NodeID("00a916bc-a357-4a19-b886-59419fcee50Z"))
if err == nil || out != nil || !strings.Contains(err.Error(), "node lookup by ID failed, wrong UUID") {
t.Fatalf("want an error, nil value, err:=%q ; out:=%q", err, out)
_, out, err = s.GetNodeID(types.NodeID("00a916bc-a357-4a19-b886-59419fcee50Z"), nil)
if err == nil || out != nil || !strings.Contains(err.Error(), "node lookup by ID failed: index error: invalid UUID") {
t.Errorf("want an error, nil value, err:=%q ; out:=%q", err, out)
}
_, out, err = s.GetNodeID(types.NodeID("00a916bc-a357-4a19-b886-59419fcee506"))
_, out, err = s.GetNodeID(types.NodeID("00a916bc-a357-4a19-b886-59419fcee506"), nil)
if err != nil || out != nil {
t.Fatalf("do not want any error nor returned value, err:=%q ; out:=%q", err, out)
t.Errorf("do not want any error nor returned value, err:=%q ; out:=%q", err, out)
}
nodeID := types.NodeID("00a916bc-a357-4a19-b886-59419fceeaaa")
@ -53,30 +55,53 @@ func TestStateStore_GetNodeID(t *testing.T) {
Node: "node1",
Address: "1.2.3.4",
}
if err := s.EnsureRegistration(1, req); err != nil {
t.Fatalf("err: %s", err)
}
require.NoError(t, s.EnsureRegistration(1, req))
_, out, err = s.GetNodeID(nodeID)
if err != nil {
t.Fatalf("got err %s want nil", err)
}
_, out, err = s.GetNodeID(nodeID, nil)
require.NoError(t, err)
if out == nil || out.ID != nodeID {
t.Fatalf("out should not be nil and contain nodeId, but was:=%#v", out)
}
// Case insensitive lookup should work as well
_, out, err = s.GetNodeID(types.NodeID("00a916bC-a357-4a19-b886-59419fceeAAA"))
if err != nil {
t.Fatalf("got err %s want nil", err)
}
_, out, err = s.GetNodeID(types.NodeID("00a916bC-a357-4a19-b886-59419fceeAAA"), nil)
require.NoError(t, err)
if out == nil || out.ID != nodeID {
t.Fatalf("out should not be nil and contain nodeId, but was:=%#v", out)
}
}
func TestStateStore_GetNode(t *testing.T) {
s := testStateStore(t)
// initially does not exist
idx, out, err := s.GetNode("node1", nil)
require.NoError(t, err)
require.Nil(t, out)
require.Equal(t, uint64(0), idx)
// Create it
testRegisterNode(t, s, 1, "node1")
// now exists
idx, out, err = s.GetNode("node1", nil)
require.NoError(t, err)
require.NotNil(t, out)
require.Equal(t, uint64(1), idx)
require.Equal(t, "node1", out.Node)
// Case insensitive lookup should work as well
idx, out, err = s.GetNode("NoDe1", nil)
require.NoError(t, err)
require.NotNil(t, out)
require.Equal(t, uint64(1), idx)
require.Equal(t, "node1", out.Node)
}
func TestStateStore_ensureNoNodeWithSimilarNameTxn(t *testing.T) {
t.Parallel()
s := testStateStore(t)
nodeID := makeRandomNodeID(t)
req := &structs.RegisterRequest{
ID: nodeID,
@ -90,9 +115,7 @@ func TestStateStore_ensureNoNodeWithSimilarNameTxn(t *testing.T) {
Status: api.HealthPassing,
},
}
if err := s.EnsureRegistration(1, req); err != nil {
t.Fatalf("err: %s", err)
}
require.NoError(t, s.EnsureRegistration(1, req))
req = &structs.RegisterRequest{
ID: types.NodeID(""),
Node: "node2",
@ -103,31 +126,29 @@ func TestStateStore_ensureNoNodeWithSimilarNameTxn(t *testing.T) {
Status: api.HealthPassing,
},
}
if err := s.EnsureRegistration(2, req); err != nil {
t.Fatalf("err: %s", err)
}
require.NoError(t, s.EnsureRegistration(2, req))
tx := s.db.WriteTxnRestore()
defer tx.Abort()
node := &structs.Node{
ID: makeRandomNodeID(t),
Node: "NOdE1", // Name is similar but case is different
Address: "2.3.4.5",
}
// Let's conflict with node1 (has an ID)
if err := ensureNoNodeWithSimilarNameTxn(tx, node, false); err == nil {
t.Fatalf("Should return an error since another name with similar name exists")
}
if err := ensureNoNodeWithSimilarNameTxn(tx, node, true); err == nil {
t.Fatalf("Should return an error since another name with similar name exists")
}
require.Error(t, ensureNoNodeWithSimilarNameTxn(tx, node, false),
"Should return an error since another name with similar name exists")
require.Error(t, ensureNoNodeWithSimilarNameTxn(tx, node, true),
"Should return an error since another name with similar name exists")
// Lets conflict with node without ID
node.Node = "NoDe2"
if err := ensureNoNodeWithSimilarNameTxn(tx, node, false); err == nil {
t.Fatalf("Should return an error since another name with similar name exists")
}
if err := ensureNoNodeWithSimilarNameTxn(tx, node, true); err != nil {
t.Fatalf("Should not clash with another similar node name without ID, err:=%q", err)
}
require.Error(t, ensureNoNodeWithSimilarNameTxn(tx, node, false),
"Should return an error since another name with similar name exists")
require.NoError(t, ensureNoNodeWithSimilarNameTxn(tx, node, true),
"Should not clash with another similar node name without ID")
// Set node1's Serf health to failing and replace it.
newNode := &structs.Node{
@ -135,17 +156,15 @@ func TestStateStore_ensureNoNodeWithSimilarNameTxn(t *testing.T) {
Node: "node1",
Address: "2.3.4.5",
}
if err := ensureNoNodeWithSimilarNameTxn(tx, newNode, false); err == nil {
t.Fatalf("Should return an error since the previous node is still healthy")
}
s.ensureCheckTxn(tx, 5, false, &structs.HealthCheck{
require.Error(t, ensureNoNodeWithSimilarNameTxn(tx, newNode, false),
"Should return an error since the previous node is still healthy")
require.NoError(t, s.ensureCheckTxn(tx, 5, false, &structs.HealthCheck{
Node: "node1",
CheckID: structs.SerfCheckID,
Status: api.HealthCritical,
})
if err := ensureNoNodeWithSimilarNameTxn(tx, newNode, false); err != nil {
t.Fatal(err)
}
}))
require.NoError(t, ensureNoNodeWithSimilarNameTxn(tx, newNode, false))
}
func TestStateStore_EnsureRegistration(t *testing.T) {
@ -183,7 +202,7 @@ func TestStateStore_EnsureRegistration(t *testing.T) {
}
require.Equal(t, node, out)
_, out2, err := s.GetNodeID(nodeID)
_, out2, err := s.GetNodeID(nodeID, nil)
if err != nil {
t.Fatalf("got err %s want nil", err)
}
@ -398,7 +417,7 @@ func TestStateStore_EnsureRegistration_Restore(t *testing.T) {
t.Fatalf("err: %s", err)
}
if out == nil {
_, out, err = s.GetNodeID(types.NodeID(nodeLookup))
_, out, err = s.GetNodeID(types.NodeID(nodeLookup), nil)
if err != nil {
t.Fatalf("err: %s", err)
}
@ -677,11 +696,11 @@ func TestNodeRenamingNodes(t *testing.T) {
t.Fatalf("err: %s", err)
}
if _, node, err := s.GetNodeID(nodeID1); err != nil || node == nil || node.ID != nodeID1 {
if _, node, err := s.GetNodeID(nodeID1, nil); err != nil || node == nil || node.ID != nodeID1 {
t.Fatalf("err: %s, node:= %q", err, node)
}
if _, node, err := s.GetNodeID(nodeID2); err != nil && node == nil || node.ID != nodeID2 {
if _, node, err := s.GetNodeID(nodeID2, nil); err != nil && node == nil || node.ID != nodeID2 {
t.Fatalf("err: %s", err)
}
@ -732,7 +751,7 @@ func TestNodeRenamingNodes(t *testing.T) {
}
// Retrieve the node again
idx2, out2, err := s.GetNodeID(nodeID2)
idx2, out2, err := s.GetNodeID(nodeID2, nil)
if err != nil {
t.Fatalf("err: %s", err)
}
@ -1119,6 +1138,11 @@ func TestStateStore_GetNodesByMeta(t *testing.T) {
filters map[string]string
nodes []string
}{
// Empty meta filter
{
filters: map[string]string{},
nodes: []string{},
},
// Simple meta filter
{
filters: map[string]string{"role": "server"},
@ -1188,9 +1212,7 @@ func TestStateStore_NodeServices(t *testing.T) {
Node: "node1",
Address: "1.2.3.4",
}
if err := s.EnsureRegistration(1, req); err != nil {
t.Fatalf("err: %s", err)
}
require.NoError(t, s.EnsureRegistration(1, req))
}
{
req := &structs.RegisterRequest{
@ -1198,83 +1220,59 @@ func TestStateStore_NodeServices(t *testing.T) {
Node: "node2",
Address: "5.6.7.8",
}
if err := s.EnsureRegistration(2, req); err != nil {
t.Fatalf("err: %s", err)
}
require.NoError(t, s.EnsureRegistration(2, req))
}
// Look up by name.
{
_, ns, err := s.NodeServices(nil, "node1", nil)
if err != nil {
t.Fatalf("err: %v", err)
t.Run("Look up by name", func(t *testing.T) {
{
_, ns, err := s.NodeServices(nil, "node1", nil)
require.NoError(t, err)
require.NotNil(t, ns)
require.Equal(t, "node1", ns.Node.Node)
}
if ns == nil || ns.Node.Node != "node1" {
t.Fatalf("bad: %#v", *ns)
{
_, ns, err := s.NodeServices(nil, "node2", nil)
require.NoError(t, err)
require.NotNil(t, ns)
require.Equal(t, "node2", ns.Node.Node)
}
}
{
_, ns, err := s.NodeServices(nil, "node2", nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if ns == nil || ns.Node.Node != "node2" {
t.Fatalf("bad: %#v", *ns)
}
}
})
// Look up by UUID.
{
_, ns, err := s.NodeServices(nil, "40e4a748-2192-161a-0510-aaaaaaaaaaaa", nil)
if err != nil {
t.Fatalf("err: %v", err)
t.Run("Look up by UUID", func(t *testing.T) {
{
_, ns, err := s.NodeServices(nil, "40e4a748-2192-161a-0510-aaaaaaaaaaaa", nil)
require.NoError(t, err)
require.NotNil(t, ns)
require.Equal(t, "node1", ns.Node.Node)
}
if ns == nil || ns.Node.Node != "node1" {
t.Fatalf("bad: %#v", ns)
{
_, ns, err := s.NodeServices(nil, "40e4a748-2192-161a-0510-bbbbbbbbbbbb", nil)
require.NoError(t, err)
require.NotNil(t, ns)
require.Equal(t, "node2", ns.Node.Node)
}
}
{
_, ns, err := s.NodeServices(nil, "40e4a748-2192-161a-0510-bbbbbbbbbbbb", nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if ns == nil || ns.Node.Node != "node2" {
t.Fatalf("bad: %#v", ns)
}
}
})
// Ambiguous prefix.
{
t.Run("Ambiguous prefix", func(t *testing.T) {
_, ns, err := s.NodeServices(nil, "40e4a748-2192-161a-0510", nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if ns != nil {
t.Fatalf("bad: %#v", ns)
}
}
require.NoError(t, err)
require.Nil(t, ns)
})
// Bad node, and not a UUID (should not get a UUID error).
{
t.Run("Bad node", func(t *testing.T) {
// Bad node, and not a UUID (should not get a UUID error).
_, ns, err := s.NodeServices(nil, "nope", nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if ns != nil {
t.Fatalf("bad: %#v", ns)
}
}
require.NoError(t, err)
require.Nil(t, ns)
})
// Specific prefix.
{
t.Run("Specific prefix", func(t *testing.T) {
_, ns, err := s.NodeServices(nil, "40e4a748-2192-161a-0510-bb", nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if ns == nil || ns.Node.Node != "node2" {
t.Fatalf("bad: %#v", ns)
}
}
require.NoError(t, err)
require.NotNil(t, ns)
require.Equal(t, "node2", ns.Node.Node)
})
}
func TestStateStore_DeleteNode(t *testing.T) {
@ -7564,3 +7562,17 @@ func dumpMaxIndexes(t *testing.T, tx ReadTxn) map[string]uint64 {
}
return out
}
func generateUUID() ([]byte, string) {
buf := make([]byte, 16)
if _, err := crand.Read(buf); err != nil {
panic(fmt.Errorf("failed to read random bytes: %v", err))
}
uuid := fmt.Sprintf("%08x-%04x-%04x-%04x-%12x",
buf[0:4],
buf[4:6],
buf[6:8],
buf[8:10],
buf[10:16])
return buf, uuid
}

View File

@ -401,8 +401,8 @@ func (s *Store) discoveryChainTargetsTxn(tx ReadTxn, ws memdb.WatchSet, dc, serv
var resp []structs.ServiceName
for _, t := range chain.Targets {
em := structs.NewEnterpriseMetaInDefaultPartition(t.Namespace)
target := structs.NewServiceName(t.Service, &em)
em := entMeta.NewEnterpriseMetaInPartition(t.Namespace)
target := structs.NewServiceName(t.Service, em)
// TODO (freddy): Allow upstream DC and encode in response
if t.Datacenter == dc {
@ -457,8 +457,8 @@ func (s *Store) discoveryChainSourcesTxn(tx ReadTxn, ws memdb.WatchSet, dc strin
}
for _, t := range chain.Targets {
em := structs.NewEnterpriseMetaInDefaultPartition(t.Namespace)
candidate := structs.NewServiceName(t.Service, &em)
em := sn.NewEnterpriseMetaInPartition(t.Namespace)
candidate := structs.NewServiceName(t.Service, em)
if !candidate.Matches(destination) {
continue
@ -489,13 +489,15 @@ func validateProposedConfigEntryInServiceGraph(
enforceIngressProtocolsMatch bool
)
wildcardEntMeta := kindName.WildcardEnterpriseMetaForPartition()
switch kindName.Kind {
case structs.ProxyDefaults:
// Check anything that has a discovery chain entry. In the future we could
// somehow omit the ones that have a default protocol configured.
for _, kind := range serviceGraphKinds {
_, entries, err := configEntriesByKindTxn(tx, nil, kind, structs.WildcardEnterpriseMetaInDefaultPartition())
_, entries, err := configEntriesByKindTxn(tx, nil, kind, wildcardEntMeta)
if err != nil {
return err
}
@ -504,7 +506,7 @@ func validateProposedConfigEntryInServiceGraph(
}
}
_, ingressEntries, err := configEntriesByKindTxn(tx, nil, structs.IngressGateway, structs.WildcardEnterpriseMetaInDefaultPartition())
_, ingressEntries, err := configEntriesByKindTxn(tx, nil, structs.IngressGateway, wildcardEntMeta)
if err != nil {
return err
}
@ -516,7 +518,7 @@ func validateProposedConfigEntryInServiceGraph(
checkIngress = append(checkIngress, ingress)
}
_, ixnEntries, err := configEntriesByKindTxn(tx, nil, structs.ServiceIntentions, structs.WildcardEnterpriseMetaInDefaultPartition())
_, ixnEntries, err := configEntriesByKindTxn(tx, nil, structs.ServiceIntentions, wildcardEntMeta)
if err != nil {
return err
}
@ -573,7 +575,7 @@ func validateProposedConfigEntryInServiceGraph(
checkIntentions = append(checkIntentions, ixn)
}
_, ixnEntries, err := configEntriesByKindTxn(tx, nil, structs.ServiceIntentions, structs.WildcardEnterpriseMetaInDefaultPartition())
_, ixnEntries, err := configEntriesByKindTxn(tx, nil, structs.ServiceIntentions, wildcardEntMeta)
if err != nil {
return err
}

View File

@ -51,13 +51,25 @@ func indexFromServiceNameAsString(arg interface{}) ([]byte, error) {
return b.Bytes(), nil
}
// uuidStringToBytes is a modified version of memdb.UUIDFieldIndex.parseString
func uuidStringToBytes(uuid string) ([]byte, error) {
l := len(uuid)
if l != 36 {
// Verify the length
if l := len(uuid); l != 36 {
return nil, fmt.Errorf("UUID must be 36 characters")
}
return parseUUIDString(uuid)
}
func variableLengthUUIDStringToBytes(uuid string) ([]byte, error) {
// Verify the length
if l := len(uuid); l > 36 {
return nil, fmt.Errorf("Invalid UUID length. UUID have 36 characters; got %d", l)
}
return parseUUIDString(uuid)
}
// parseUUIDString is a modified version of memdb.UUIDFieldIndex.parseString.
// Callers should verify the length.
func parseUUIDString(uuid string) ([]byte, error) {
hyphens := strings.Count(uuid, "-")
if hyphens > 4 {
return nil, fmt.Errorf(`UUID should have maximum of 4 "-"; got %d`, hyphens)
@ -83,3 +95,36 @@ type BoolQuery struct {
Value bool
structs.EnterpriseMeta
}
// KeyValueQuery is a type used to query for both a key and a value that may
// include an enterprise identifier.
type KeyValueQuery struct {
Key string
Value string
structs.EnterpriseMeta
}
// NamespaceOrDefault exists because structs.EnterpriseMeta uses a pointer
// receiver for this method. Remove once that is fixed.
func (q KeyValueQuery) NamespaceOrDefault() string {
return q.EnterpriseMeta.NamespaceOrDefault()
}
// PartitionOrDefault exists because structs.EnterpriseMeta uses a pointer
// receiver for this method. Remove once that is fixed.
func (q KeyValueQuery) PartitionOrDefault() string {
return q.EnterpriseMeta.PartitionOrDefault()
}
func indexFromKeyValueQuery(arg interface{}) ([]byte, error) {
// NOTE: this is case-sensitive!
q, ok := arg.(KeyValueQuery)
if !ok {
return nil, fmt.Errorf("unexpected type %T for Query index", arg)
}
var b indexBuilder
b.String(q.Key)
b.String(q.Value)
return b.Bytes(), nil
}

View File

@ -1,6 +1,7 @@
package state
import (
"sort"
"testing"
"github.com/hashicorp/go-memdb"
@ -110,12 +111,20 @@ func (tc indexerTestCase) run(t *testing.T, indexer memdb.Indexer) {
}
}
sortMultiByteSlice := func(v [][]byte) {
sort.Slice(v, func(i, j int) bool {
return string(v[i]) < string(v[j])
})
}
if i, ok := indexer.(memdb.MultiIndexer); ok {
t.Run("writeIndexMulti", func(t *testing.T) {
valid, actual, err := i.FromObject(tc.writeMulti.source)
require.NoError(t, err)
require.True(t, valid)
require.Equal(t, tc.writeMulti.expected, actual)
sortMultiByteSlice(actual)
sortMultiByteSlice(tc.writeMulti.expected)
require.ElementsMatch(t, tc.writeMulti.expected, actual)
})
}
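
The switch from a strict equality check to sorting plus ElementsMatch is needed because the multi-index values come from ranging over a Go map, whose iteration order is randomized. A tiny stand-alone demonstration:

```go
package main

import (
	"fmt"
	"sort"
)

func main() {
	meta := map[string]string{"a": "1", "b": "2", "c": "3"}

	// Ranging over a map yields keys in an unspecified (randomized) order,
	// so derived slices must be sorted or compared order-insensitively.
	var keys []string
	for k := range meta {
		keys = append(keys, k)
	}
	fmt.Println(keys) // order varies from run to run

	sort.Strings(keys)
	fmt.Println(keys) // [a b c]
}
```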

View File

@ -94,7 +94,10 @@ func testRegisterNodeOpts(t *testing.T, s *Store, idx uint64, nodeID string, opt
tx := s.db.Txn(false)
defer tx.Abort()
n, err := tx.First(tableNodes, indexID, Query{Value: nodeID})
n, err := tx.First(tableNodes, indexID, Query{
Value: nodeID,
EnterpriseMeta: *node.GetEnterpriseMeta(),
})
if err != nil {
t.Fatalf("err: %s", err)
}

View File

@ -422,7 +422,19 @@ type nodePayload struct {
node *structs.ServiceNode
}
func (p nodePayload) MatchesKey(key, _ string) bool {
func (p nodePayload) MatchesKey(key, _, partition string) bool {
if key == "" && partition == "" {
return true
}
if p.node == nil {
return false
}
if structs.PartitionOrDefault(partition) != p.node.PartitionOrDefault() {
return false
}
return p.key == key
}

View File

@ -153,7 +153,7 @@ func (s *Store) txnNode(tx WriteTxn, idx uint64, op *structs.TxnNodeOp) (structs
getNode := func() (*structs.Node, error) {
if op.Node.ID != "" {
return getNodeIDTxn(tx, op.Node.ID)
return getNodeIDTxn(tx, op.Node.ID, op.Node.GetEnterpriseMeta())
} else {
return getNodeTxn(tx, op.Node.Node, op.Node.GetEnterpriseMeta())
}

View File

@ -10,16 +10,18 @@ import (
const (
serviceNamesUsageTable = "service-names"
tableUsage = "usage"
)
// usageTableSchema returns a new table schema used for tracking various indexes
// for the Raft log.
func usageTableSchema() *memdb.TableSchema {
return &memdb.TableSchema{
Name: "usage",
Name: tableUsage,
Indexes: map[string]*memdb.IndexSchema{
"id": {
Name: "id",
indexID: {
Name: indexID,
AllowMissing: false,
Unique: true,
Indexer: &memdb.StringFieldIndex{
@ -46,6 +48,12 @@ type ServiceUsage struct {
EnterpriseServiceUsage
}
// NodeUsage contains all of the usage data related to nodes
type NodeUsage struct {
Nodes int
EnterpriseNodeUsage
}
type uniqueServiceState int
const (
@ -68,8 +76,10 @@ func updateUsage(tx WriteTxn, changes Changes) error {
}
switch change.Table {
case "nodes":
case tableNodes:
usageDeltas[change.Table] += delta
addEnterpriseNodeUsage(usageDeltas, change)
case tableServices:
svc := changeObject(change).(*structs.ServiceNode)
usageDeltas[change.Table] += delta
@ -98,7 +108,8 @@ func updateUsage(tx WriteTxn, changes Changes) error {
// This will happen when restoring from a snapshot; just take the max index
// of the tables we are tracking.
if idx == 0 {
idx = maxIndexTxn(tx, "nodes", tableServices)
// TODO(partitions? namespaces?)
idx = maxIndexTxn(tx, tableNodes, tableServices)
}
return writeUsageDeltas(tx, idx, usageDeltas)
@ -107,7 +118,10 @@ func updateUsage(tx WriteTxn, changes Changes) error {
func updateServiceNameUsage(tx WriteTxn, usageDeltas map[string]int, serviceNameChanges map[structs.ServiceName]int) (map[structs.ServiceName]uniqueServiceState, error) {
serviceStates := make(map[structs.ServiceName]uniqueServiceState, len(serviceNameChanges))
for svc, delta := range serviceNameChanges {
q := Query{Value: svc.Name, EnterpriseMeta: svc.EnterpriseMeta}
q := Query{
Value: svc.Name,
EnterpriseMeta: svc.EnterpriseMeta,
}
serviceIter, err := tx.Get(tableServices, indexService, q)
if err != nil {
return nil, err
@ -162,7 +176,7 @@ func serviceNameChanged(change memdb.Change) bool {
// passed in will be recorded on the entry as well.
func writeUsageDeltas(tx WriteTxn, idx uint64, usageDeltas map[string]int) error {
for id, delta := range usageDeltas {
u, err := tx.First("usage", "id", id)
u, err := tx.First(tableUsage, indexID, id)
if err != nil {
return fmt.Errorf("failed to retrieve existing usage entry: %s", err)
}
@ -175,7 +189,7 @@ func writeUsageDeltas(tx WriteTxn, idx uint64, usageDeltas map[string]int) error
// large numbers.
delta = 0
}
err := tx.Insert("usage", &UsageEntry{
err := tx.Insert(tableUsage, &UsageEntry{
ID: id,
Count: delta,
Index: idx,
@ -192,7 +206,7 @@ func writeUsageDeltas(tx WriteTxn, idx uint64, usageDeltas map[string]int) error
// large numbers.
updated = 0
}
err := tx.Insert("usage", &UsageEntry{
err := tx.Insert(tableUsage, &UsageEntry{
ID: id,
Count: updated,
Index: idx,
@ -205,17 +219,26 @@ func writeUsageDeltas(tx WriteTxn, idx uint64, usageDeltas map[string]int) error
return nil
}
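
Both branches clamp a would-be negative count to zero before writing the usage entry back. The accumulate-and-clamp pattern in isolation, simplified to plain maps:

```go
package main

import "fmt"

// applyDeltas sketches the usage bookkeeping: fold per-table deltas into the
// stored counts, clamping at zero so a stray extra decrement can never drive
// a count negative.
func applyDeltas(current map[string]int, deltas map[string]int) map[string]int {
	out := make(map[string]int, len(current))
	for k, v := range current {
		out[k] = v
	}
	for table, delta := range deltas {
		updated := out[table] + delta
		if updated < 0 {
			updated = 0
		}
		out[table] = updated
	}
	return out
}

func main() {
	current := map[string]int{"nodes": 1}
	fmt.Println(applyDeltas(current, map[string]int{"nodes": -2, "services": 1}))
	// map[nodes:0 services:1]
}
```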
// NodeCount returns the latest seen Raft index, a count of the number of nodes
// registered, and any errors.
func (s *Store) NodeCount() (uint64, int, error) {
// NodeUsage returns the latest seen Raft index, a compiled set of node usage
// data, and any errors.
func (s *Store) NodeUsage() (uint64, NodeUsage, error) {
tx := s.db.ReadTxn()
defer tx.Abort()
nodeUsage, err := firstUsageEntry(tx, "nodes")
nodes, err := firstUsageEntry(tx, tableNodes)
if err != nil {
return 0, 0, fmt.Errorf("failed nodes lookup: %s", err)
return 0, NodeUsage{}, fmt.Errorf("failed nodes lookup: %s", err)
}
return nodeUsage.Index, nodeUsage.Count, nil
usage := NodeUsage{
Nodes: nodes.Count,
}
results, err := compileEnterpriseNodeUsage(tx, usage)
if err != nil {
return 0, NodeUsage{}, fmt.Errorf("failed nodes lookup: %s", err)
}
return nodes.Index, results, nil
}
// ServiceUsage returns the latest seen Raft index, a compiled set of service
@ -238,7 +261,7 @@ func (s *Store) ServiceUsage() (uint64, ServiceUsage, error) {
ServiceInstances: serviceInstances.Count,
Services: services.Count,
}
results, err := compileEnterpriseUsage(tx, usage)
results, err := compileEnterpriseServiceUsage(tx, usage)
if err != nil {
return 0, ServiceUsage{}, fmt.Errorf("failed services lookup: %s", err)
}
@ -247,7 +270,7 @@ func (s *Store) ServiceUsage() (uint64, ServiceUsage, error) {
}
func firstUsageEntry(tx ReadTxn, id string) (*UsageEntry, error) {
usage, err := tx.First("usage", "id", id)
usage, err := tx.First(tableUsage, indexID, id)
if err != nil {
return nil, err
}

View File

@ -3,16 +3,24 @@
package state
import (
"github.com/hashicorp/consul/agent/structs"
memdb "github.com/hashicorp/go-memdb"
"github.com/hashicorp/consul/agent/structs"
)
type EnterpriseServiceUsage struct{}
type EnterpriseNodeUsage struct{}
func addEnterpriseNodeUsage(map[string]int, memdb.Change) {}
func addEnterpriseServiceInstanceUsage(map[string]int, memdb.Change) {}
func addEnterpriseServiceUsage(map[string]int, map[structs.ServiceName]uniqueServiceState) {}
func compileEnterpriseUsage(tx ReadTxn, usage ServiceUsage) (ServiceUsage, error) {
func compileEnterpriseServiceUsage(tx ReadTxn, usage ServiceUsage) (ServiceUsage, error) {
return usage, nil
}
func compileEnterpriseNodeUsage(tx ReadTxn, usage NodeUsage) (NodeUsage, error) {
return usage, nil
}

View File

@ -1,25 +0,0 @@
// +build !consulent
package state
import (
"testing"
"github.com/stretchr/testify/require"
)
func TestStateStore_Usage_ServiceUsage(t *testing.T) {
s := testStateStore(t)
testRegisterNode(t, s, 0, "node1")
testRegisterNode(t, s, 1, "node2")
testRegisterService(t, s, 8, "node1", "service1")
testRegisterService(t, s, 9, "node2", "service1")
testRegisterService(t, s, 10, "node2", "service2")
idx, usage, err := s.ServiceUsage()
require.NoError(t, err)
require.Equal(t, idx, uint64(10))
require.Equal(t, 2, usage.Services)
require.Equal(t, 3, usage.ServiceInstances)
}

View File

@ -9,40 +9,40 @@ import (
"github.com/hashicorp/consul/agent/structs"
)
func TestStateStore_Usage_NodeCount(t *testing.T) {
func TestStateStore_Usage_NodeUsage(t *testing.T) {
s := testStateStore(t)
// No nodes have been registered, and thus no usage entry exists
idx, count, err := s.NodeCount()
idx, usage, err := s.NodeUsage()
require.NoError(t, err)
require.Equal(t, idx, uint64(0))
require.Equal(t, count, 0)
require.Equal(t, usage.Nodes, 0)
testRegisterNode(t, s, 0, "node1")
testRegisterNode(t, s, 1, "node2")
idx, count, err = s.NodeCount()
idx, usage, err = s.NodeUsage()
require.NoError(t, err)
require.Equal(t, idx, uint64(1))
require.Equal(t, count, 2)
require.Equal(t, usage.Nodes, 2)
}
func TestStateStore_Usage_NodeCount_Delete(t *testing.T) {
func TestStateStore_Usage_NodeUsage_Delete(t *testing.T) {
s := testStateStore(t)
testRegisterNode(t, s, 0, "node1")
testRegisterNode(t, s, 1, "node2")
idx, count, err := s.NodeCount()
idx, usage, err := s.NodeUsage()
require.NoError(t, err)
require.Equal(t, idx, uint64(1))
require.Equal(t, count, 2)
require.Equal(t, usage.Nodes, 2)
require.NoError(t, s.DeleteNode(2, "node2", nil))
idx, count, err = s.NodeCount()
idx, usage, err = s.NodeUsage()
require.NoError(t, err)
require.Equal(t, idx, uint64(2))
require.Equal(t, count, 1)
require.Equal(t, usage.Nodes, 1)
}
func TestStateStore_Usage_ServiceUsageEmpty(t *testing.T) {
@ -56,6 +56,22 @@ func TestStateStore_Usage_ServiceUsageEmpty(t *testing.T) {
require.Equal(t, usage.ServiceInstances, 0)
}
func TestStateStore_Usage_ServiceUsage(t *testing.T) {
s := testStateStore(t)
testRegisterNode(t, s, 0, "node1")
testRegisterNode(t, s, 1, "node2")
testRegisterService(t, s, 8, "node1", "service1")
testRegisterService(t, s, 9, "node2", "service1")
testRegisterService(t, s, 10, "node2", "service2")
idx, usage, err := s.ServiceUsage()
require.NoError(t, err)
require.Equal(t, idx, uint64(10))
require.Equal(t, 2, usage.Services)
require.Equal(t, 3, usage.ServiceInstances)
}
func TestStateStore_Usage_ServiceUsage_DeleteNode(t *testing.T) {
s := testStateStore(t)
testRegisterNode(t, s, 1, "node1")
@ -116,10 +132,10 @@ func TestStateStore_Usage_Restore(t *testing.T) {
})
require.NoError(t, restore.Commit())
idx, count, err := s.NodeCount()
idx, nodeUsage, err := s.NodeUsage()
require.NoError(t, err)
require.Equal(t, idx, uint64(9))
require.Equal(t, count, 1)
require.Equal(t, nodeUsage.Nodes, 1)
idx, usage, err := s.ServiceUsage()
require.NoError(t, err)

View File

@ -26,12 +26,13 @@ type Event struct {
// should not modify the state of the payload if the Event is being submitted to
// EventPublisher.Publish.
type Payload interface {
// MatchesKey must return true if the Payload should be included in a subscription
// requested with the key and namespace.
// Generally this means that the payload matches the key and namespace or
// the payload is a special framing event that should be returned to every
// subscription.
MatchesKey(key, namespace string) bool
// MatchesKey must return true if the Payload should be included in a
// subscription requested with the key, namespace, and partition.
//
// Generally this means that the payload matches the key, namespace, and
// partition or the payload is a special framing event that should be
// returned to every subscription.
MatchesKey(key, namespace, partition string) bool
// HasReadPermission uses the acl.Authorizer to determine if the items in the
// Payload are visible to the request. It returns true if the payload is
@ -80,10 +81,11 @@ func (p *PayloadEvents) filter(f func(Event) bool) bool {
return true
}
// MatchesKey filters the PayloadEvents to those which match the key and namespace.
func (p *PayloadEvents) MatchesKey(key, namespace string) bool {
// MatchesKey filters the PayloadEvents to those which match the key,
// namespace, and partition.
func (p *PayloadEvents) MatchesKey(key, namespace, partition string) bool {
return p.filter(func(event Event) bool {
return event.Payload.MatchesKey(key, namespace)
return event.Payload.MatchesKey(key, namespace, partition)
})
}
@ -115,7 +117,7 @@ func (e Event) IsNewSnapshotToFollow() bool {
type framingEvent struct{}
func (framingEvent) MatchesKey(string, string) bool {
func (framingEvent) MatchesKey(string, string, string) bool {
return true
}
@ -135,7 +137,7 @@ type closeSubscriptionPayload struct {
tokensSecretIDs []string
}
func (closeSubscriptionPayload) MatchesKey(string, string) bool {
func (closeSubscriptionPayload) MatchesKey(string, string, string) bool {
return false
}
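For orientation, here is a minimal standalone sketch of a payload type satisfying the widened three-argument contract; examplePayload and its fields are hypothetical, and an empty filter value matches every event, just as the framing events above match everything:

```go
package main

import "fmt"

// examplePayload carries the three dimensions a subscription can filter on.
type examplePayload struct {
	key       string
	namespace string
	partition string
}

// MatchesKey treats an empty filter value as a wildcard for that dimension.
func (p examplePayload) MatchesKey(key, namespace, partition string) bool {
	return (key == "" || key == p.key) &&
		(namespace == "" || namespace == p.namespace) &&
		(partition == "" || partition == p.partition)
}

func main() {
	p := examplePayload{key: "web", namespace: "default", partition: "default"}
	fmt.Println(p.MatchesKey("web", "default", "default")) // true
	fmt.Println(p.MatchesKey("", "", ""))                  // true: empty filters match all
	fmt.Println(p.MatchesKey("web", "default", "team1"))   // false: partition mismatch
}
```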

View File

@ -291,5 +291,5 @@ func (e *EventPublisher) setCachedSnapshotLocked(req *SubscribeRequest, snap *ev
}
func snapCacheKey(req *SubscribeRequest) string {
return fmt.Sprintf(req.Namespace + "/" + req.Key)
return req.Partition + "/" + req.Namespace + "/" + req.Key
}

View File

@ -70,7 +70,7 @@ type simplePayload struct {
noReadPerm bool
}
func (p simplePayload) MatchesKey(key, _ string) bool {
func (p simplePayload) MatchesKey(key, _, _ string) bool {
if key == "" {
return true
}

View File

@ -35,7 +35,7 @@ func TestPayloadEvents_FilterByKey(t *testing.T) {
events = append(events, tc.events...)
pe := &PayloadEvents{Items: events}
ok := pe.MatchesKey(tc.req.Key, tc.req.Namespace)
ok := pe.MatchesKey(tc.req.Key, tc.req.Namespace, tc.req.Partition)
require.Equal(t, tc.expectEvent, ok)
if !tc.expectEvent {
return
@ -133,6 +133,7 @@ func TestPayloadEvents_FilterByKey(t *testing.T) {
}
}
// TODO(partitions)
func newNSEvent(key, namespace string) Event {
return Event{Index: 22, Payload: nsPayload{key: key, namespace: namespace}}
}
@ -141,11 +142,14 @@ type nsPayload struct {
framingEvent
key string
namespace string
partition string
value string
}
func (p nsPayload) MatchesKey(key, namespace string) bool {
return (key == "" || key == p.key) && (namespace == "" || namespace == p.namespace)
func (p nsPayload) MatchesKey(key, namespace, partition string) bool {
return (key == "" || key == p.key) &&
(namespace == "" || namespace == p.namespace) &&
(partition == "" || partition == p.partition)
}
func TestPayloadEvents_HasReadPermission(t *testing.T) {

View File

@ -62,6 +62,9 @@ type SubscribeRequest struct {
// Namespace used to filter events in the topic. Only events matching the
// namespace will be returned by the subscription.
Namespace string
// Partition used to filter events in the topic. Only events matching the
// partition will be returned by the subscription.
Partition string // TODO(partitions): make this work
// Token that was used to authenticate the request. If any ACL policy
// changes impact the token the subscription will be forcefully closed.
Token string
@ -102,7 +105,7 @@ func (s *Subscription) Next(ctx context.Context) (Event, error) {
continue
}
event := newEventFromBatch(s.req, next.Events)
if !event.Payload.MatchesKey(s.req.Key, s.req.Namespace) {
if !event.Payload.MatchesKey(s.req.Key, s.req.Namespace, s.req.Partition) {
continue
}
return event, nil

View File

@ -234,7 +234,6 @@ func TestTxn_Apply(t *testing.T) {
t.Fatalf("bad: %v", d)
}
// TODO(partitions)
_, n, err := state.GetNode("foo", nil)
if err != nil {
t.Fatalf("err: %v", err)

View File

@ -8,10 +8,11 @@ import (
"github.com/armon/go-metrics/prometheus"
"github.com/armon/go-metrics"
"github.com/hashicorp/consul/agent/consul/state"
"github.com/hashicorp/consul/logging"
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/serf/serf"
"github.com/hashicorp/consul/agent/consul/state"
"github.com/hashicorp/consul/logging"
)
var Gauges = []prometheus.GaugeDefinition{
@ -145,15 +146,13 @@ func (u *UsageMetricsReporter) Run(ctx context.Context) {
func (u *UsageMetricsReporter) runOnce() {
state := u.stateProvider.State()
_, nodes, err := state.NodeCount()
_, nodeUsage, err := state.NodeUsage()
if err != nil {
u.logger.Warn("failed to retrieve nodes from state store", "error", err)
}
metrics.SetGaugeWithLabels(
[]string{"consul", "state", "nodes"},
float32(nodes),
u.metricLabels,
)
u.emitNodeUsage(nodeUsage)
_, serviceUsage, err := state.ServiceUsage()
if err != nil {
@ -162,65 +161,27 @@ func (u *UsageMetricsReporter) runOnce() {
u.emitServiceUsage(serviceUsage)
servers, clients := u.memberUsage()
u.emitMemberUsage(servers, clients)
members := u.memberUsage()
u.emitMemberUsage(members)
}
func (u *UsageMetricsReporter) memberUsage() (int, map[string]int) {
func (u *UsageMetricsReporter) memberUsage() []serf.Member {
if u.getMembersFunc == nil {
return 0, nil
return nil
}
mems := u.getMembersFunc()
if len(mems) <= 0 {
u.logger.Warn("cluster reported zero members")
return 0, nil
}
servers := 0
clients := make(map[string]int)
out := make([]serf.Member, 0, len(mems))
for _, m := range mems {
if m.Status != serf.StatusAlive {
continue
}
switch m.Tags["role"] {
case "node":
clients[m.Tags["segment"]]++
case "consul":
servers++
}
out = append(out, m)
}
return servers, clients
}
func (u *UsageMetricsReporter) emitMemberUsage(servers int, clients map[string]int) {
totalClients := 0
for seg, c := range clients {
segmentLabel := metrics.Label{Name: "segment", Value: seg}
labels := append([]metrics.Label{segmentLabel}, u.metricLabels...)
metrics.SetGaugeWithLabels(
[]string{"consul", "members", "clients"},
float32(c),
labels,
)
totalClients += c
}
metrics.SetGaugeWithLabels(
[]string{"consul", "members", "clients"},
float32(totalClients),
u.metricLabels,
)
metrics.SetGaugeWithLabels(
[]string{"consul", "members", "servers"},
float32(servers),
u.metricLabels,
)
return out
}

View File

@ -4,9 +4,47 @@ package usagemetrics
import (
"github.com/armon/go-metrics"
"github.com/hashicorp/serf/serf"
"github.com/hashicorp/consul/agent/consul/state"
)
func (u *UsageMetricsReporter) emitNodeUsage(nodeUsage state.NodeUsage) {
metrics.SetGaugeWithLabels(
[]string{"consul", "state", "nodes"},
float32(nodeUsage.Nodes),
u.metricLabels,
)
}
func (u *UsageMetricsReporter) emitMemberUsage(members []serf.Member) {
var (
servers int
clients int
)
for _, m := range members {
switch m.Tags["role"] {
case "node":
clients++
case "consul":
servers++
}
}
metrics.SetGaugeWithLabels(
[]string{"consul", "members", "clients"},
float32(clients),
u.metricLabels,
)
metrics.SetGaugeWithLabels(
[]string{"consul", "members", "servers"},
float32(servers),
u.metricLabels,
)
}
func (u *UsageMetricsReporter) emitServiceUsage(serviceUsage state.ServiceUsage) {
metrics.SetGaugeWithLabels(
[]string{"consul", "state", "services"},

View File

@ -9,16 +9,151 @@ import (
"github.com/armon/go-metrics"
"github.com/stretchr/testify/require"
"github.com/hashicorp/serf/serf"
"github.com/hashicorp/consul/agent/consul/state"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/sdk/testutil"
"github.com/hashicorp/serf/serf"
)
func newStateStore() (*state.Store, error) {
return state.NewStateStore(nil), nil
}
func TestUsageReporter_emitNodeUsage_OSS(t *testing.T) {
type testCase struct {
modfiyStateStore func(t *testing.T, s *state.Store)
getMembersFunc getMembersFunc
expectedGauges map[string]metrics.GaugeValue
}
cases := map[string]testCase{
"empty-state": {
expectedGauges: map[string]metrics.GaugeValue{
// --- node ---
"consul.usage.test.consul.state.nodes;datacenter=dc1": {
Name: "consul.usage.test.consul.state.nodes",
Value: 0,
Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}},
},
// --- member ---
"consul.usage.test.consul.members.clients;datacenter=dc1": {
Name: "consul.usage.test.consul.members.clients",
Value: 0,
Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}},
},
"consul.usage.test.consul.members.servers;datacenter=dc1": {
Name: "consul.usage.test.consul.members.servers",
Value: 0,
Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}},
},
// --- service ---
"consul.usage.test.consul.state.services;datacenter=dc1": {
Name: "consul.usage.test.consul.state.services",
Value: 0,
Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}},
},
"consul.usage.test.consul.state.service_instances;datacenter=dc1": {
Name: "consul.usage.test.consul.state.service_instances",
Value: 0,
Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}},
},
},
getMembersFunc: func() []serf.Member { return []serf.Member{} },
},
"nodes": {
modfiyStateStore: func(t *testing.T, s *state.Store) {
require.NoError(t, s.EnsureNode(1, &structs.Node{Node: "foo", Address: "127.0.0.1"}))
require.NoError(t, s.EnsureNode(2, &structs.Node{Node: "bar", Address: "127.0.0.2"}))
require.NoError(t, s.EnsureNode(3, &structs.Node{Node: "baz", Address: "127.0.0.2"}))
},
getMembersFunc: func() []serf.Member {
return []serf.Member{
{
Name: "foo",
Tags: map[string]string{"role": "consul"},
Status: serf.StatusAlive,
},
{
Name: "bar",
Tags: map[string]string{"role": "consul"},
Status: serf.StatusAlive,
},
{
Name: "baz",
Tags: map[string]string{"role": "node"},
Status: serf.StatusAlive,
},
}
},
expectedGauges: map[string]metrics.GaugeValue{
// --- node ---
"consul.usage.test.consul.state.nodes;datacenter=dc1": {
Name: "consul.usage.test.consul.state.nodes",
Value: 3,
Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}},
},
// --- member ---
"consul.usage.test.consul.members.servers;datacenter=dc1": {
Name: "consul.usage.test.consul.members.servers",
Value: 2,
Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}},
},
"consul.usage.test.consul.members.clients;datacenter=dc1": {
Name: "consul.usage.test.consul.members.clients",
Value: 1,
Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}},
},
// --- service ---
"consul.usage.test.consul.state.services;datacenter=dc1": {
Name: "consul.usage.test.consul.state.services",
Value: 0,
Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}},
},
"consul.usage.test.consul.state.service_instances;datacenter=dc1": {
Name: "consul.usage.test.consul.state.service_instances",
Value: 0,
Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}},
},
},
},
}
for name, tcase := range cases {
t.Run(name, func(t *testing.T) {
// Only have a single interval for the test
sink := metrics.NewInmemSink(1*time.Minute, 1*time.Minute)
cfg := metrics.DefaultConfig("consul.usage.test")
cfg.EnableHostname = false
metrics.NewGlobal(cfg, sink)
mockStateProvider := &mockStateProvider{}
s, err := newStateStore()
require.NoError(t, err)
if tcase.modfiyStateStore != nil {
tcase.modfiyStateStore(t, s)
}
mockStateProvider.On("State").Return(s)
reporter, err := NewUsageMetricsReporter(
new(Config).
WithStateProvider(mockStateProvider).
WithLogger(testutil.Logger(t)).
WithDatacenter("dc1").
WithGetMembersFunc(tcase.getMembersFunc),
)
require.NoError(t, err)
reporter.runOnce()
intervals := sink.Data()
require.Len(t, intervals, 1)
intv := intervals[0]
assertEqualGaugeMaps(t, tcase.expectedGauges, intv.Gauges)
})
}
}
func TestUsageReporter_emitServiceUsage_OSS(t *testing.T) {
type testCase struct {
modfiyStateStore func(t *testing.T, s *state.Store)
@ -28,11 +163,28 @@ func TestUsageReporter_emitServiceUsage_OSS(t *testing.T) {
cases := map[string]testCase{
"empty-state": {
expectedGauges: map[string]metrics.GaugeValue{
// --- node ---
"consul.usage.test.consul.state.nodes;datacenter=dc1": {
Name: "consul.usage.test.consul.state.nodes",
Value: 0,
Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}},
},
// --- member ---
"consul.usage.test.consul.members.servers;datacenter=dc1": {
Name: "consul.usage.test.consul.members.servers",
Value: 0,
Labels: []metrics.Label{
{Name: "datacenter", Value: "dc1"},
},
},
"consul.usage.test.consul.members.clients;datacenter=dc1": {
Name: "consul.usage.test.consul.members.clients",
Value: 0,
Labels: []metrics.Label{
{Name: "datacenter", Value: "dc1"},
},
},
// --- service ---
"consul.usage.test.consul.state.services;datacenter=dc1": {
Name: "consul.usage.test.consul.state.services",
Value: 0,
@ -47,35 +199,21 @@ func TestUsageReporter_emitServiceUsage_OSS(t *testing.T) {
{Name: "datacenter", Value: "dc1"},
},
},
"consul.usage.test.consul.members.clients;datacenter=dc1": {
Name: "consul.usage.test.consul.members.clients",
Value: 0,
Labels: []metrics.Label{
{Name: "datacenter", Value: "dc1"},
},
},
"consul.usage.test.consul.members.servers;datacenter=dc1": {
Name: "consul.usage.test.consul.members.servers",
Value: 0,
Labels: []metrics.Label{
{Name: "datacenter", Value: "dc1"},
},
},
},
getMembersFunc: func() []serf.Member { return []serf.Member{} },
},
"nodes-and-services": {
modfiyStateStore: func(t *testing.T, s *state.Store) {
require.Nil(t, s.EnsureNode(1, &structs.Node{Node: "foo", Address: "127.0.0.1"}))
require.Nil(t, s.EnsureNode(2, &structs.Node{Node: "bar", Address: "127.0.0.2"}))
require.Nil(t, s.EnsureNode(3, &structs.Node{Node: "baz", Address: "127.0.0.2"}))
require.Nil(t, s.EnsureNode(4, &structs.Node{Node: "qux", Address: "127.0.0.3"}))
require.NoError(t, s.EnsureNode(1, &structs.Node{Node: "foo", Address: "127.0.0.1"}))
require.NoError(t, s.EnsureNode(2, &structs.Node{Node: "bar", Address: "127.0.0.2"}))
require.NoError(t, s.EnsureNode(3, &structs.Node{Node: "baz", Address: "127.0.0.2"}))
require.NoError(t, s.EnsureNode(4, &structs.Node{Node: "qux", Address: "127.0.0.3"}))
// Typical services and some consul services spread across two nodes
require.Nil(t, s.EnsureService(5, "foo", &structs.NodeService{ID: "db", Service: "db", Tags: nil, Address: "", Port: 5000}))
require.Nil(t, s.EnsureService(6, "bar", &structs.NodeService{ID: "api", Service: "api", Tags: nil, Address: "", Port: 5000}))
require.Nil(t, s.EnsureService(7, "foo", &structs.NodeService{ID: "consul", Service: "consul", Tags: nil}))
require.Nil(t, s.EnsureService(8, "bar", &structs.NodeService{ID: "consul", Service: "consul", Tags: nil}))
require.NoError(t, s.EnsureService(5, "foo", &structs.NodeService{ID: "db", Service: "db", Tags: nil, Address: "", Port: 5000}))
require.NoError(t, s.EnsureService(6, "bar", &structs.NodeService{ID: "api", Service: "api", Tags: nil, Address: "", Port: 5000}))
require.NoError(t, s.EnsureService(7, "foo", &structs.NodeService{ID: "consul", Service: "consul", Tags: nil}))
require.NoError(t, s.EnsureService(8, "bar", &structs.NodeService{ID: "consul", Service: "consul", Tags: nil}))
},
getMembersFunc: func() []serf.Member {
return []serf.Member{
@ -102,21 +240,16 @@ func TestUsageReporter_emitServiceUsage_OSS(t *testing.T) {
}
},
expectedGauges: map[string]metrics.GaugeValue{
// --- node ---
"consul.usage.test.consul.state.nodes;datacenter=dc1": {
Name: "consul.usage.test.consul.state.nodes",
Value: 4,
Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}},
},
"consul.usage.test.consul.state.services;datacenter=dc1": {
Name: "consul.usage.test.consul.state.services",
Value: 3,
Labels: []metrics.Label{
{Name: "datacenter", Value: "dc1"},
},
},
"consul.usage.test.consul.state.service_instances;datacenter=dc1": {
Name: "consul.usage.test.consul.state.service_instances",
Value: 4,
// --- member ---
"consul.usage.test.consul.members.servers;datacenter=dc1": {
Name: "consul.usage.test.consul.members.servers",
Value: 2,
Labels: []metrics.Label{
{Name: "datacenter", Value: "dc1"},
},
@ -128,26 +261,18 @@ func TestUsageReporter_emitServiceUsage_OSS(t *testing.T) {
{Name: "datacenter", Value: "dc1"},
},
},
"consul.usage.test.consul.members.servers;datacenter=dc1": {
Name: "consul.usage.test.consul.members.servers",
Value: 2,
// --- service ---
"consul.usage.test.consul.state.services;datacenter=dc1": {
Name: "consul.usage.test.consul.state.services",
Value: 3,
Labels: []metrics.Label{
{Name: "datacenter", Value: "dc1"},
},
},
"consul.usage.test.consul.members.clients;segment=a;datacenter=dc1": {
Name: "consul.usage.test.consul.members.clients",
Value: 1,
"consul.usage.test.consul.state.service_instances;datacenter=dc1": {
Name: "consul.usage.test.consul.state.service_instances",
Value: 4,
Labels: []metrics.Label{
{Name: "segment", Value: "a"},
{Name: "datacenter", Value: "dc1"},
},
},
"consul.usage.test.consul.members.clients;segment=b;datacenter=dc1": {
Name: "consul.usage.test.consul.members.clients",
Value: 1,
Labels: []metrics.Label{
{Name: "segment", Value: "b"},
{Name: "datacenter", Value: "dc1"},
},
},
@ -185,7 +310,7 @@ func TestUsageReporter_emitServiceUsage_OSS(t *testing.T) {
require.Len(t, intervals, 1)
intv := intervals[0]
require.Equal(t, tcase.expectedGauges, intv.Gauges)
assertEqualGaugeMaps(t, tcase.expectedGauges, intv.Gauges)
})
}
}

View File

@ -2,16 +2,12 @@ package usagemetrics
import (
"testing"
"time"
"github.com/armon/go-metrics"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/agent/consul/state"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/sdk/testutil"
"github.com/hashicorp/serf/serf"
)
type mockStateProvider struct {
@ -23,106 +19,20 @@ func (m *mockStateProvider) State() *state.Store {
return retValues.Get(0).(*state.Store)
}
func TestUsageReporter_Run_Nodes(t *testing.T) {
type testCase struct {
modfiyStateStore func(t *testing.T, s *state.Store)
getMembersFunc getMembersFunc
expectedGauges map[string]metrics.GaugeValue
}
cases := map[string]testCase{
"empty-state": {
expectedGauges: map[string]metrics.GaugeValue{
"consul.usage.test.consul.state.nodes;datacenter=dc1": {
Name: "consul.usage.test.consul.state.nodes",
Value: 0,
Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}},
},
},
getMembersFunc: func() []serf.Member { return []serf.Member{} },
},
"nodes": {
modfiyStateStore: func(t *testing.T, s *state.Store) {
require.Nil(t, s.EnsureNode(1, &structs.Node{Node: "foo", Address: "127.0.0.1"}))
require.Nil(t, s.EnsureNode(2, &structs.Node{Node: "bar", Address: "127.0.0.2"}))
require.Nil(t, s.EnsureNode(3, &structs.Node{Node: "baz", Address: "127.0.0.2"}))
},
getMembersFunc: func() []serf.Member {
return []serf.Member{
{
Name: "foo",
Tags: map[string]string{"role": "consul"},
Status: serf.StatusAlive,
},
{
Name: "bar",
Tags: map[string]string{"role": "consul"},
Status: serf.StatusAlive,
},
{
Name: "baz",
Tags: map[string]string{"role": "node"},
Status: serf.StatusAlive,
},
}
},
expectedGauges: map[string]metrics.GaugeValue{
"consul.usage.test.consul.state.nodes;datacenter=dc1": {
Name: "consul.usage.test.consul.state.nodes",
Value: 3,
Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}},
},
"consul.usage.test.consul.members.clients;datacenter=dc1": {
Name: "consul.usage.test.consul.members.clients",
Value: 1,
Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}},
},
"consul.usage.test.consul.members.servers;datacenter=dc1": {
Name: "consul.usage.test.consul.members.servers",
Value: 2,
Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}},
},
},
},
func assertEqualGaugeMaps(t *testing.T, expectedMap, foundMap map[string]metrics.GaugeValue) {
t.Helper()
for key := range foundMap {
if _, ok := expectedMap[key]; !ok {
t.Errorf("found unexpected gauge key: %s", key)
}
}
for name, tcase := range cases {
t.Run(name, func(t *testing.T) {
// Only have a single interval for the test
sink := metrics.NewInmemSink(1*time.Minute, 1*time.Minute)
cfg := metrics.DefaultConfig("consul.usage.test")
cfg.EnableHostname = false
metrics.NewGlobal(cfg, sink)
mockStateProvider := &mockStateProvider{}
s, err := newStateStore()
require.NoError(t, err)
if tcase.modfiyStateStore != nil {
tcase.modfiyStateStore(t, s)
}
mockStateProvider.On("State").Return(s)
reporter, err := NewUsageMetricsReporter(
new(Config).
WithStateProvider(mockStateProvider).
WithLogger(testutil.Logger(t)).
WithDatacenter("dc1").
WithGetMembersFunc(tcase.getMembersFunc),
)
require.NoError(t, err)
reporter.runOnce()
intervals := sink.Data()
require.Len(t, intervals, 1)
intv := intervals[0]
// Range over the expected values instead of just doing an Equal
// comparison on the maps because of different metrics emitted between
// OSS and Ent. The enterprise and OSS tests have a full equality
// comparison on the maps.
for key, expected := range tcase.expectedGauges {
require.Equal(t, expected, intv.Gauges[key])
}
})
for key, expected := range expectedMap {
if _, ok := foundMap[key]; !ok {
t.Errorf("did not find expected gauge key: %s", key)
continue
}
assert.Equal(t, expected, foundMap[key], "gauge key mismatch on %q", key)
}
}

View File

@ -82,6 +82,9 @@ func (s *HTTPHandlers) CoordinateNodes(resp http.ResponseWriter, req *http.Reque
if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done {
return nil, nil
}
if err := parseEntMetaPartition(req, &args.EnterpriseMeta); err != nil {
return nil, err
}
var out structs.IndexedCoordinates
defer setMeta(resp, &out.QueryMeta)
@ -105,6 +108,9 @@ func (s *HTTPHandlers) CoordinateNode(resp http.ResponseWriter, req *http.Reques
if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done {
return nil, nil
}
if err := parseEntMetaPartition(req, &args.EnterpriseMeta); err != nil {
return nil, err
}
var out structs.IndexedCoordinates
defer setMeta(resp, &out.QueryMeta)
@ -158,6 +164,10 @@ func (s *HTTPHandlers) CoordinateUpdate(resp http.ResponseWriter, req *http.Requ
s.parseDC(req, &args.Datacenter)
s.parseToken(req, &args.Token)
if err := s.parseEntMetaNoWildcard(req, &args.EnterpriseMeta); err != nil {
return nil, err
}
var reply struct{}
if err := s.agent.RPC("Coordinate.Update", &args, &reply); err != nil {
return nil, err

View File

@ -122,6 +122,8 @@ type DNSServer struct {
// recursorEnabled stores whether the recursor handler is enabled as an atomic flag.
// the recursor handler is only enabled if recursors are configured. This flag is used during config hot-reloading
recursorEnabled uint32
defaultEnterpriseMeta structs.EnterpriseMeta
}
func NewDNSServer(a *Agent) (*DNSServer, error) {
@ -130,10 +132,11 @@ func NewDNSServer(a *Agent) (*DNSServer, error) {
altDomain := dns.Fqdn(strings.ToLower(a.config.DNSAltDomain))
srv := &DNSServer{
agent: a,
domain: domain,
altDomain: altDomain,
logger: a.logger.Named(logging.DNS),
agent: a,
domain: domain,
altDomain: altDomain,
logger: a.logger.Named(logging.DNS),
defaultEnterpriseMeta: *a.agentEnterpriseMeta(),
}
cfg, err := GetDNSConfig(a.config)
if err != nil {
@ -414,7 +417,7 @@ func (d *DNSServer) handlePtr(resp dns.ResponseWriter, req *dns.Msg) {
AllowStale: cfg.AllowStale,
},
ServiceAddress: serviceAddress,
EnterpriseMeta: *structs.WildcardEnterpriseMetaInDefaultPartition(),
EnterpriseMeta: *d.defaultEnterpriseMeta.WildcardEnterpriseMetaForPartition(),
}
var sout structs.IndexedServiceNodes
@ -548,7 +551,7 @@ func (d *DNSServer) nameservers(cfg *dnsConfig, maxRecursionLevel int) (ns []dns
Service: structs.ConsulServiceName,
Connect: false,
Ingress: false,
EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
EnterpriseMeta: d.defaultEnterpriseMeta,
})
if err != nil {
d.logger.Warn("Unable to get list of servers", "error", err)
@ -645,8 +648,8 @@ func (d *DNSServer) dispatch(remoteAddr net.Addr, req, resp *dns.Msg, maxRecursi
// By default the query is in the default datacenter
datacenter := d.agent.config.Datacenter
// have to deref to clone it so we don't modify
var entMeta structs.EnterpriseMeta
// have to deref to clone it so we don't modify (start from the agent's defaults)
var entMeta = d.defaultEnterpriseMeta
// Get the QName without the domain suffix
qName := strings.ToLower(dns.Fqdn(req.Question[0].Name))
@ -1316,9 +1319,10 @@ func (d *DNSServer) preparedQueryLookup(cfg *dnsConfig, datacenter, query string
// send the local agent's data through to allow distance sorting
// relative to ourself on the server side.
Agent: structs.QuerySource{
Datacenter: d.agent.config.Datacenter,
Segment: d.agent.config.SegmentName,
Node: d.agent.config.NodeName,
Datacenter: d.agent.config.Datacenter,
Segment: d.agent.config.SegmentName,
Node: d.agent.config.NodeName,
NodePartition: d.agent.config.PartitionOrEmpty(),
},
}

View File

@ -1032,6 +1032,7 @@ func (s *HTTPHandlers) parseSource(req *http.Request, source *structs.QuerySourc
} else {
source.Node = node
}
source.NodePartition = s.agent.config.PartitionOrEmpty()
}
}

View File

@ -17,7 +17,8 @@ func (s *HTTPHandlers) parseEntMeta(req *http.Request, entMeta *structs.Enterpri
if queryNS := req.URL.Query().Get("ns"); queryNS != "" {
return BadRequestError{Reason: "Invalid query parameter: \"ns\" - Namespaces are a Consul Enterprise feature"}
}
return nil
return parseEntMetaPartition(req, entMeta)
}
func (s *HTTPHandlers) validateEnterpriseIntentionNamespace(logName, ns string, _ bool) error {
@ -74,7 +75,13 @@ func (s *HTTPHandlers) uiTemplateDataTransform(data map[string]interface{}) erro
return nil
}
// parseEntMetaPartition rejects any partition supplied on the request, since admin partitions are a Consul Enterprise feature.
func parseEntMetaPartition(req *http.Request, meta *structs.EnterpriseMeta) error {
if headerAP := req.Header.Get("X-Consul-Partition"); headerAP != "" {
return BadRequestError{Reason: "Invalid header: \"X-Consul-Partition\" - Partitions are a Consul Enterprise feature"}
}
if queryAP := req.URL.Query().Get("partition"); queryAP != "" {
return BadRequestError{Reason: "Invalid query parameter: \"partition\" - Partitions are a Consul Enterprise feature"}
}
return nil
}
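In OSS builds the effect is that any request naming a partition fails fast with a bad-request error. A rough standalone sketch of that behavior (rejectPartitionParams, the sample URL, and the plain error values are ours for illustration, not the agent's actual handler wiring):

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// rejectPartitionParams mimics the OSS check above: a partition passed via
// the X-Consul-Partition header or the "partition" query parameter is
// rejected because admin partitions are a Consul Enterprise feature.
func rejectPartitionParams(req *http.Request) error {
	if headerAP := req.Header.Get("X-Consul-Partition"); headerAP != "" {
		return fmt.Errorf("invalid header %q: partitions are a Consul Enterprise feature", "X-Consul-Partition")
	}
	if queryAP := req.URL.Query().Get("partition"); queryAP != "" {
		return fmt.Errorf("invalid query parameter %q: partitions are a Consul Enterprise feature", "partition")
	}
	return nil
}

func main() {
	ok := httptest.NewRequest(http.MethodGet, "/v1/coordinate/nodes", nil)
	bad := httptest.NewRequest(http.MethodGet, "/v1/coordinate/nodes?partition=team1", nil)

	fmt.Println(rejectPartitionParams(ok))  // <nil>
	fmt.Println(rejectPartitionParams(bad)) // rejected: partitions are Enterprise-only
}
```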

View File

@ -58,6 +58,7 @@ type Config struct {
DiscardCheckOutput bool
NodeID types.NodeID
NodeName string
Partition string // this defaults if empty
TaggedAddresses map[string]string
}
@ -176,6 +177,8 @@ type State struct {
// Config is the agent config
config Config
agentEnterpriseMeta structs.EnterpriseMeta
// nodeInfoInSync tracks whether the server has our correct top-level
// node information in sync
nodeInfoInSync bool
@ -208,14 +211,15 @@ type State struct {
// NewState creates a new local state for the agent.
func NewState(c Config, logger hclog.Logger, tokens *token.Store) *State {
l := &State{
config: c,
logger: logger,
services: make(map[structs.ServiceID]*ServiceState),
checks: make(map[structs.CheckID]*CheckState),
checkAliases: make(map[structs.ServiceID]map[structs.CheckID]chan<- struct{}),
metadata: make(map[string]string),
tokens: tokens,
notifyHandlers: make(map[chan<- struct{}]struct{}),
config: c,
logger: logger,
services: make(map[structs.ServiceID]*ServiceState),
checks: make(map[structs.CheckID]*CheckState),
checkAliases: make(map[structs.ServiceID]map[structs.CheckID]chan<- struct{}),
metadata: make(map[string]string),
tokens: tokens,
notifyHandlers: make(map[chan<- struct{}]struct{}),
agentEnterpriseMeta: *structs.NodeEnterpriseMetaInPartition(c.Partition),
}
l.SetDiscardCheckOutput(c.DiscardCheckOutput)
return l
@ -267,6 +271,10 @@ func (l *State) addServiceLocked(service *structs.NodeService, token string) err
service.ID = service.Service
}
if l.agentEnterpriseMeta.PartitionOrDefault() != service.PartitionOrDefault() {
return fmt.Errorf("cannot add service %q to node in partition %q", service.CompoundServiceID(), l.config.Partition)
}
l.setServiceStateLocked(&ServiceState{
Service: service,
Token: token,
@ -340,8 +348,8 @@ func (l *State) removeServiceLocked(id structs.ServiceID) error {
return nil
}
// Service returns the locally registered service that the
// agent is aware of and are being kept in sync with the server
// Service returns the locally registered service with this ID that the agent
// is aware of and is keeping in sync with the server.
func (l *State) Service(id structs.ServiceID) *structs.NodeService {
l.RLock()
defer l.RUnlock()
@ -353,9 +361,43 @@ func (l *State) Service(id structs.ServiceID) *structs.NodeService {
return s.Service
}
// Services returns the locally registered services that the
// ServicesByName returns all the locally registered service instances that the
// agent is aware of with this name and are being kept in sync with the server
func (l *State) ServicesByName(sn structs.ServiceName) []*structs.NodeService {
l.RLock()
defer l.RUnlock()
var found []*structs.NodeService
for id, s := range l.services {
if s.Deleted {
continue
}
if !sn.EnterpriseMeta.Matches(&id.EnterpriseMeta) {
continue
}
if s.Service.Service == sn.Name {
found = append(found, s.Service)
}
}
return found
}
// AllServices returns the locally registered services that the
// agent is aware of and are being kept in sync with the server
func (l *State) AllServices() map[structs.ServiceID]*structs.NodeService {
return l.listServices(false, nil)
}
// Services returns the locally registered services that the agent is aware of
// and are being kept in sync with the server
//
// Results are scoped to the provided namespace and partition.
func (l *State) Services(entMeta *structs.EnterpriseMeta) map[structs.ServiceID]*structs.NodeService {
return l.listServices(true, entMeta)
}
func (l *State) listServices(filtered bool, entMeta *structs.EnterpriseMeta) map[structs.ServiceID]*structs.NodeService {
l.RLock()
defer l.RUnlock()
@ -365,7 +407,7 @@ func (l *State) Services(entMeta *structs.EnterpriseMeta) map[structs.ServiceID]
continue
}
if !entMeta.Matches(&id.EnterpriseMeta) {
if filtered && !entMeta.Matches(&id.EnterpriseMeta) {
continue
}
m[id] = s.Service
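AllServices and Services share a single lister and differ only in whether the enterprise-meta filter is applied, which keeps the wildcard "list everything" path out of the filtering logic. A simplified standalone sketch of that split (the types and helper below are stand-ins, not the agent's real ServiceID and EnterpriseMeta types):

```go
package main

import "fmt"

// meta is a stand-in for the namespace/partition pair carried by each entry.
type meta struct {
	Partition string
	Namespace string
}

func (m meta) Matches(other meta) bool {
	return m.Partition == other.Partition && m.Namespace == other.Namespace
}

// listServices applies the filter only when filtered is true; the unfiltered
// path (AllServices in the change above) ignores the filter entirely.
func listServices(services map[meta][]string, filtered bool, filter meta) []string {
	var out []string
	for m, names := range services {
		if filtered && !filter.Matches(m) {
			continue
		}
		out = append(out, names...)
	}
	return out
}

func main() {
	services := map[meta][]string{
		{Partition: "default", Namespace: "default"}: {"web"},
		{Partition: "default", Namespace: "team1"}:   {"api"},
	}
	fmt.Println(listServices(services, false, meta{}))                                        // everything
	fmt.Println(listServices(services, true, meta{Partition: "default", Namespace: "team1"})) // just "api"
}
```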
@ -395,6 +437,10 @@ func (l *State) SetServiceState(s *ServiceState) {
l.Lock()
defer l.Unlock()
if l.agentEnterpriseMeta.PartitionOrDefault() != s.Service.PartitionOrDefault() {
return
}
l.setServiceStateLocked(s)
}
@ -483,15 +529,19 @@ func (l *State) addCheckLocked(check *structs.HealthCheck, token string) error {
check.Output = ""
}
// hard-set the node name and partition
check.Node = l.config.NodeName
check.EnterpriseMeta = structs.NewEnterpriseMetaWithPartition(
l.agentEnterpriseMeta.PartitionOrEmpty(),
check.NamespaceOrEmpty(),
)
// if there is a serviceID associated with the check, make sure it exists before adding it
// NOTE - This logic may be moved into the Agent's AddCheck method after a refactor
if _, ok := l.services[check.CompoundServiceID()]; check.ServiceID != "" && !ok {
return fmt.Errorf("Check %q refers to non-existent service %q", check.CheckID, check.ServiceID)
}
// hard-set the node name
check.Node = l.config.NodeName
l.setCheckStateLocked(&CheckState{
Check: check,
Token: token,
@ -510,6 +560,13 @@ func (l *State) AddAliasCheck(checkID structs.CheckID, srcServiceID structs.Serv
l.Lock()
defer l.Unlock()
if l.agentEnterpriseMeta.PartitionOrDefault() != checkID.PartitionOrDefault() {
return fmt.Errorf("cannot add alias check %q to node in partition %q", checkID.String(), l.config.Partition)
}
if l.agentEnterpriseMeta.PartitionOrDefault() != srcServiceID.PartitionOrDefault() {
return fmt.Errorf("cannot add alias check for %q to node in partition %q", srcServiceID.String(), l.config.Partition)
}
m, ok := l.checkAliases[srcServiceID]
if !ok {
m = make(map[structs.CheckID]chan<- struct{})
@ -663,11 +720,23 @@ func (l *State) Check(id structs.CheckID) *structs.HealthCheck {
return c.Check
}
// AllChecks returns the locally registered checks that the
// agent is aware of and are being kept in sync with the server
func (l *State) AllChecks() map[structs.CheckID]*structs.HealthCheck {
return l.listChecks(false, nil)
}
// Checks returns the locally registered checks that the
// agent is aware of and are being kept in sync with the server
//
// Results are scoped to the provided namespace and partition.
func (l *State) Checks(entMeta *structs.EnterpriseMeta) map[structs.CheckID]*structs.HealthCheck {
return l.listChecks(true, entMeta)
}
func (l *State) listChecks(filtered bool, entMeta *structs.EnterpriseMeta) map[structs.CheckID]*structs.HealthCheck {
m := make(map[structs.CheckID]*structs.HealthCheck)
for id, c := range l.CheckStates(entMeta) {
for id, c := range l.listCheckStates(filtered, entMeta) {
m[id] = c.Check
}
return m
@ -719,6 +788,10 @@ func (l *State) SetCheckState(c *CheckState) {
l.Lock()
defer l.Unlock()
if l.agentEnterpriseMeta.PartitionOrDefault() != c.Check.PartitionOrDefault() {
return
}
l.setCheckStateLocked(c)
}
@ -737,11 +810,25 @@ func (l *State) setCheckStateLocked(c *CheckState) {
l.TriggerSyncChanges()
}
// AllCheckStates returns a shallow copy of all health check state records.
// The map contains a shallow copy of the current check states.
//
// The defer timers still point to the original values and must not be modified.
func (l *State) AllCheckStates() map[structs.CheckID]*CheckState {
return l.listCheckStates(false, nil)
}
// CheckStates returns a shallow copy of all health check state records.
// The map contains a shallow copy of the current check states.
//
// The defer timers still point to the original values and must not be modified.
//
// Results are scoped to the provided namespace and partition.
func (l *State) CheckStates(entMeta *structs.EnterpriseMeta) map[structs.CheckID]*CheckState {
return l.listCheckStates(true, entMeta)
}
func (l *State) listCheckStates(filtered bool, entMeta *structs.EnterpriseMeta) map[structs.CheckID]*CheckState {
l.RLock()
defer l.RUnlock()
@ -750,7 +837,7 @@ func (l *State) CheckStates(entMeta *structs.EnterpriseMeta) map[structs.CheckID
if c.Deleted {
continue
}
if !entMeta.Matches(&id.EnterpriseMeta) {
if filtered && !entMeta.Matches(&id.EnterpriseMeta) {
continue
}
m[id] = c.Clone()
@ -758,12 +845,27 @@ func (l *State) CheckStates(entMeta *structs.EnterpriseMeta) map[structs.CheckID
return m
}
// AllCriticalCheckStates returns the locally registered checks that the
// agent is aware of and are being kept in sync with the server.
// The map contains a shallow copy of the current check states.
//
// The defer timers still point to the original values and must not be modified.
func (l *State) AllCriticalCheckStates() map[structs.CheckID]*CheckState {
return l.listCriticalCheckStates(false, nil)
}
// CriticalCheckStates returns the locally registered checks that the
// agent is aware of and are being kept in sync with the server.
// The map contains a shallow copy of the current check states.
//
// The defer timers still point to the original values and must not be modified.
//
// Results are scoped to the provided namespace and partition.
func (l *State) CriticalCheckStates(entMeta *structs.EnterpriseMeta) map[structs.CheckID]*CheckState {
return l.listCriticalCheckStates(true, entMeta)
}
func (l *State) listCriticalCheckStates(filtered bool, entMeta *structs.EnterpriseMeta) map[structs.CheckID]*CheckState {
l.RLock()
defer l.RUnlock()
@ -772,7 +874,7 @@ func (l *State) CriticalCheckStates(entMeta *structs.EnterpriseMeta) map[structs
if c.Deleted || !c.Critical() {
continue
}
if !entMeta.Matches(&id.EnterpriseMeta) {
if filtered && !entMeta.Matches(&id.EnterpriseMeta) {
continue
}
m[id] = c.Clone()
@ -887,7 +989,7 @@ func (l *State) updateSyncState() error {
AllowStale: true,
MaxStaleDuration: fullSyncReadMaxStale,
},
EnterpriseMeta: *structs.WildcardEnterpriseMetaInDefaultPartition(),
EnterpriseMeta: *l.agentEnterpriseMeta.WildcardEnterpriseMetaForPartition(),
}
var out1 structs.IndexedNodeServiceList
@ -958,7 +1060,7 @@ func (l *State) updateSyncState() error {
if ls == nil {
// The consul service is managed automatically and does
// not need to be deregistered
if id == structs.ConsulCompoundServiceID {
if structs.IsConsulServiceID(id) {
continue
}
@ -1002,7 +1104,7 @@ func (l *State) updateSyncState() error {
if lc == nil {
// The Serf check is created automatically and does not
// need to be deregistered.
if id == structs.SerfCompoundCheckID {
if structs.IsSerfCheckID(id) {
l.logger.Debug("Skipping remote check since it is managed automatically", "check", structs.SerfCheckID)
continue
}
@ -1366,6 +1468,7 @@ func (l *State) syncNodeInfo() error {
Address: l.config.AdvertiseAddr,
TaggedAddresses: l.config.TaggedAddresses,
NodeMeta: l.metadata,
EnterpriseMeta: l.agentEnterpriseMeta,
WriteRequest: structs.WriteRequest{Token: at},
}
var out struct{}

View File

@ -94,9 +94,10 @@ func (s *HTTPHandlers) preparedQueryExecute(id string, resp http.ResponseWriter,
args := structs.PreparedQueryExecuteRequest{
QueryIDOrName: id,
Agent: structs.QuerySource{
Node: s.agent.config.NodeName,
Datacenter: s.agent.config.Datacenter,
Segment: s.agent.config.SegmentName,
Node: s.agent.config.NodeName,
NodePartition: s.agent.config.PartitionOrEmpty(),
Datacenter: s.agent.config.Datacenter,
Segment: s.agent.config.SegmentName,
},
}
s.parseSource(req, &args.Source)
@ -178,9 +179,10 @@ func (s *HTTPHandlers) preparedQueryExplain(id string, resp http.ResponseWriter,
args := structs.PreparedQueryExecuteRequest{
QueryIDOrName: id,
Agent: structs.QuerySource{
Node: s.agent.config.NodeName,
Datacenter: s.agent.config.Datacenter,
Segment: s.agent.config.SegmentName,
Node: s.agent.config.NodeName,
NodePartition: s.agent.config.PartitionOrEmpty(),
Datacenter: s.agent.config.Datacenter,
Segment: s.agent.config.SegmentName,
},
}
s.parseSource(req, &args.Source)

View File

@ -86,7 +86,7 @@ func (s *handlerConnectProxy) initialize(ctx context.Context) (ConfigSnapshot, e
Datacenter: s.source.Datacenter,
QueryOptions: structs.QueryOptions{Token: s.token},
ServiceName: s.proxyCfg.DestinationServiceName,
EnterpriseMeta: structs.NewEnterpriseMetaInDefaultPartition(s.proxyID.NamespaceOrEmpty()),
EnterpriseMeta: s.proxyID.EnterpriseMeta,
}, intentionUpstreamsID, s.ch)
if err != nil {
return snap, err
@ -97,7 +97,7 @@ func (s *handlerConnectProxy) initialize(ctx context.Context) (ConfigSnapshot, e
Name: structs.MeshConfigMesh,
Datacenter: s.source.Datacenter,
QueryOptions: structs.QueryOptions{Token: s.token},
EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
EnterpriseMeta: *s.proxyID.DefaultEnterpriseMetaForPartition(),
}, meshConfigEntryID, s.ch)
if err != nil {
return snap, err
@ -162,6 +162,7 @@ func (s *handlerConnectProxy) initialize(ctx context.Context) (ConfigSnapshot, e
case structs.UpstreamDestTypeService:
fallthrough
// TODO (partition): pass Partition to DiscoveryChainRequest?
case "": // Treat unset as the default Service type
err = s.cache.Notify(ctx, cachetype.CompiledDiscoveryChainName, &structs.DiscoveryChainRequest{
Datacenter: s.source.Datacenter,
@ -228,7 +229,7 @@ func (s *handlerConnectProxy) handleUpdate(ctx context.Context, u cache.UpdateEv
// Use the centralized upstream defaults if they exist and there isn't specific configuration for this upstream
// This is only relevant to upstreams from intentions because for explicit upstreams the defaulting is handled
// by the ResolveServiceConfig endpoint.
wildcardSID := structs.NewServiceID(structs.WildcardSpecifier, structs.WildcardEnterpriseMetaInDefaultPartition())
wildcardSID := structs.NewServiceID(structs.WildcardSpecifier, s.proxyID.WildcardEnterpriseMetaForPartition())
defaults, ok := snap.ConnectProxy.UpstreamConfig[wildcardSID.String()]
if ok {
u = defaults

View File

@ -103,6 +103,7 @@ func (s *handlerIngressGateway) handleUpdate(ctx context.Context, u cache.Update
id: u.Identifier(),
name: u.DestinationName,
namespace: u.DestinationNamespace,
partition: u.DestinationPartition,
datacenter: s.source.Datacenter,
}
up := &handlerUpstreams{handlerState: s.handlerState}
@ -146,6 +147,7 @@ func makeUpstream(g *structs.GatewayService) structs.Upstream {
upstream := structs.Upstream{
DestinationName: g.Service.Name,
DestinationNamespace: g.Service.NamespaceOrDefault(),
DestinationPartition: g.Gateway.PartitionOrDefault(),
LocalBindPort: g.Port,
IngressHosts: g.Hosts,
// Pass the protocol that was configured on the ingress listener in order

View File

@ -145,7 +145,7 @@ func (m *Manager) syncState() {
defer m.mu.Unlock()
// Traverse the local state and ensure all proxy services are registered
services := m.State.Services(structs.WildcardEnterpriseMetaInDefaultPartition())
services := m.State.AllServices()
for sid, svc := range services {
if svc.Kind != structs.ServiceKindConnectProxy &&
svc.Kind != structs.ServiceKindTerminatingGateway &&

View File

@ -199,7 +199,7 @@ func TestManager_BasicLifecycle(t *testing.T) {
setup: func(t *testing.T, types *TestCacheTypes) {
// Note that we deliberately leave the 'geo-cache' prepared query to time out
types.health.Set(dbHealthCacheKey, &structs.IndexedCheckServiceNodes{
Nodes: TestUpstreamNodes(t),
Nodes: TestUpstreamNodes(t, db.Name),
})
types.compiledChain.Set(dbChainCacheKey, &structs.DiscoveryChainResponse{
Chain: dbDefaultChain(),
@ -225,7 +225,7 @@ func TestManager_BasicLifecycle(t *testing.T) {
WatchedUpstreams: nil, // Clone() clears this out
WatchedUpstreamEndpoints: map[string]map[string]structs.CheckServiceNodes{
db.String(): {
"db.default.dc1": TestUpstreamNodes(t),
"db.default.dc1": TestUpstreamNodes(t, db.Name),
},
},
WatchedGateways: nil, // Clone() clears this out
@ -252,7 +252,7 @@ func TestManager_BasicLifecycle(t *testing.T) {
setup: func(t *testing.T, types *TestCacheTypes) {
// Note that we deliberately leave the 'geo-cache' prepared query to time out
types.health.Set(db_v1_HealthCacheKey, &structs.IndexedCheckServiceNodes{
Nodes: TestUpstreamNodes(t),
Nodes: TestUpstreamNodes(t, db.Name),
})
types.health.Set(db_v2_HealthCacheKey, &structs.IndexedCheckServiceNodes{
Nodes: TestUpstreamNodesAlternate(t),
@ -281,7 +281,7 @@ func TestManager_BasicLifecycle(t *testing.T) {
WatchedUpstreams: nil, // Clone() clears this out
WatchedUpstreamEndpoints: map[string]map[string]structs.CheckServiceNodes{
db.String(): {
"v1.db.default.dc1": TestUpstreamNodes(t),
"v1.db.default.dc1": TestUpstreamNodes(t, db.Name),
"v2.db.default.dc1": TestUpstreamNodesAlternate(t),
},
},
@ -330,6 +330,7 @@ func TestManager_BasicLifecycle(t *testing.T) {
rootsCacheKey, leafCacheKey,
roots,
webProxyCopy.(*structs.NodeService),
local.Config{},
expectSnapCopy.(*ConfigSnapshot),
)
})
@ -349,13 +350,14 @@ func testManager_BasicLifecycle(
rootsCacheKey, leafCacheKey string,
roots *structs.IndexedCARoots,
webProxy *structs.NodeService,
agentConfig local.Config,
expectSnap *ConfigSnapshot,
) {
c := TestCacheWithTypes(t, types)
require := require.New(t)
logger := testutil.Logger(t)
state := local.NewState(local.Config{}, logger, &token.Store{})
state := local.NewState(agentConfig, logger, &token.Store{})
source := &structs.QuerySource{Datacenter: "dc1"}
// Stub state syncing

View File

@ -29,12 +29,14 @@ func (s *handlerMeshGateway) initialize(ctx context.Context) (ConfigSnapshot, er
return snap, err
}
wildcardEntMeta := s.proxyID.WildcardEnterpriseMetaForPartition()
// Watch for all services
err = s.cache.Notify(ctx, cachetype.CatalogServiceListName, &structs.DCSpecificRequest{
Datacenter: s.source.Datacenter,
QueryOptions: structs.QueryOptions{Token: s.token},
Source: *s.source,
EnterpriseMeta: *structs.WildcardEnterpriseMetaInDefaultPartition(),
EnterpriseMeta: *wildcardEntMeta,
}, serviceListWatchID, s.ch)
if err != nil {
@ -85,7 +87,7 @@ func (s *handlerMeshGateway) initialize(ctx context.Context) (ConfigSnapshot, er
Datacenter: s.source.Datacenter,
QueryOptions: structs.QueryOptions{Token: s.token},
Kind: structs.ServiceResolver,
EnterpriseMeta: *structs.WildcardEnterpriseMetaInDefaultPartition(),
EnterpriseMeta: *wildcardEntMeta,
}, serviceResolversWatchID, s.ch)
if err != nil {
s.logger.Named(logging.MeshGateway).

View File

@ -969,6 +969,7 @@ func TestState_WatchesAndUpdates(t *testing.T) {
require.Equal(t, snap.IngressGateway.Upstreams[key], structs.Upstreams{
{
DestinationNamespace: "default",
DestinationPartition: "default",
DestinationName: "api",
LocalBindPort: 9999,
Config: map[string]interface{}{

View File

@ -119,7 +119,7 @@ func TestIntentions() *structs.IndexedIntentionMatches {
// TestUpstreamNodes returns a sample service discovery result useful to
// mocking service discovery cache results.
func TestUpstreamNodes(t testing.T) structs.CheckServiceNodes {
func TestUpstreamNodes(t testing.T, service string) structs.CheckServiceNodes {
return structs.CheckServiceNodes{
structs.CheckServiceNode{
Node: &structs.Node{
@ -127,8 +127,9 @@ func TestUpstreamNodes(t testing.T) structs.CheckServiceNodes {
Node: "test1",
Address: "10.10.1.1",
Datacenter: "dc1",
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
},
Service: structs.TestNodeService(t),
Service: structs.TestNodeServiceWithName(t, service),
},
structs.CheckServiceNode{
Node: &structs.Node{
@ -136,12 +137,53 @@ func TestUpstreamNodes(t testing.T) structs.CheckServiceNodes {
Node: "test2",
Address: "10.10.1.2",
Datacenter: "dc1",
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
},
Service: structs.TestNodeService(t),
Service: structs.TestNodeServiceWithName(t, service),
},
}
}
// TestPreparedQueryNodes returns instances of a service spread across two datacenters.
// The service instance names use a "-target" suffix to ensure we don't use the
// prepared query's name for SAN validation.
// A prepared query's name won't always match the name of the service it targets.
func TestPreparedQueryNodes(t testing.T, query string) structs.CheckServiceNodes {
nodes := structs.CheckServiceNodes{
structs.CheckServiceNode{
Node: &structs.Node{
ID: "test1",
Node: "test1",
Address: "10.10.1.1",
Datacenter: "dc1",
},
Service: &structs.NodeService{
Kind: structs.ServiceKindConnectProxy,
Service: query + "-sidecar-proxy",
Port: 8080,
Proxy: structs.ConnectProxyConfig{
DestinationServiceName: query + "-target",
},
},
},
structs.CheckServiceNode{
Node: &structs.Node{
ID: "test2",
Node: "test2",
Address: "10.20.1.2",
Datacenter: "dc2",
},
Service: &structs.NodeService{
Kind: structs.ServiceKindTypical,
Service: query + "-target",
Port: 8080,
Connect: structs.ServiceConnect{Native: true},
},
},
}
return nodes
}
func TestUpstreamNodesInStatus(t testing.T, status string) structs.CheckServiceNodes {
return structs.CheckServiceNodes{
structs.CheckServiceNode{
@ -243,29 +285,6 @@ func TestUpstreamNodesInStatusDC2(t testing.T, status string) structs.CheckServi
}
}
func TestUpstreamNodesDC3(t testing.T) structs.CheckServiceNodes {
return structs.CheckServiceNodes{
structs.CheckServiceNode{
Node: &structs.Node{
ID: "test1",
Node: "test1",
Address: "10.30.1.1",
Datacenter: "dc3",
},
Service: structs.TestNodeService(t),
},
structs.CheckServiceNode{
Node: &structs.Node{
ID: "test2",
Node: "test2",
Address: "10.30.1.2",
Datacenter: "dc3",
},
Service: structs.TestNodeService(t),
},
}
}
func TestUpstreamNodesAlternate(t testing.T) structs.CheckServiceNodes {
return structs.CheckServiceNodes{
structs.CheckServiceNode{
@ -682,12 +701,12 @@ func TestConfigSnapshot(t testing.T) *ConfigSnapshot {
},
WatchedUpstreamEndpoints: map[string]map[string]structs.CheckServiceNodes{
"db": {
"db.default.dc1": TestUpstreamNodes(t),
"db.default.dc1": TestUpstreamNodes(t, "db"),
},
},
},
PreparedQueryEndpoints: map[string]structs.CheckServiceNodes{
"prepared_query:geo-cache": TestUpstreamNodes(t),
"prepared_query:geo-cache": TestPreparedQueryNodes(t, "geo-cache"),
},
Intentions: nil, // no intentions defined
IntentionsSet: true,
@ -801,6 +820,9 @@ func testConfigSnapshotDiscoveryChain(t testing.T, variation string, additionalE
ConfigSnapshotUpstreams: setupTestVariationConfigEntriesAndSnapshot(
t, variation, leaf, additionalEntries...,
),
PreparedQueryEndpoints: map[string]structs.CheckServiceNodes{
"prepared_query:geo-cache": TestPreparedQueryNodes(t, "geo-cache"),
},
Intentions: nil, // no intentions defined
IntentionsSet: true,
},
@ -1335,7 +1357,7 @@ func setupTestVariationConfigEntriesAndSnapshot(
},
WatchedUpstreamEndpoints: map[string]map[string]structs.CheckServiceNodes{
"db": {
"db.default.dc1": TestUpstreamNodes(t),
"db.default.dc1": TestUpstreamNodes(t, "db"),
},
},
UpstreamConfig: upstreams.ToMap(),
@ -1402,7 +1424,7 @@ func setupTestVariationConfigEntriesAndSnapshot(
}
case "splitter-with-resolver-redirect-multidc":
snap.WatchedUpstreamEndpoints["db"] = map[string]structs.CheckServiceNodes{
"v1.db.default.dc1": TestUpstreamNodes(t),
"v1.db.default.dc1": TestUpstreamNodes(t, "db"),
"v2.db.default.dc2": TestUpstreamNodesDC2(t),
}
case "chain-and-splitter":
@ -1410,7 +1432,7 @@ func setupTestVariationConfigEntriesAndSnapshot(
case "chain-and-router":
case "http-multiple-services":
snap.WatchedUpstreamEndpoints["foo"] = map[string]structs.CheckServiceNodes{
"foo.default.dc1": TestUpstreamNodes(t),
"foo.default.dc1": TestUpstreamNodes(t, "foo"),
}
snap.WatchedUpstreamEndpoints["bar"] = map[string]structs.CheckServiceNodes{
"bar.default.dc1": TestUpstreamNodesAlternate(t),
@ -1727,7 +1749,7 @@ func testConfigSnapshotTerminatingGateway(t testing.T, populateServices bool) *C
}
if populateServices {
web := structs.NewServiceName("web", nil)
webNodes := TestUpstreamNodes(t)
webNodes := TestUpstreamNodes(t, web.Name)
webNodes[0].Service.Meta = map[string]string{
"version": "1",
}

View File

@ -229,7 +229,8 @@ func (s *handlerUpstreams) resetWatchesFromChain(
// Outside of transparent mode we only watch the chain target, B,
// since A is a virtual service and traffic will not be sent to it.
if !watchedChainEndpoints && s.proxyCfg.Mode == structs.ProxyModeTransparent {
chainEntMeta := structs.NewEnterpriseMetaInDefaultPartition(chain.Namespace)
// TODO(partitions): add partition to the disco chain
chainEntMeta := structs.NewEnterpriseMetaWithPartition("" /*TODO*/, chain.Namespace)
opts := targetWatchOpts{
upstreamID: id,
@ -344,6 +345,7 @@ type discoveryChainWatchOpts struct {
id string
name string
namespace string
partition string
datacenter string
cfg reducedUpstreamConfig
meshGateway structs.MeshGatewayConfig

View File

@ -51,7 +51,7 @@ func (h *Server) Subscribe(req *pbsubscribe.SubscribeRequest, serverStream pbsub
logger.Trace("new subscription")
defer logger.Trace("subscription closed")
entMeta := structs.NewEnterpriseMetaInDefaultPartition(req.Namespace)
entMeta := structs.NewEnterpriseMetaWithPartition(req.Partition, req.Namespace)
authz, err := h.Backend.ResolveTokenAndDefaultMeta(req.Token, &entMeta, nil)
if err != nil {
return err
@ -94,6 +94,7 @@ func toStreamSubscribeRequest(req *pbsubscribe.SubscribeRequest, entMeta structs
Token: req.Token,
Index: req.Index,
Namespace: entMeta.NamespaceOrEmpty(),
Partition: entMeta.PartitionOrEmpty(),
}
}

View File

@ -29,6 +29,7 @@ func newMaterializerRequest(srvReq structs.ServiceSpecificRequest) func(index ui
Datacenter: srvReq.Datacenter,
Index: index,
Namespace: srvReq.EnterpriseMeta.NamespaceOrEmpty(),
Partition: srvReq.EnterpriseMeta.PartitionOrEmpty(),
}
if srvReq.Connect {
req.Topic = pbsubscribe.Topic_ServiceHealthConnect

View File

@ -398,6 +398,7 @@ func mergeServiceConfig(defaults *structs.ServiceConfigResponse, service *struct
remoteUpstreams[us.Upstream] = structs.Upstream{
DestinationNamespace: us.Upstream.NamespaceOrDefault(),
DestinationPartition: us.Upstream.PartitionOrDefault(),
DestinationName: us.Upstream.ID,
Config: us.Config,
MeshGateway: parsed.MeshGateway,

View File

@ -117,6 +117,7 @@ func TestServiceManager_RegisterSidecar(t *testing.T) {
{
DestinationName: "redis",
DestinationNamespace: "default",
DestinationPartition: "default",
LocalBindPort: 5000,
},
},
@ -147,6 +148,7 @@ func TestServiceManager_RegisterSidecar(t *testing.T) {
{
DestinationName: "redis",
DestinationNamespace: "default",
DestinationPartition: "default",
LocalBindPort: 5000,
Config: map[string]interface{}{
"protocol": "tcp",
@ -348,6 +350,7 @@ func TestServiceManager_PersistService_API(t *testing.T) {
{
DestinationName: "redis",
DestinationNamespace: "default",
DestinationPartition: "default",
LocalBindPort: 5000,
},
},
@ -375,6 +378,7 @@ func TestServiceManager_PersistService_API(t *testing.T) {
{
DestinationName: "redis",
DestinationNamespace: "default",
DestinationPartition: "default",
LocalBindPort: 5000,
Config: map[string]interface{}{
"protocol": "tcp",
@ -567,6 +571,7 @@ func TestServiceManager_PersistService_ConfigFiles(t *testing.T) {
upstreams = [{
destination_name = "redis"
destination_namespace = "default"
destination_partition = "default"
local_bind_port = 5000
}]
}
@ -612,6 +617,7 @@ func TestServiceManager_PersistService_ConfigFiles(t *testing.T) {
DestinationType: "service",
DestinationName: "redis",
DestinationNamespace: "default",
DestinationPartition: "default",
LocalBindPort: 5000,
Config: map[string]interface{}{
"protocol": "tcp",
@ -909,6 +915,7 @@ func Test_mergeServiceConfig_UpstreamOverrides(t *testing.T) {
Upstreams: structs.Upstreams{
structs.Upstream{
DestinationNamespace: "default",
DestinationPartition: "default",
DestinationName: "zap",
},
},
@ -924,6 +931,7 @@ func Test_mergeServiceConfig_UpstreamOverrides(t *testing.T) {
Upstreams: structs.Upstreams{
structs.Upstream{
DestinationNamespace: "default",
DestinationPartition: "default",
DestinationName: "zap",
Config: map[string]interface{}{
"passive_health_check": map[string]interface{}{
@ -970,6 +978,7 @@ func Test_mergeServiceConfig_UpstreamOverrides(t *testing.T) {
Upstreams: structs.Upstreams{
structs.Upstream{
DestinationNamespace: "default",
DestinationPartition: "default",
DestinationName: "zip",
LocalBindPort: 8080,
Config: map[string]interface{}{
@ -994,6 +1003,7 @@ func Test_mergeServiceConfig_UpstreamOverrides(t *testing.T) {
Upstreams: structs.Upstreams{
structs.Upstream{
DestinationNamespace: "default",
DestinationPartition: "default",
DestinationName: "zip",
LocalBindPort: 8080,
Config: map[string]interface{}{
@ -1002,6 +1012,7 @@ func Test_mergeServiceConfig_UpstreamOverrides(t *testing.T) {
},
structs.Upstream{
DestinationNamespace: "default",
DestinationPartition: "default",
DestinationName: "zap",
Config: map[string]interface{}{
"protocol": "grpc",
@ -1038,6 +1049,7 @@ func Test_mergeServiceConfig_UpstreamOverrides(t *testing.T) {
Upstreams: structs.Upstreams{
structs.Upstream{
DestinationNamespace: "default",
DestinationPartition: "default",
DestinationName: "zip",
LocalBindPort: 8080,
Config: map[string]interface{}{
@ -1058,6 +1070,7 @@ func Test_mergeServiceConfig_UpstreamOverrides(t *testing.T) {
Upstreams: structs.Upstreams{
structs.Upstream{
DestinationNamespace: "default",
DestinationPartition: "default",
DestinationName: "zip",
LocalBindPort: 8080,
Config: map[string]interface{}{
@ -1098,6 +1111,7 @@ func Test_mergeServiceConfig_UpstreamOverrides(t *testing.T) {
Upstreams: structs.Upstreams{
structs.Upstream{
DestinationNamespace: "default",
DestinationPartition: "default",
DestinationName: "zap",
},
},
@ -1116,6 +1130,7 @@ func Test_mergeServiceConfig_UpstreamOverrides(t *testing.T) {
Upstreams: structs.Upstreams{
structs.Upstream{
DestinationNamespace: "default",
DestinationPartition: "default",
DestinationName: "zap",
Config: map[string]interface{}{},
MeshGateway: structs.MeshGatewayConfig{
@ -1156,6 +1171,7 @@ func Test_mergeServiceConfig_UpstreamOverrides(t *testing.T) {
Upstreams: structs.Upstreams{
structs.Upstream{
DestinationNamespace: "default",
DestinationPartition: "default",
DestinationName: "zap",
MeshGateway: structs.MeshGatewayConfig{
Mode: structs.MeshGatewayModeNone,
@ -1177,6 +1193,7 @@ func Test_mergeServiceConfig_UpstreamOverrides(t *testing.T) {
Upstreams: structs.Upstreams{
structs.Upstream{
DestinationNamespace: "default",
DestinationPartition: "default",
DestinationName: "zap",
Config: map[string]interface{}{},
MeshGateway: structs.MeshGatewayConfig{

View File

@ -126,7 +126,7 @@ func (a *Agent) sidecarServiceFromNodeService(ns *structs.NodeService, token str
// it doesn't seem to be necessary - even with thousands of services this is
// not expensive to compute.
usedPorts := make(map[int]struct{})
for _, otherNS := range a.State.Services(structs.WildcardEnterpriseMetaInDefaultPartition()) {
for _, otherNS := range a.State.AllServices() {
// Check if other port is in auto-assign range
if otherNS.Port >= a.config.ConnectSidecarMinPort &&
otherNS.Port <= a.config.ConnectSidecarMaxPort {

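For context on the `AllServices()` change above: the sidecar port auto-assignment has to avoid every port already registered on the agent, regardless of which partition or namespace the registration lives in. A minimal sketch of that selection step, using a plain slice in place of the agent's local state (the names here are illustrative, not the agent's real API):

```go
package main

import "fmt"

// pickSidecarPort returns the first port in [minPort, maxPort] that is not
// already taken by another locally registered service. It mirrors the scan in
// sidecarServiceFromNodeService, but over a plain slice instead of agent
// state; it returns 0 when the range is exhausted.
func pickSidecarPort(minPort, maxPort int, registeredPorts []int) int {
	used := make(map[int]struct{}, len(registeredPorts))
	for _, p := range registeredPorts {
		// Only ports inside the auto-assign range can actually collide, but
		// recording all of them is harmless.
		used[p] = struct{}{}
	}
	for port := minPort; port <= maxPort; port++ {
		if _, taken := used[port]; !taken {
			return port
		}
	}
	return 0 // no free port left in the range
}

func main() {
	fmt.Println(pickSidecarPort(21000, 21005, []int{21000, 21001, 8080}))
}
```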
View File

@ -19,8 +19,3 @@ const (
ConsulServiceID = "consul"
ConsulServiceName = "consul"
)
var (
ConsulCompoundServiceID = NewServiceID(ConsulServiceID, nil) // TODO(partitions): delete this in favor of IsConsulServiceID(ServiceID)
SerfCompoundCheckID = NewCheckID(SerfCheckID, nil) // TODO(partitions): delete this in favor of IsSerfCheckID(CheckID)
)

View File

@ -1343,6 +1343,7 @@ type DiscoveryChainRequest struct {
Name string
EvaluateInDatacenter string
EvaluateInNamespace string
EvaluateInPartition string
// NOTE: Partition is not represented here by design. Do not add it.
@ -1386,6 +1387,7 @@ func (r *DiscoveryChainRequest) CacheInfo() cache.RequestInfo {
Name string
EvaluateInDatacenter string
EvaluateInNamespace string
EvaluateInPartition string
OverrideMeshGateway MeshGatewayConfig
OverrideProtocol string
OverrideConnectTimeout time.Duration
@ -1394,6 +1396,7 @@ func (r *DiscoveryChainRequest) CacheInfo() cache.RequestInfo {
Name: r.Name,
EvaluateInDatacenter: r.EvaluateInDatacenter,
EvaluateInNamespace: r.EvaluateInNamespace,
EvaluateInPartition: r.EvaluateInPartition,
OverrideMeshGateway: r.OverrideMeshGateway,
OverrideProtocol: r.OverrideProtocol,
OverrideConnectTimeout: r.OverrideConnectTimeout,

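The `CacheInfo` hunk matters because that struct is what gets hashed into the cache key for compiled discovery chains; once the chain is evaluated per partition, the partition has to be part of the key, or results for different partitions would share an entry. A rough illustration of the idea, using a simple string key rather than the hashed struct the real code builds:

```go
package main

import "fmt"

// chainCacheKey builds a lookup key for a compiled discovery chain. Leaving
// the partition out would make "web" in partition "alpha" and "web" in
// partition "beta" collide on one cache entry; including it keeps them apart.
func chainCacheKey(name, datacenter, namespace, partition string) string {
	return fmt.Sprintf("chain/%s/%s/%s/%s", partition, namespace, datacenter, name)
}

func main() {
	fmt.Println(chainCacheKey("web", "dc1", "default", "alpha"))
	fmt.Println(chainCacheKey("web", "dc1", "default", "beta"))
}
```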
View File

@ -246,7 +246,6 @@ func (t *ConnectProxyConfig) UnmarshalJSON(data []byte) (err error) {
}
return nil
}
func (c *ConnectProxyConfig) MarshalJSON() ([]byte, error) {
@ -343,6 +342,7 @@ type Upstream struct {
// on service definitions in various places.
DestinationType string `alias:"destination_type"`
DestinationNamespace string `json:",omitempty" alias:"destination_namespace"`
DestinationPartition string `json:",omitempty" alias:"destination_partition"`
DestinationName string `alias:"destination_name"`
// Datacenter that the service discovery request should be run against. Note
@ -385,6 +385,7 @@ func (t *Upstream) UnmarshalJSON(data []byte) (err error) {
type Alias Upstream
aux := &struct {
DestinationTypeSnake string `json:"destination_type"`
DestinationPartitionSnake string `json:"destination_partition"`
DestinationNamespaceSnake string `json:"destination_namespace"`
DestinationNameSnake string `json:"destination_name"`
@ -409,6 +410,9 @@ func (t *Upstream) UnmarshalJSON(data []byte) (err error) {
if t.DestinationNamespace == "" {
t.DestinationNamespace = aux.DestinationNamespaceSnake
}
if t.DestinationPartition == "" {
t.DestinationPartition = aux.DestinationPartitionSnake
}
if t.DestinationName == "" {
t.DestinationName = aux.DestinationNameSnake
}
@ -465,6 +469,7 @@ func (u *Upstream) ToAPI() api.Upstream {
return api.Upstream{
DestinationType: api.UpstreamDestType(u.DestinationType),
DestinationNamespace: u.DestinationNamespace,
DestinationPartition: u.DestinationPartition,
DestinationName: u.DestinationName,
Datacenter: u.Datacenter,
LocalBindAddress: u.LocalBindAddress,
@ -485,6 +490,7 @@ func (u *Upstream) ToAPI() api.Upstream {
func (u *Upstream) ToKey() UpstreamKey {
return UpstreamKey{
DestinationType: u.DestinationType,
DestinationPartition: u.DestinationPartition,
DestinationNamespace: u.DestinationNamespace,
DestinationName: u.DestinationName,
Datacenter: u.Datacenter,
@ -514,15 +520,17 @@ func (u Upstream) UpstreamAddressToString() string {
type UpstreamKey struct {
DestinationType string
DestinationName string
DestinationPartition string
DestinationNamespace string
Datacenter string
}
func (k UpstreamKey) String() string {
return fmt.Sprintf(
"[type=%q, name=%q, namespace=%q, datacenter=%q]",
"[type=%q, name=%q, partition=%q, namespace=%q, datacenter=%q]",
k.DestinationType,
k.DestinationName,
k.DestinationPartition,
k.DestinationNamespace,
k.Datacenter,
)
@ -537,6 +545,7 @@ func (u *Upstream) String() string {
func UpstreamFromAPI(u api.Upstream) Upstream {
return Upstream{
DestinationType: string(u.DestinationType),
DestinationPartition: u.DestinationPartition,
DestinationNamespace: u.DestinationNamespace,
DestinationName: u.DestinationName,
Datacenter: u.Datacenter,

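The aux-struct pattern in `UnmarshalJSON` above is how the upstream definition accepts both JSON spellings, and `destination_partition` now gets the same fallback treatment as `destination_namespace` and `destination_name`. A self-contained sketch of the same technique, trimmed to a stand-in type that keeps only the destination fields:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// upstream is a trimmed stand-in for the full upstream definition, keeping
// only the fields needed to show the snake_case fallback.
type upstream struct {
	DestinationName      string `json:",omitempty"`
	DestinationNamespace string `json:",omitempty"`
	DestinationPartition string `json:",omitempty"`
}

func (u *upstream) UnmarshalJSON(data []byte) error {
	type Alias upstream // plain alias type so json does not recurse into this method
	aux := &struct {
		DestinationNameSnake      string `json:"destination_name"`
		DestinationNamespaceSnake string `json:"destination_namespace"`
		DestinationPartitionSnake string `json:"destination_partition"`
		*Alias
	}{Alias: (*Alias)(u)}
	if err := json.Unmarshal(data, aux); err != nil {
		return err
	}
	// CamelCase keys win; the snake_case spellings only fill fields left empty.
	if u.DestinationName == "" {
		u.DestinationName = aux.DestinationNameSnake
	}
	if u.DestinationNamespace == "" {
		u.DestinationNamespace = aux.DestinationNamespaceSnake
	}
	if u.DestinationPartition == "" {
		u.DestinationPartition = aux.DestinationPartitionSnake
	}
	return nil
}

func main() {
	var u upstream
	raw := `{"destination_name":"redis","destination_partition":"default"}`
	if err := json.Unmarshal([]byte(raw), &u); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", u)
}
```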
View File

@ -91,6 +91,9 @@ func (s *ServiceDefinition) NodeService() *NodeService {
if ns.Proxy.Upstreams[i].DestinationNamespace == "" {
ns.Proxy.Upstreams[i].DestinationNamespace = ns.EnterpriseMeta.NamespaceOrEmpty()
}
if ns.Proxy.Upstreams[i].DestinationPartition == "" {
ns.Proxy.Upstreams[i].DestinationPartition = ns.EnterpriseMeta.PartitionOrEmpty()
}
}
ns.Proxy.Expose = s.Proxy.Expose
}

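The defaulting rule in this hunk (and in the matching test helper further down) is that an upstream with no explicit namespace or partition inherits both from the service that declares it. A minimal sketch of that rule with stand-in types, since the real `EnterpriseMeta` accessors are not shown here:

```go
package main

import "fmt"

// entMeta stands in for the enterprise metadata attached to a service
// registration; empty strings mean "not specified".
type entMeta struct {
	Namespace string
	Partition string
}

type upstream struct {
	DestinationName      string
	DestinationNamespace string
	DestinationPartition string
}

// applyDefaults fills any unspecified destination fields from the owning
// service's metadata, mirroring the intent of ServiceDefinition.NodeService.
func applyDefaults(ups []upstream, meta entMeta) []upstream {
	for i := range ups {
		if ups[i].DestinationNamespace == "" {
			ups[i].DestinationNamespace = meta.Namespace
		}
		if ups[i].DestinationPartition == "" {
			ups[i].DestinationPartition = meta.Partition
		}
	}
	return ups
}

func main() {
	out := applyDefaults(
		[]upstream{{DestinationName: "redis"}},
		entMeta{Namespace: "default", Partition: "default"},
	)
	fmt.Printf("%+v\n", out[0])
}
```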
View File

@ -161,6 +161,11 @@ var expectedFieldConfigUpstreams bexpr.FieldConfigurations = bexpr.FieldConfigur
CoerceFn: bexpr.CoerceString,
SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual, bexpr.MatchIn, bexpr.MatchNotIn, bexpr.MatchMatches, bexpr.MatchNotMatches},
},
"DestinationPartition": &bexpr.FieldConfiguration{
StructFieldName: "DestinationPartition",
CoerceFn: bexpr.CoerceString,
SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual, bexpr.MatchIn, bexpr.MatchNotIn, bexpr.MatchMatches, bexpr.MatchNotMatches},
},
"DestinationName": &bexpr.FieldConfiguration{
StructFieldName: "DestinationName",
CoerceFn: bexpr.CoerceString,

View File

@ -40,11 +40,15 @@ func TestRegisterIngressGateway(t testing.T) *RegisterRequest {
}
}
// TestNodeService returns a *NodeService representing a valid regular service.
// TestNodeService returns a *NodeService representing a valid regular service: "web".
func TestNodeService(t testing.T) *NodeService {
return TestNodeServiceWithName(t, "web")
}
func TestNodeServiceWithName(t testing.T, name string) *NodeService {
return &NodeService{
Kind: ServiceKindTypical,
Service: "web",
Service: name,
Port: 8080,
}
}

View File

@ -55,6 +55,9 @@ func TestAddDefaultsToUpstreams(t testing.T, upstreams []Upstream, entMeta Enter
if ups[i].DestinationNamespace == "" {
ups[i].DestinationNamespace = entMeta.NamespaceOrEmpty()
}
if ups[i].DestinationPartition == "" {
ups[i].DestinationPartition = entMeta.PartitionOrEmpty()
}
}
return ups
}

View File

@ -152,11 +152,14 @@ func (s *HTTPHandlers) convertOps(resp http.ResponseWriter, req *http.Request) (
KV: &structs.TxnKVOp{
Verb: verb,
DirEnt: structs.DirEntry{
Key: in.KV.Key,
Value: in.KV.Value,
Flags: in.KV.Flags,
Session: in.KV.Session,
EnterpriseMeta: structs.NewEnterpriseMetaInDefaultPartition(in.KV.Namespace),
Key: in.KV.Key,
Value: in.KV.Value,
Flags: in.KV.Flags,
Session: in.KV.Session,
EnterpriseMeta: structs.NewEnterpriseMetaWithPartition(
in.KV.Partition,
in.KV.Namespace,
),
RaftIndex: structs.RaftIndex{
ModifyIndex: in.KV.Index,
},
@ -182,6 +185,7 @@ func (s *HTTPHandlers) convertOps(resp http.ResponseWriter, req *http.Request) (
Node: structs.Node{
ID: types.NodeID(node.ID),
Node: node.Node,
Partition: node.Partition,
Address: node.Address,
Datacenter: node.Datacenter,
TaggedAddresses: node.TaggedAddresses,
@ -216,7 +220,10 @@ func (s *HTTPHandlers) convertOps(resp http.ResponseWriter, req *http.Request) (
Warning: svc.Weights.Warning,
},
EnableTagOverride: svc.EnableTagOverride,
EnterpriseMeta: structs.NewEnterpriseMetaInDefaultPartition(svc.Namespace),
EnterpriseMeta: structs.NewEnterpriseMetaWithPartition(
svc.Partition,
svc.Namespace,
),
RaftIndex: structs.RaftIndex{
ModifyIndex: svc.ModifyIndex,
},
@ -274,7 +281,10 @@ func (s *HTTPHandlers) convertOps(resp http.ResponseWriter, req *http.Request) (
Timeout: timeout,
DeregisterCriticalServiceAfter: deregisterCriticalServiceAfter,
},
EnterpriseMeta: structs.NewEnterpriseMetaInDefaultPartition(check.Namespace),
EnterpriseMeta: structs.NewEnterpriseMetaWithPartition(
check.Partition,
check.Namespace,
),
RaftIndex: structs.RaftIndex{
ModifyIndex: check.ModifyIndex,
},

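Each converted transaction op above now builds its `EnterpriseMeta` from both the partition and the namespace carried on the API-level op instead of assuming the default partition. A rough sketch of that translation for the KV case, with simplified stand-in types; the real api and structs types carry many more fields, and the fallback shown in `PartitionOrDefault` is an assumption about the usual OSS default:

```go
package main

import "fmt"

// apiKVOp stands in for the API-level KV transaction operation.
type apiKVOp struct {
	Verb, Key, Namespace, Partition string
}

// entMeta stands in for the internal enterprise metadata; an empty partition
// is read back as "default".
type entMeta struct{ Partition, Namespace string }

func (m entMeta) PartitionOrDefault() string {
	if m.Partition == "" {
		return "default"
	}
	return m.Partition
}

// convertKVOp mirrors the shape of convertOps: the enterprise metadata is
// built from both fields of the incoming op rather than only the namespace.
func convertKVOp(in apiKVOp) (verb, key string, meta entMeta) {
	return in.Verb, in.Key, entMeta{Partition: in.Partition, Namespace: in.Namespace}
}

func main() {
	_, _, meta := convertKVOp(apiKVOp{Verb: "set", Key: "foo", Namespace: "ns1"})
	fmt.Println(meta.PartitionOrDefault()) // "default" when the op omits a partition
}
```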
View File

@ -603,6 +603,9 @@ func (s *HTTPHandlers) UIMetricsProxy(resp http.ResponseWriter, req *http.Reques
s.clearTokenFromHeaders(req)
var entMeta structs.EnterpriseMeta
if err := parseEntMetaPartition(req, &entMeta); err != nil {
return nil, err
}
authz, err := s.agent.delegate.ResolveTokenAndDefaultMeta(token, &entMeta, nil)
if err != nil {
return nil, err
@ -611,9 +614,8 @@ func (s *HTTPHandlers) UIMetricsProxy(resp http.ResponseWriter, req *http.Reques
// This endpoint requires wildcard read on all services and all nodes.
//
// In enterprise it requires this _in all namespaces_ too.
wildMeta := structs.WildcardEnterpriseMetaInDefaultPartition()
var authzContext acl.AuthorizerContext
wildMeta.FillAuthzContext(&authzContext)
entMeta.WildcardEnterpriseMetaForPartition().FillAuthzContext(&authzContext)
if authz.NodeReadAll(&authzContext) != acl.Allow || authz.ServiceReadAll(&authzContext) != acl.Allow {
return nil, acl.ErrPermissionDenied

File diff suppressed because one or more lines are too long

View File

@ -187,6 +187,7 @@ func (a *Agent) shouldProcessUserEvent(msg *UserEvent) bool {
}
// Scan for a match
// NOTE: this only works in the default partition and default namespace
services := a.State.Services(structs.DefaultEnterpriseMetaInDefaultPartition())
found := false
OUTER:

View File

@ -535,18 +535,34 @@ func (s *ResourceGenerator) makeUpstreamClusterForPreparedQuery(upstream structs
}
}
spiffeID := connect.SpiffeIDService{
Host: cfgSnap.Roots.TrustDomain,
Namespace: upstream.DestinationNamespace,
Datacenter: dc,
Service: upstream.DestinationName,
endpoints := cfgSnap.ConnectProxy.PreparedQueryEndpoints[upstream.Identifier()]
var (
spiffeIDs = make([]connect.SpiffeIDService, 0)
seen = make(map[string]struct{})
)
for _, e := range endpoints {
id := fmt.Sprintf("%s/%s", e.Node.Datacenter, e.Service.CompoundServiceName())
if _, ok := seen[id]; ok {
continue
}
seen[id] = struct{}{}
// TODO(partitions) Store partition
name := e.Service.Proxy.DestinationServiceName
if e.Service.Connect.Native {
name = e.Service.Service
}
spiffeIDs = append(spiffeIDs, connect.SpiffeIDService{
Host: cfgSnap.Roots.TrustDomain,
Namespace: e.Service.NamespaceOrDefault(),
Partition: e.Service.PartitionOrDefault(),
Datacenter: e.Node.Datacenter,
Service: name,
})
}
// Enable TLS upstream with the configured client certificate.
commonTLSContext := makeCommonTLSContextFromLeaf(cfgSnap, cfgSnap.Leaf())
err = injectSANMatcher(commonTLSContext, spiffeID)
err = injectSANMatcher(commonTLSContext, spiffeIDs...)
if err != nil {
return nil, fmt.Errorf("failed to inject SAN matcher rules for cluster %q: %v", sni, err)
}

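This last hunk is the agent-side counterpart of the prepared-query SAN fix: rather than validating against a single SPIFFE ID derived from the upstream, the cluster now collects one ID per distinct target the query can resolve to, de-duplicated before being handed to `injectSANMatcher`. A simplified sketch of that de-duplication with plain stand-in types; the URI layout follows Consul's documented service SPIFFE ID format, while the de-duplication key used here is illustrative:

```go
package main

import "fmt"

// target stands in for one prepared-query result: the datacenter the endpoint
// lives in and the logical service name its certificate encodes.
type target struct {
	Datacenter, Namespace, Partition, Service string
}

// uniqueSpiffeURIs returns one SPIFFE-style URI per distinct target, so the
// SAN matcher accepts every service the query can return without duplicates.
func uniqueSpiffeURIs(trustDomain string, targets []target) []string {
	seen := make(map[string]struct{})
	var uris []string
	for _, t := range targets {
		key := t.Datacenter + "/" + t.Partition + "/" + t.Namespace + "/" + t.Service
		if _, ok := seen[key]; ok {
			continue
		}
		seen[key] = struct{}{}
		uris = append(uris, fmt.Sprintf("spiffe://%s/ns/%s/dc/%s/svc/%s",
			trustDomain, t.Namespace, t.Datacenter, t.Service))
	}
	return uris
}

func main() {
	out := uniqueSpiffeURIs("11111111-2222-3333-4444-555555555555.consul", []target{
		{Datacenter: "dc1", Namespace: "default", Partition: "default", Service: "web"},
		{Datacenter: "dc1", Namespace: "default", Partition: "default", Service: "web"},
		{Datacenter: "dc2", Namespace: "default", Partition: "default", Service: "web"},
	})
	for _, u := range out {
		fmt.Println(u)
	}
}
```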
Some files were not shown because too many files have changed in this diff.