
made node partition scoped (#19794)

* made node partition scoped

* removed namespace from node testdata
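
In short, Node moves from namespace scope to partition scope: the type registration, the proto annotation, and the node ID built by the node mapper now carry only a partition, and the tests and test data drop the namespace. Below is a minimal sketch of what that means for callers (not part of the commit, and only compilable inside the consul module since it uses internal packages), using the same builder and tenancy helpers that appear in the test diffs further down:

```go
// Sketch only, not part of the commit: contrasts how a Node ID was built
// before and after the scope change, using helpers seen in the diffs below.
package sketch

import (
	"github.com/hashicorp/consul/internal/resource"
	rtest "github.com/hashicorp/consul/internal/resource/resourcetest"
	pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
	"github.com/hashicorp/consul/proto-public/pbresource"
)

// nodeIDs returns the node ID as tests built it before the change and as
// they build it now.
func nodeIDs() (before, after *pbresource.ID) {
	// Before: Node was namespace scoped, so tests used the default
	// namespaced tenancy (partition + namespace + peer).
	before = rtest.Resource(pbcatalog.NodeType, "node-1").
		WithTenancy(resource.DefaultNamespacedTenancy()).
		ID()

	// After: Node is partition scoped, so only a partition is set on the
	// tenancy and the namespace is dropped.
	after = rtest.Resource(pbcatalog.NodeType, "node-1").
		WithTenancy(resource.DefaultPartitionedTenancy()).
		ID()

	return before, after
}
```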
aahel committed 12 months ago via GitHub · commit ac9261ac3e
18 changed files (changed lines per file in parentheses):

  1. internal/catalog/catalogtest/integration_test_data/v2beta1/node-1-health.json (1)
  2. internal/catalog/catalogtest/integration_test_data/v2beta1/node-1.json (1)
  3. internal/catalog/catalogtest/integration_test_data/v2beta1/node-2-health.json (1)
  4. internal/catalog/catalogtest/integration_test_data/v2beta1/node-2.json (1)
  5. internal/catalog/catalogtest/integration_test_data/v2beta1/node-3-health.json (1)
  6. internal/catalog/catalogtest/integration_test_data/v2beta1/node-3.json (1)
  7. internal/catalog/catalogtest/integration_test_data/v2beta1/node-4-health.json (1)
  8. internal/catalog/catalogtest/integration_test_data/v2beta1/node-4.json (1)
  9. internal/catalog/catalogtest/test_integration_v2beta1.go (10)
  10. internal/catalog/catalogtest/test_lifecycle_v2beta1.go (4)
  11. internal/catalog/internal/controllers/nodehealth/controller_test.go (16)
  12. internal/catalog/internal/controllers/workloadhealth/controller_test.go (28)
  13. internal/catalog/internal/mappers/nodemapper/node_mapper.go (9)
  14. internal/catalog/internal/mappers/nodemapper/node_mapper_test.go (11)
  15. internal/catalog/internal/types/health_status_test.go (8)
  16. internal/catalog/internal/types/node.go (11)
  17. proto-public/pbcatalog/v2beta1/node.pb.go (2)
  18. proto-public/pbcatalog/v2beta1/node.proto (5)

internal/catalog/catalogtest/integration_test_data/v2beta1/node-1-health.json (1 changed line)

@@ -20,7 +20,6 @@
 },
 "tenancy": {
 "partition": "default",
-"namespace": "default",
 "peer_name": "local"
 },
 "name": "node-1"

internal/catalog/catalogtest/integration_test_data/v2beta1/node-1.json (1 changed line)

@@ -7,7 +7,6 @@
 },
 "tenancy": {
 "partition": "default",
-"namespace": "default",
 "peer_name": "local"
 },
 "name": "node-1"

internal/catalog/catalogtest/integration_test_data/v2beta1/node-2-health.json (1 changed line)

@@ -20,7 +20,6 @@
 },
 "tenancy": {
 "partition": "default",
-"namespace": "default",
 "peer_name": "local"
 },
 "name": "node-2"

internal/catalog/catalogtest/integration_test_data/v2beta1/node-2.json (1 changed line)

@@ -7,7 +7,6 @@
 },
 "tenancy": {
 "partition": "default",
-"namespace": "default",
 "peer_name": "local"
 },
 "name": "node-2"

internal/catalog/catalogtest/integration_test_data/v2beta1/node-3-health.json (1 changed line)

@@ -20,7 +20,6 @@
 },
 "tenancy": {
 "partition": "default",
-"namespace": "default",
 "peer_name": "local"
 },
 "name": "node-3"

internal/catalog/catalogtest/integration_test_data/v2beta1/node-3.json (1 changed line)

@@ -7,7 +7,6 @@
 },
 "tenancy": {
 "partition": "default",
-"namespace": "default",
 "peer_name": "local"
 },
 "name": "node-3"

internal/catalog/catalogtest/integration_test_data/v2beta1/node-4-health.json (1 changed line)

@@ -20,7 +20,6 @@
 },
 "tenancy": {
 "partition": "default",
-"namespace": "default",
 "peer_name": "local"
 },
 "name": "node-4"

internal/catalog/catalogtest/integration_test_data/v2beta1/node-4.json (1 changed line)

@@ -7,7 +7,6 @@
 },
 "tenancy": {
 "partition": "default",
-"namespace": "default",
 "peer_name": "local"
 },
 "name": "node-4"

internal/catalog/catalogtest/test_integration_v2beta1.go (10 changed lines)

@@ -68,7 +68,7 @@ func VerifyCatalogV2Beta1IntegrationTestResults(t *testing.T, client pbresource.
 c.RequireResourceExists(t, rtest.Resource(pbcatalog.ServiceType, "foo").ID())
 for i := 1; i < 5; i++ {
-nodeId := rtest.Resource(pbcatalog.NodeType, fmt.Sprintf("node-%d", i)).WithTenancy(resource.DefaultNamespacedTenancy()).ID()
+nodeId := rtest.Resource(pbcatalog.NodeType, fmt.Sprintf("node-%d", i)).WithTenancy(resource.DefaultPartitionedTenancy()).ID()
 c.RequireResourceExists(t, nodeId)
 res := c.RequireResourceExists(t, rtest.Resource(pbcatalog.HealthStatusType, fmt.Sprintf("node-%d-health", i)).ID())
@@ -85,10 +85,10 @@
 })
 testutil.RunStep(t, "node-health-reconciliation", func(t *testing.T) {
-c.WaitForStatusCondition(t, rtest.Resource(pbcatalog.NodeType, "node-1").ID(), nodehealth.StatusKey, nodehealth.ConditionPassing)
-c.WaitForStatusCondition(t, rtest.Resource(pbcatalog.NodeType, "node-2").ID(), nodehealth.StatusKey, nodehealth.ConditionWarning)
-c.WaitForStatusCondition(t, rtest.Resource(pbcatalog.NodeType, "node-3").ID(), nodehealth.StatusKey, nodehealth.ConditionCritical)
-c.WaitForStatusCondition(t, rtest.Resource(pbcatalog.NodeType, "node-4").ID(), nodehealth.StatusKey, nodehealth.ConditionMaintenance)
+c.WaitForStatusCondition(t, rtest.Resource(pbcatalog.NodeType, "node-1").WithTenancy(resource.DefaultPartitionedTenancy()).ID(), nodehealth.StatusKey, nodehealth.ConditionPassing)
+c.WaitForStatusCondition(t, rtest.Resource(pbcatalog.NodeType, "node-2").WithTenancy(resource.DefaultPartitionedTenancy()).ID(), nodehealth.StatusKey, nodehealth.ConditionWarning)
+c.WaitForStatusCondition(t, rtest.Resource(pbcatalog.NodeType, "node-3").WithTenancy(resource.DefaultPartitionedTenancy()).ID(), nodehealth.StatusKey, nodehealth.ConditionCritical)
+c.WaitForStatusCondition(t, rtest.Resource(pbcatalog.NodeType, "node-4").WithTenancy(resource.DefaultPartitionedTenancy()).ID(), nodehealth.StatusKey, nodehealth.ConditionMaintenance)
 })
 testutil.RunStep(t, "workload-health-reconciliation", func(t *testing.T) {

internal/catalog/catalogtest/test_lifecycle_v2beta1.go (4 changed lines)

@@ -7,6 +7,7 @@ import (
 "testing"
 "github.com/hashicorp/consul/internal/catalog"
+"github.com/hashicorp/consul/internal/resource"
 rtest "github.com/hashicorp/consul/internal/resource/resourcetest"
 pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
 "github.com/hashicorp/consul/proto-public/pbresource"
@@ -49,6 +50,7 @@ func RunCatalogV2Beta1NodeLifecycleIntegrationTest(t *testing.T, client pbresour
 // initial node creation
 node := rtest.Resource(pbcatalog.NodeType, nodeName).
+WithTenancy(resource.DefaultPartitionedTenancy()).
 WithData(t, &pbcatalog.Node{
 Addresses: []*pbcatalog.NodeAddress{
 {Host: "172.16.2.3"},
@@ -246,11 +248,13 @@ func runV2Beta1NodeAssociatedWorkloadLifecycleIntegrationTest(t *testing.T, c *r
 // Insert a some nodes to link the workloads to at various points throughout the test
 node1 := rtest.Resource(pbcatalog.NodeType, nodeName1).
+WithTenancy(resource.DefaultPartitionedTenancy()).
 WithData(t, &pbcatalog.Node{
 Addresses: []*pbcatalog.NodeAddress{{Host: "172.17.9.10"}},
 }).
 Write(t, c)
 node2 := rtest.Resource(pbcatalog.NodeType, nodeName2).
+WithTenancy(resource.DefaultPartitionedTenancy()).
 WithData(t, &pbcatalog.Node{
 Addresses: []*pbcatalog.NodeAddress{{Host: "172.17.9.11"}},
 }).

internal/catalog/internal/controllers/nodehealth/controller_test.go (16 changed lines)

@@ -75,7 +75,9 @@ type nodeHealthControllerTestSuite struct {
 func (suite *nodeHealthControllerTestSuite) writeNode(name string, tenancy *pbresource.Tenancy) *pbresource.ID {
 return resourcetest.Resource(pbcatalog.NodeType, name).
 WithData(suite.T(), nodeData).
-WithTenancy(tenancy).
+WithTenancy(&pbresource.Tenancy{
+Partition: tenancy.Partition,
+}).
 Write(suite.T(), suite.resourceClient).Id
 }
@@ -122,7 +124,9 @@ func (suite *nodeHealthControllerTestSuite) TestGetNodeHealthNoNode() {
 // no error is returned but also no data is. The default passing
 // status should then be returned in the same manner as the node
 // existing but with no associated HealthStatus resources.
-ref := resourceID(pbcatalog.NodeType, "foo", tenancy)
+ref := resourceID(pbcatalog.NodeType, "foo", &pbresource.Tenancy{
+Partition: tenancy.Partition,
+})
 ref.Uid = ulid.Make().String()
 health, err := getNodeHealth(context.Background(), suite.runtime, ref)
@@ -181,7 +185,9 @@ func (suite *nodeHealthControllerTestSuite) TestReconcileNodeNotFound() {
 // This test ensures that removed nodes are ignored. In particular we don't
 // want to propagate the error and indefinitely keep re-reconciling in this case.
 err := suite.ctl.Reconcile(context.Background(), suite.runtime, controller.Request{
-ID: resourceID(pbcatalog.NodeType, "not-found", tenancy),
+ID: resourceID(pbcatalog.NodeType, "not-found", &pbresource.Tenancy{
+Partition: tenancy.Partition,
+}),
 })
 require.NoError(suite.T(), err)
 })
@@ -349,7 +355,9 @@ func (suite *nodeHealthControllerTestSuite) TestController() {
 },
 },
 }).
-WithTenancy(tenancy).
+WithTenancy(&pbresource.Tenancy{
+Partition: tenancy.Partition,
+}).
 Write(suite.T(), suite.resourceClient)
 // wait for rereconciliation to happen
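
The controller tests in this file and the next repeat one adjustment: the per-test tenancy, which still carries a namespace, is narrowed to a partition-only tenancy before being attached to a Node. A hypothetical helper capturing that pattern (name and placement are illustrative, not part of the commit) could look like:

```go
package sketch

import "github.com/hashicorp/consul/proto-public/pbresource"

// partitionOnly is a hypothetical helper, not part of the commit: it narrows
// a namespaced test tenancy down to the partition-only tenancy that Node
// resources use after this change.
func partitionOnly(tenancy *pbresource.Tenancy) *pbresource.Tenancy {
	return &pbresource.Tenancy{Partition: tenancy.GetPartition()}
}
```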

internal/catalog/internal/controllers/workloadhealth/controller_test.go (28 changed lines)

@@ -119,7 +119,9 @@ func (suite *controllerSuite) injectNodeWithStatus(name string, health pbcatalog
 return resourcetest.Resource(pbcatalog.NodeType, name).
 WithData(suite.T(), nodeData).
-WithTenancy(tenancy).
+WithTenancy(&pbresource.Tenancy{
+Partition: tenancy.Partition,
+}).
 WithStatus(nodehealth.StatusKey, &pbresource.Status{
 Conditions: []*pbresource.Condition{
 {
@@ -422,7 +424,9 @@ func (suite *workloadHealthControllerTestSuite) TestReconcileNotFound() {
 node := resourcetest.Resource(pbcatalog.NodeType, "test-node").
 WithData(suite.T(), nodeData).
-WithTenancy(tenancy).
+WithTenancy(&pbresource.Tenancy{
+Partition: tenancy.Partition,
+}).
 // Whether this gets written or not doesn't matter
 Build()
@@ -468,7 +472,9 @@ func (suite *workloadHealthControllerTestSuite) TestGetNodeHealthError() {
 suite.runTestCaseWithTenancies(func(tenancy *pbresource.Tenancy) {
 node := resourcetest.Resource(pbcatalog.NodeType, "test-node").
 WithData(suite.T(), nodeData).
-WithTenancy(tenancy).
+WithTenancy(&pbresource.Tenancy{
+Partition: tenancy.Partition,
+}).
 Write(suite.T(), suite.client)
 workload := resourcetest.Resource(pbcatalog.WorkloadType, "test-workload").
@@ -712,7 +718,9 @@ func (suite *getNodeHealthTestSuite) TestNotfound() {
 // could occur when a linked node gets removed without the workloads being modified/removed.
 // When that occurs we want to steer traffic away from the linked node as soon as possible.
 suite.controllerSuite.runTestCaseWithTenancies(func(tenancy *pbresource.Tenancy) {
-health, err := getNodeHealth(context.Background(), suite.runtime, resourceID(pbcatalog.NodeType, "not-found", tenancy))
+health, err := getNodeHealth(context.Background(), suite.runtime, resourceID(pbcatalog.NodeType, "not-found", &pbresource.Tenancy{
+Partition: tenancy.Partition,
+}))
 require.NoError(suite.T(), err)
 require.Equal(suite.T(), pbcatalog.Health_HEALTH_CRITICAL, health)
 })
@@ -736,7 +744,9 @@ func (suite *getNodeHealthTestSuite) TestUnreconciled() {
 suite.controllerSuite.runTestCaseWithTenancies(func(tenancy *pbresource.Tenancy) {
 node := resourcetest.Resource(pbcatalog.NodeType, "unreconciled").
 WithData(suite.T(), nodeData).
-WithTenancy(tenancy).
+WithTenancy(&pbresource.Tenancy{
+Partition: tenancy.Partition,
+}).
 Write(suite.T(), suite.client).
 GetId()
@@ -757,7 +767,9 @@ func (suite *getNodeHealthTestSuite) TestNoConditions() {
 suite.controllerSuite.runTestCaseWithTenancies(func(tenancy *pbresource.Tenancy) {
 node := resourcetest.Resource(pbcatalog.NodeType, "no-conditions").
 WithData(suite.T(), nodeData).
-WithTenancy(tenancy).
+WithTenancy(&pbresource.Tenancy{
+Partition: tenancy.Partition,
+}).
 WithStatus(nodehealth.StatusKey, &pbresource.Status{}).
 Write(suite.T(), suite.client).
 GetId()
@@ -780,7 +792,9 @@ func (suite *getNodeHealthTestSuite) TestInvalidReason() {
 suite.controllerSuite.runTestCaseWithTenancies(func(tenancy *pbresource.Tenancy) {
 node := resourcetest.Resource(pbcatalog.NodeType, "invalid-reason").
 WithData(suite.T(), nodeData).
-WithTenancy(tenancy).
+WithTenancy(&pbresource.Tenancy{
+Partition: tenancy.Partition,
+}).
 WithStatus(nodehealth.StatusKey, &pbresource.Status{
 Conditions: []*pbresource.Condition{
 {

internal/catalog/internal/mappers/nodemapper/node_mapper.go (9 changed lines)

@@ -5,6 +5,7 @@ package nodemapper
 import (
 "context"
 "github.com/hashicorp/consul/internal/controller"
+"github.com/hashicorp/consul/internal/resource"
 "github.com/hashicorp/consul/internal/resource/mappers/bimapper"
@@ -26,9 +27,11 @@ func New() *NodeMapper {
 // the workload and with the name populated from the workloads NodeName field.
 func (m *NodeMapper) NodeIDFromWorkload(workload *pbresource.Resource, workloadData *pbcatalog.Workload) *pbresource.ID {
 return &pbresource.ID{
-Type: pbcatalog.NodeType,
-Tenancy: workload.Id.Tenancy,
-Name: workloadData.NodeName,
+Type: pbcatalog.NodeType,
+Tenancy: &pbresource.Tenancy{
+Partition: workload.Id.Tenancy.GetPartition(),
+},
+Name: workloadData.NodeName,
 }
 }
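
The practical effect of the mapper change is that a workload in any namespace now maps to a partition-scoped node ID; the namespace is dropped entirely. A rough test-style sketch (not part of the commit), assuming it sits alongside node_mapper_test.go in the nodemapper package, with made-up workload values:

```go
package nodemapper

import (
	"testing"

	"github.com/stretchr/testify/require"

	pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
	"github.com/hashicorp/consul/proto-public/pbresource"
)

// Sketch only, not part of the commit: the node ID derived from a workload
// keeps the workload's partition and drops its namespace.
func TestSketch_NodeIDDropsNamespace(t *testing.T) {
	mapper := New()

	// A workload that lives in some namespace...
	workload := &pbresource.Resource{
		Id: &pbresource.ID{
			Type: pbcatalog.WorkloadType,
			Name: "api-1",
			Tenancy: &pbresource.Tenancy{
				Partition: "default",
				Namespace: "team-a", // hypothetical namespace
			},
		},
	}
	data := &pbcatalog.Workload{NodeName: "node-1"}

	// ...maps to a node ID whose tenancy carries only the partition.
	id := mapper.NodeIDFromWorkload(workload, data)
	require.Equal(t, "node-1", id.Name)
	require.Equal(t, "default", id.Tenancy.GetPartition())
	require.Empty(t, id.Tenancy.GetNamespace())
}
```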

internal/catalog/internal/mappers/nodemapper/node_mapper_test.go (11 changed lines)

@@ -10,6 +10,7 @@ import (
 "github.com/stretchr/testify/require"
 "github.com/hashicorp/consul/internal/controller"
+"github.com/hashicorp/consul/internal/resource"
 "github.com/hashicorp/consul/internal/resource/resourcetest"
 pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
 "github.com/hashicorp/consul/proto-public/pbresource"
@@ -29,9 +30,11 @@ func TestNodeMapper_NodeIDFromWorkload(t *testing.T) {
 actual := mapper.NodeIDFromWorkload(workload, data)
 expected := &pbresource.ID{
-Type: pbcatalog.NodeType,
-Tenancy: workload.Id.Tenancy,
-Name: "test-node",
+Type: pbcatalog.NodeType,
+Tenancy: &pbresource.Tenancy{
+Partition: workload.Id.Tenancy.GetPartition(),
+},
+Name: "test-node",
 }
 prototest.AssertDeepEqual(t, expected, actual)
@@ -55,10 +58,12 @@ func TestNodeMapper_WorkloadTracking(t *testing.T) {
 mapper := New()
 node1 := resourcetest.Resource(pbcatalog.NodeType, "node1").
+WithTenancy(resource.DefaultPartitionedTenancy()).
 WithData(t, &pbcatalog.Node{Addresses: []*pbcatalog.NodeAddress{{Host: "198.18.0.1"}}}).
 Build()
 node2 := resourcetest.Resource(pbcatalog.NodeType, "node2").
+WithTenancy(resource.DefaultPartitionedTenancy()).
 WithData(t, &pbcatalog.Node{Addresses: []*pbcatalog.NodeAddress{{Host: "198.18.0.2"}}}).
 Build()

internal/catalog/internal/types/health_status_test.go (8 changed lines)

@@ -72,9 +72,11 @@ func TestValidateHealthStatus_Ok(t *testing.T) {
 },
 "node-owned": {
 owner: &pbresource.ID{
-Type: pbcatalog.NodeType,
-Tenancy: defaultHealthStatusOwnerTenancy,
-Name: "bar-node",
+Type: pbcatalog.NodeType,
+Tenancy: &pbresource.Tenancy{
+Partition: defaultHealthStatusOwnerTenancy.Partition,
+},
+Name: "bar-node",
 },
 },
 }

internal/catalog/internal/types/node.go (11 changed lines)

@@ -16,14 +16,9 @@ type DecodedNode = resource.DecodedResource[*pbcatalog.Node]
 func RegisterNode(r resource.Registry) {
 r.Register(resource.Registration{
-Type: pbcatalog.NodeType,
-Proto: &pbcatalog.Node{},
-// TODO: A node should be partition scoped. However its HealthStatus which is
-// namespace scoped has Node as an owner. We do not support ownership between resources
-// of differing scope at this time. HealthStatus will probably be split out into two different
-// types, one for namespace scoped owners and the other for partition scoped owners.
-// Until that time, Node will remain namespace scoped.
-Scope: resource.ScopeNamespace,
+Type: pbcatalog.NodeType,
+Proto: &pbcatalog.Node{},
+Scope: resource.ScopePartition,
 Validate: ValidateNode,
 ACLs: &resource.ACLHooks{
 Read: aclReadHookNode,

proto-public/pbcatalog/v2beta1/node.pb.go (2 changed lines)

@@ -143,7 +143,7 @@ var file_pbcatalog_v2beta1_node_proto_rawDesc = []byte{
 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x63, 0x61, 0x74,
 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x4e, 0x6f, 0x64,
 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x09, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73,
-0x73, 0x65, 0x73, 0x3a, 0x06, 0xa2, 0x93, 0x04, 0x02, 0x08, 0x03, 0x22, 0x3d, 0x0a, 0x0b, 0x4e,
+0x73, 0x65, 0x73, 0x3a, 0x06, 0xa2, 0x93, 0x04, 0x02, 0x08, 0x02, 0x22, 0x3d, 0x0a, 0x0b, 0x4e,
 0x6f, 0x64, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f,
 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x1a,
 0x0a, 0x08, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08,

proto-public/pbcatalog/v2beta1/node.proto (5 changed lines)

@@ -8,10 +8,7 @@ package hashicorp.consul.catalog.v2beta1;
 import "pbresource/annotations.proto";
 message Node {
-option (hashicorp.consul.resource.spec) = {
-// TODO: This should eventually be SCOPE_PARTITION but that will require further changes to code
-scope: SCOPE_NAMESPACE,
-};
+option (hashicorp.consul.resource.spec) = {scope: SCOPE_PARTITION};
 repeated NodeAddress addresses = 1;
 }
