		})
	})
}

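// NOTE: The ACL replication and multi-DC tests below are commented out; they
// were written against the legacy ACL.Apply endpoint and the old ACL server
// config fields.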
/*
func TestACL_Replication(t *testing.T) {
	t.Parallel()

	aclExtendPolicies := []string{"extend-cache", "async-cache"}

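	// Run the full replication scenario once for each down policy that can keep
	// serving cached/replicated tokens while the primary datacenter is down.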
	for _, aclDownPolicy := range aclExtendPolicies {
		dir1, s1 := testServerWithConfig(t, func(c *Config) {
			c.PrimaryDatacenter = "dc1"
			c.ACLMasterToken = "root"
		})
		defer os.RemoveAll(dir1)
		defer s1.Shutdown()
		client := rpcClient(t, s1)
		defer client.Close()

		dir2, s2 := testServerWithConfig(t, func(c *Config) {
			c.Datacenter = "dc2"
			c.PrimaryDatacenter = "dc1"
			c.ACLResolverSettings.ACLDefaultPolicy = "deny"
			c.ACLDownPolicy = aclDownPolicy
			c.ACLTokenReplication = true
			c.ACLReplicationRate = 100
			c.ACLReplicationBurst = 100
			c.ACLReplicationApplyLimit = 1000000
		})
		s2.tokens.UpdateReplicationToken("root")
		defer os.RemoveAll(dir2)
		defer s2.Shutdown()

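		// dc3 also replicates tokens but uses the "deny" down policy, so its
		// replicated tokens cannot be used once the primary is unreachable.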
		dir3, s3 := testServerWithConfig(t, func(c *Config) {
			c.Datacenter = "dc3"
			c.PrimaryDatacenter = "dc1"
			c.ACLDownPolicy = "deny"
			c.ACLTokenReplication = true
			c.ACLReplicationRate = 100
			c.ACLReplicationBurst = 100
			c.ACLReplicationApplyLimit = 1000000
		})
		s3.tokens.UpdateReplicationToken("root")
		defer os.RemoveAll(dir3)
		defer s3.Shutdown()

		// Try to join.
		joinWAN(t, s2, s1)
		joinWAN(t, s3, s1)
		testrpc.WaitForLeader(t, s1.RPC, "dc1")
		testrpc.WaitForLeader(t, s1.RPC, "dc2")
		testrpc.WaitForLeader(t, s1.RPC, "dc3")

		// Create a new token.
		arg := structs.ACLRequest{
			Datacenter: "dc1",
			Op:         structs.ACLSet,
			ACL: structs.ACL{
				Name:  "User token",
				Type:  structs.ACLTokenTypeClient,
				Rules: testACLPolicy,
			},
			WriteRequest: structs.WriteRequest{Token: "root"},
		}
		var id string
		if err := s1.RPC("ACL.Apply", &arg, &id); err != nil {
			t.Fatalf("err: %v", err)
		}

		// Wait for replication to occur.
		retry.Run(t, func(r *retry.R) {
			_, acl, err := s2.fsm.State().ACLTokenGetBySecret(nil, id)
			if err != nil {
				r.Fatal(err)
			}
			if acl == nil {
				r.Fatal("token not yet replicated to s2")
			}
			_, acl, err = s3.fsm.State().ACLTokenGetBySecret(nil, id)
			if err != nil {
				r.Fatal(err)
			}
			if acl == nil {
				r.Fatal("token not yet replicated to s3")
			}
		})

		// Kill the ACL datacenter.
		s1.Shutdown()

		// Token should resolve on s2, which has replication + extend-cache.
		acl, err := s2.ResolveToken(id)
		if err != nil {
			t.Fatalf("err: %v", err)
		}
		if acl == nil {
			t.Fatalf("missing acl")
		}

		// Check the policy.
		if acl.KeyRead("bar") {
			t.Fatalf("unexpected read")
		}
		if !acl.KeyRead("foo/test") {
			t.Fatalf("unexpected failed read")
		}

		// Although s3 has replication, and we verified that the ACL is there,
		// it can not be used because of the down policy.
		acl, err = s3.ResolveToken(id)
		if err != nil {
			t.Fatalf("err: %v", err)
		}
		if acl == nil {
			t.Fatalf("missing acl")
		}

		// Check the policy.
		if acl.KeyRead("bar") {
			t.Fatalf("unexpected read")
		}
		if acl.KeyRead("foo/test") {
			t.Fatalf("unexpected read")
		}
	}
}

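// TestACL_MultiDC_Found verifies that a token created in the primary
// datacenter can be resolved from a secondary datacenter.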
func TestACL_MultiDC_Found(t *testing.T) {
	t.Parallel()
	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.PrimaryDatacenter = "dc1"
		c.ACLMasterToken = "root"
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	client := rpcClient(t, s1)
	defer client.Close()

	dir2, s2 := testServerWithConfig(t, func(c *Config) {
		c.Datacenter = "dc2"
		c.PrimaryDatacenter = "dc1" // Enable ACLs!
	})
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()

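	// dc2 points at dc1 as its primary but does not replicate tokens, so the
	// token resolution below has to be served by dc1.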
	// Try to join.
	joinWAN(t, s2, s1)

	testrpc.WaitForLeader(t, s1.RPC, "dc1")
	testrpc.WaitForLeader(t, s1.RPC, "dc2")

	// Create a new token.
	arg := structs.ACLRequest{
		Datacenter: "dc1",
		Op:         structs.ACLSet,
		ACL: structs.ACL{
			Name:  "User token",
			Type:  structs.ACLTokenTypeClient,
			Rules: testACLPolicy,
		},
		WriteRequest: structs.WriteRequest{Token: "root"},
	}
	var id string
	if err := s1.RPC("ACL.Apply", &arg, &id); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Token should resolve.
	acl, err := s2.ResolveToken(id)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if acl == nil {
		t.Fatalf("missing acl")
	}

	// Check the policy.
	if acl.KeyRead("bar") {
		t.Fatalf("unexpected read")
	}
	if !acl.KeyRead("foo/test") {
		t.Fatalf("unexpected failed read")
	}
}
*/

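// TestACL_filterHealthChecks covers ACL-based filtering of health check
// results.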
func TestACL_filterHealthChecks(t *testing.T) {
	t.Parallel()

	// Create some health checks.