consul/agent/hcp/testing.go

// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

package hcp

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
	"regexp"
	"strings"
	"sync"
	"time"

	hcpgnm "github.com/hashicorp/hcp-sdk-go/clients/cloud-global-network-manager-service/preview/2022-02-15/client/global_network_manager_service"
	gnmmod "github.com/hashicorp/hcp-sdk-go/clients/cloud-global-network-manager-service/preview/2022-02-15/models"
	"github.com/hashicorp/hcp-sdk-go/resource"
)
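
// TestEndpoint describes a mock handler for a single endpoint path suffix
// under a cluster's base path.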
type TestEndpoint struct {
	Methods    []string
	PathSuffix string
	Handler    func(r *http.Request, cluster resource.Resource) (interface{}, error)
}
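
// MockHCPServer is an http.Handler that mimics just enough of the HCP Global
// Network Manager API for agent tests.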
type MockHCPServer struct {
	mu       sync.Mutex
	handlers map[string]TestEndpoint
	servers  map[string]*gnmmod.HashicorpCloudGlobalNetworkManager20220215Server
}
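
// basePathRe matches cluster-scoped GNM request paths; the capture groups
// are, in order: organization, project, cluster ID, and the trailing
// endpoint path suffix.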
var basePathRe = regexp.MustCompile("/global-network-manager/[^/]+/organizations/([^/]+)/projects/([^/]+)/clusters/([^/]+)/([^/]+.*)")
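
// NewMockHCPServer returns a mock server with this package's endpoints
// (agent/server-state and agent/discover) pre-registered.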
func NewMockHCPServer() *MockHCPServer {
	s := &MockHCPServer{
		handlers: make(map[string]TestEndpoint),
		servers:  make(map[string]*gnmmod.HashicorpCloudGlobalNetworkManager20220215Server),
	}
	// Define endpoints in this package
	s.AddEndpoint(TestEndpoint{
		Methods:    []string{"POST"},
		PathSuffix: "agent/server-state",
		Handler:    s.handleStatus,
	})
	s.AddEndpoint(TestEndpoint{
		Methods:    []string{"POST"},
		PathSuffix: "agent/discover",
		Handler:    s.handleDiscover,
	})
	return s
}
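
// A minimal usage sketch (the net/http/httptest wiring and the "2022-02-15"
// version segment are illustrative; the mock only requires the path to match
// basePathRe):
//
//	mock := NewMockHCPServer()
//	srv := httptest.NewServer(mock)
//	defer srv.Close()
//	url := srv.URL + "/global-network-manager/2022-02-15/organizations/org1" +
//		"/projects/proj1/clusters/cluster1/agent/discover"
//	resp, err := http.Post(url, "application/json", nil)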

// AddEndpoint allows adding additional endpoints from other packages, e.g.
// bootstrap (which can't be merged into one package due to dependency cycles).
// It's not safe to call this concurrently with any other call to AddEndpoint
// or ServeHTTP.
func (s *MockHCPServer) AddEndpoint(e TestEndpoint) {
	s.handlers[e.PathSuffix] = e
}
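
// For example, a test in another package might register an extra endpoint
// like this (the path suffix and handler body are illustrative, not part of
// this package):
//
//	mock.AddEndpoint(TestEndpoint{
//		Methods:    []string{"POST"},
//		PathSuffix: "agent/bootstrap_config",
//		Handler: func(r *http.Request, cluster resource.Resource) (interface{}, error) {
//			return map[string]string{"cluster_id": cluster.ID}, nil
//		},
//	})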

func (s *MockHCPServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if r.URL.Path == "/oauth2/token" {
		mockTokenResponse(w)
		return
	}

	matches := basePathRe.FindStringSubmatch(r.URL.Path)
	if matches == nil || len(matches) < 5 {
		w.WriteHeader(http.StatusNotFound)
		log.Printf("ERROR 404: %s %s\n", r.Method, r.URL.Path)
		return
	}
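
	// Submatches follow basePathRe's capture groups: 1=org, 2=project,
	// 3=cluster ID, 4=endpoint path suffix.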
	cluster := resource.Resource{
		ID:           matches[3],
		Type:         "cluster",
		Organization: matches[1],
		Project:      matches[2],
	}

	found := false
	var resp interface{}
	var err error
	for _, e := range s.handlers {
		if e.PathSuffix == matches[4] {
			found = true
			if !enforceMethod(w, r, e.Methods) {
				return
			}
			resp, err = e.Handler(r, cluster)
			break
		}
	}
	if !found {
		w.WriteHeader(http.StatusNotFound)
		log.Printf("ERROR 404: %s %s\n", r.Method, r.URL.Path)
		return
	}
	if err != nil {
		errResponse(w, err)
		return
	}
	if resp == nil {
		// no response body
		log.Printf("OK 204: %s %s\n", r.Method, r.URL.Path)
		w.WriteHeader(http.StatusNoContent)
		return
	}
	bs, err := json.MarshalIndent(resp, "", " ")
	if err != nil {
		errResponse(w, err)
		return
	}
	log.Printf("OK 200: %s %s\n", r.Method, r.URL.Path)
	w.Header().Set("content-type", "application/json")
	w.WriteHeader(http.StatusOK)
	w.Write(bs)
}
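
// enforceMethod reports whether r.Method matches one of methods
// (case-insensitively); if not, it responds with a 405.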
func enforceMethod(w http.ResponseWriter, r *http.Request, methods []string) bool {
	for _, m := range methods {
		if strings.EqualFold(r.Method, m) {
			return true
		}
	}
	// No match; send a 405
	w.WriteHeader(http.StatusMethodNotAllowed)
	log.Printf("ERROR 405: bad method (not in %v): %s %s\n", methods, r.Method, r.URL.Path)
	return false
}
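
// mockTokenResponse serves a static bearer token; the HCP SDK fetches one
// from /oauth2/token before calling the API.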
func mockTokenResponse(w http.ResponseWriter) {
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	w.Write([]byte(`{"access_token": "token", "token_type": "Bearer"}`))
}
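
// handleStatus records the pushed server state so that a later
// agent/discover call can return it.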
func (s *MockHCPServer) handleStatus(r *http.Request, cluster resource.Resource) (interface{}, error) {
	var req hcpgnm.AgentPushServerStateBody
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		return nil, err
	}
	log.Printf("STATUS UPDATE: server=%s version=%s leader=%v hasLeader=%v healthy=%v tlsCertExpiryDays=%1.0f",
		req.ServerState.Name,
		req.ServerState.Version,
		req.ServerState.Raft.IsLeader,
		req.ServerState.Raft.KnownLeader,
		req.ServerState.Autopilot.Healthy,
		time.Until(time.Time(req.ServerState.ServerTLS.InternalRPC.CertExpiry)).Hours()/24,
	)
	s.servers[req.ServerState.Name] = &gnmmod.HashicorpCloudGlobalNetworkManager20220215Server{
		GossipPort: req.ServerState.GossipPort,
		ID:         req.ServerState.ID,
		LanAddress: req.ServerState.LanAddress,
		Name:       req.ServerState.Name,
		RPCPort:    req.ServerState.RPCPort,
	}
	return "{}", nil
}
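
// handleDiscover returns every server recorded by handleStatus, mimicking
// GNM's peer-discovery endpoint.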
func (s *MockHCPServer) handleDiscover(r *http.Request, cluster resource.Resource) (interface{}, error) {
	// Preallocate capacity only; make(..., len(s.servers)) followed by append
	// would leave nil entries at the front of the slice.
	servers := make([]*gnmmod.HashicorpCloudGlobalNetworkManager20220215Server, 0, len(s.servers))
	for _, server := range s.servers {
		servers = append(servers, server)
	}
	return gnmmod.HashicorpCloudGlobalNetworkManager20220215AgentDiscoverResponse{Servers: servers}, nil
}
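
// errResponse reports err as a 500 with a small JSON error body.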
func errResponse(w http.ResponseWriter, err error) {
	log.Printf("ERROR 500: %s\n", err)
	w.WriteHeader(http.StatusInternalServerError)
	w.Write([]byte(fmt.Sprintf(`{"error": %q}`, err.Error())))
}