// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package agent

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
	"net/url"
	"os"
	"path/filepath"
	"sync/atomic"
	"testing"
	"time"

	cleanhttp "github.com/hashicorp/go-cleanhttp"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/hashicorp/consul/agent/config"
	"github.com/hashicorp/consul/agent/structs"
	"github.com/hashicorp/consul/api"
	"github.com/hashicorp/consul/proto/private/pbpeering"
	"github.com/hashicorp/consul/sdk/testutil"
	"github.com/hashicorp/consul/sdk/testutil/retry"
	"github.com/hashicorp/consul/testrpc"
	"github.com/hashicorp/consul/types"
)

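// TestUIIndex verifies that files placed in the directory configured via
// ui_config.dir are served by the agent under the /ui/ path.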
func TestUIIndex(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	// Make a test dir to serve UI files
	uiDir := testutil.TempDir(t, "consul")

	// Make the server
	a := NewTestAgent(t, `
		ui_config {
			dir = "`+uiDir+`"
		}
	`)
	defer a.Shutdown()
	testrpc.WaitForLeader(t, a.RPC, "dc1")

	// Create file
	path := filepath.Join(a.Config.UIConfig.Dir, "my-file")
	if err := os.WriteFile(path, []byte("test"), 0644); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Request the custom file
	req, _ := http.NewRequest("GET", "/ui/my-file", nil)
	req.URL.Scheme = "http"
	req.URL.Host = a.HTTPAddr()

	// Make the request
	client := cleanhttp.DefaultClient()
	resp, err := client.Do(req)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	defer resp.Body.Close()

	// Verify the response
	if resp.StatusCode != 200 {
		t.Fatalf("bad: %v", resp)
	}

	// Verify the body
	out := bytes.NewBuffer(nil)
	io.Copy(out, resp.Body)
	if out.String() != "test" {
		t.Fatalf("bad: %s", out.Bytes())
	}
}

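// TestUINodes exercises /v1/internal/ui/nodes/dc1 and checks that both local
// and peered nodes are returned with non-nil service and check lists.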
func TestUINodes(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	a := StartTestAgent(t, TestAgent{HCL: ``, Overrides: `peering = { test_allow_peer_registrations = true }`})
	defer a.Shutdown()

	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	args := []*structs.RegisterRequest{
		{
			Datacenter: "dc1",
			Node:       "test",
			Address:    "127.0.0.1",
		},
		{
			Datacenter: "dc1",
			Node:       "foo-peer",
			Address:    "127.0.0.3",
			PeerName:   "peer1",
		},
	}

	for _, reg := range args {
		var out struct{}
		err := a.RPC(context.Background(), "Catalog.Register", reg, &out)
		require.NoError(t, err)
	}

	// establish "peer1"
	{
		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()

		peerOne := &pbpeering.PeeringWriteRequest{
			Peering: &pbpeering.Peering{
				Name:                "peer1",
				State:               pbpeering.PeeringState_ESTABLISHING,
				PeerCAPems:          nil,
				PeerServerName:      "fooservername",
				PeerServerAddresses: []string{"addr1"},
			},
		}
		_, err := a.rpcClientPeering.PeeringWrite(ctx, peerOne)
		require.NoError(t, err)
	}

	req, _ := http.NewRequest("GET", "/v1/internal/ui/nodes/dc1", nil)
	resp := httptest.NewRecorder()
	obj, err := a.srv.UINodes(resp, req)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	assertIndex(t, resp)

	// Should be 3 nodes, and all the empty lists should be non-nil
	nodes := obj.(structs.NodeDump)
	require.Len(t, nodes, 3)

	// check local nodes, services and checks
	require.Equal(t, a.Config.NodeName, nodes[0].Node)
	require.NotNil(t, nodes[0].Services)
	require.Len(t, nodes[0].Services, 1)
	require.NotNil(t, nodes[0].Checks)
	require.Len(t, nodes[0].Checks, 1)
	require.Equal(t, "test", nodes[1].Node)
	require.NotNil(t, nodes[1].Services)
	require.Len(t, nodes[1].Services, 0)
	require.NotNil(t, nodes[1].Checks)
	require.Len(t, nodes[1].Checks, 0)

	// peered node
	require.Equal(t, "foo-peer", nodes[2].Node)
	require.Equal(t, "peer1", nodes[2].PeerName)
	require.NotNil(t, nodes[2].Services)
	require.Len(t, nodes[2].Services, 0)
	require.NotNil(t, nodes[2].Checks)
	require.Len(t, nodes[2].Checks, 0)

	// check for consul-version in node meta
	require.Equal(t, nodes[0].Meta[structs.MetaConsulVersion], a.Config.Version)
}

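// TestUINodes_Filter verifies that the ?filter= bexpr expression on the nodes
// endpoint is applied to the returned node dump.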
func TestUINodes_Filter(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	a := NewTestAgent(t, "")
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	args := &structs.RegisterRequest{
		Datacenter: "dc1",
		Node:       "test",
		Address:    "127.0.0.1",
		NodeMeta: map[string]string{
			"os": "linux",
		},
	}

	var out struct{}
	require.NoError(t, a.RPC(context.Background(), "Catalog.Register", args, &out))

	args = &structs.RegisterRequest{
		Datacenter: "dc1",
		Node:       "test2",
		Address:    "127.0.0.1",
		NodeMeta: map[string]string{
			"os": "macos",
		},
	}
	require.NoError(t, a.RPC(context.Background(), "Catalog.Register", args, &out))

	req, _ := http.NewRequest("GET", "/v1/internal/ui/nodes/dc1?filter="+url.QueryEscape("Meta.os == linux"), nil)
	resp := httptest.NewRecorder()
	obj, err := a.srv.UINodes(resp, req)
	require.NoError(t, err)
	assertIndex(t, resp)

	// Only the "test" node matches the filter; its service and check lists should be empty
	nodes := obj.(structs.NodeDump)
	require.Len(t, nodes, 1)
	require.Equal(t, nodes[0].Node, "test")
	require.Empty(t, nodes[0].Services)
	require.Empty(t, nodes[0].Checks)
}

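// TestUINodeInfo exercises /v1/internal/ui/node/<name> for the agent's own
// node and for a freshly registered node with no services or checks.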
func TestUINodeInfo(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	a := NewTestAgent(t, "")
	defer a.Shutdown()
	testrpc.WaitForLeader(t, a.RPC, "dc1")

	req, _ := http.NewRequest("GET", fmt.Sprintf("/v1/internal/ui/node/%s", a.Config.NodeName), nil)
	resp := httptest.NewRecorder()
	obj, err := a.srv.UINodeInfo(resp, req)
	require.NoError(t, err)
	require.Equal(t, resp.Code, http.StatusOK)
	assertIndex(t, resp)

	// Should be 1 node for the server
	node := obj.(*structs.NodeInfo)
	if node.Node != a.Config.NodeName {
		t.Fatalf("bad: %v", node)
	}

	args := &structs.RegisterRequest{
		Datacenter: "dc1",
		Node:       "test",
		Address:    "127.0.0.1",
	}

	var out struct{}
	if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	req, _ = http.NewRequest("GET", "/v1/internal/ui/node/test", nil)
	resp = httptest.NewRecorder()
	obj, err = a.srv.UINodeInfo(resp, req)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	assertIndex(t, resp)

	// Should be non-nil empty lists for services and checks
	node = obj.(*structs.NodeInfo)
	if node.Node != "test" ||
		node.Services == nil || len(node.Services) != 0 ||
		node.Checks == nil || len(node.Checks) != 0 {
		t.Fatalf("bad: %v", node)
	}

	// check for consul-version in node meta
	require.Equal(t, node.Meta[structs.MetaConsulVersion], a.Config.Version)
}

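// TestUIServices exercises /v1/internal/ui/services across typical services,
// connect proxies, gateways, and a peered service, with and without filters.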
func TestUIServices(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	a := StartTestAgent(t, TestAgent{HCL: ``, Overrides: `peering = { test_allow_peer_registrations = true }`})
	defer a.Shutdown()

	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	requests := []*structs.RegisterRequest{
		// register foo node
		{
			Datacenter: "dc1",
			Node:       "foo",
			Address:    "127.0.0.1",
			Checks: structs.HealthChecks{
				&structs.HealthCheck{
					Node:   "foo",
					Name:   "node check",
					Status: api.HealthPassing,
				},
			},
		},
		// register api service on node foo
		{
			Datacenter:     "dc1",
			Node:           "foo",
			SkipNodeUpdate: true,
			Service: &structs.NodeService{
				Kind:    structs.ServiceKindTypical,
				Service: "api",
				ID:      "api-1",
				Tags:    []string{"tag1", "tag2"},
			},
			Checks: structs.HealthChecks{
				&structs.HealthCheck{
					Node:        "foo",
					Name:        "api svc check",
					ServiceName: "api",
					ServiceID:   "api-1",
					Status:      api.HealthWarning,
				},
			},
		},
		// register api-proxy svc on node foo
		{
			Datacenter:     "dc1",
			Node:           "foo",
			SkipNodeUpdate: true,
			Service: &structs.NodeService{
				Kind:    structs.ServiceKindConnectProxy,
				Service: "api-proxy",
				ID:      "api-proxy-1",
				Tags:    []string{},
				Meta:    map[string]string{structs.MetaExternalSource: "k8s"},
				Port:    1234,
				Proxy: structs.ConnectProxyConfig{
					DestinationServiceName: "api",
				},
			},
			Checks: structs.HealthChecks{
				&structs.HealthCheck{
					Node:        "foo",
					Name:        "api proxy listening",
					ServiceName: "api-proxy",
					ServiceID:   "api-proxy-1",
					Status:      api.HealthPassing,
				},
			},
		},
		// register bar node with service web
		{
			Datacenter: "dc1",
			Node:       "bar",
			Address:    "127.0.0.2",
			Service: &structs.NodeService{
				Kind:    structs.ServiceKindTypical,
				Service: "web",
				ID:      "web-1",
				Tags:    []string{},
				Meta:    map[string]string{structs.MetaExternalSource: "k8s"},
				Port:    1234,
			},
			Checks: []*structs.HealthCheck{
				{
					Node:        "bar",
					Name:        "web svc check",
					Status:      api.HealthCritical,
					ServiceName: "web",
					ServiceID:   "web-1",
				},
			},
		},
		// register zip node with service cache
		{
			Datacenter: "dc1",
			Node:       "zip",
			Address:    "127.0.0.3",
			Service: &structs.NodeService{
				Service: "cache",
				Tags:    []string{},
			},
		},
		// register peer node foo with peer service
		{
			Datacenter: "dc1",
			Node:       "foo",
			ID:         types.NodeID("e0155642-135d-4739-9853-a1ee6c9f945b"),
			Address:    "127.0.0.2",
			TaggedAddresses: map[string]string{
				"lan": "127.0.0.2",
				"wan": "198.18.0.2",
			},
			NodeMeta: map[string]string{
				"env": "production",
				"os":  "linux",
			},
			PeerName: "peer1",
			Service: &structs.NodeService{
				Kind:     structs.ServiceKindTypical,
				ID:       "serviceID",
				Service:  "service",
				Port:     1235,
				Address:  "198.18.1.2",
				PeerName: "peer1",
			},
		},
	}

	for _, args := range requests {
		var out struct{}
		require.NoError(t, a.RPC(context.Background(), "Catalog.Register", args, &out))
	}

	// establish "peer1"
	{
		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()

		peerOne := &pbpeering.PeeringWriteRequest{
			Peering: &pbpeering.Peering{
				Name:                "peer1",
				State:               pbpeering.PeeringState_ESTABLISHING,
				PeerCAPems:          nil,
				PeerServerName:      "fooservername",
				PeerServerAddresses: []string{"addr1"},
			},
		}
		_, err := a.rpcClientPeering.PeeringWrite(ctx, peerOne)
		require.NoError(t, err)
	}

	// Register a terminating gateway associated with api and cache
	{
		arg := structs.RegisterRequest{
			Datacenter: "dc1",
			Node:       "foo",
			Address:    "127.0.0.1",
			Service: &structs.NodeService{
				ID:      "terminating-gateway",
				Service: "terminating-gateway",
				Kind:    structs.ServiceKindTerminatingGateway,
				Port:    443,
			},
		}
		var regOutput struct{}
		require.NoError(t, a.RPC(context.Background(), "Catalog.Register", &arg, &regOutput))

		args := &structs.TerminatingGatewayConfigEntry{
			Name: "terminating-gateway",
			Kind: structs.TerminatingGateway,
			Services: []structs.LinkedService{
				{
					Name: "api",
				},
				{
					Name: "cache",
				},
			},
		}

		req := structs.ConfigEntryRequest{
			Op:         structs.ConfigEntryUpsert,
			Datacenter: "dc1",
			Entry:      args,
		}
		var configOutput bool
		require.NoError(t, a.RPC(context.Background(), "ConfigEntry.Apply", &req, &configOutput))
		require.True(t, configOutput)

		// Web should not show up as ConnectedWithGateway since this one does not have any instances
		args = &structs.TerminatingGatewayConfigEntry{
			Name: "other-terminating-gateway",
			Kind: structs.TerminatingGateway,
			Services: []structs.LinkedService{
				{
					Name: "web",
				},
			},
		}

		req = structs.ConfigEntryRequest{
			Op:         structs.ConfigEntryUpsert,
			Datacenter: "dc1",
			Entry:      args,
		}
		require.NoError(t, a.RPC(context.Background(), "ConfigEntry.Apply", &req, &configOutput))
		require.True(t, configOutput)
	}

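	// Query the endpoint with no filter, with a filter that matches two
	// services, and with a filter that matches none.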
	t.Run("No Filter", func(t *testing.T) {
		t.Parallel()
		req, _ := http.NewRequest("GET", "/v1/internal/ui/services/dc1", nil)
		resp := httptest.NewRecorder()
		obj, err := a.srv.UIServices(resp, req)
		require.NoError(t, err)
		assertIndex(t, resp)

		// Should be 7 service summaries, and all the empty lists should be non-nil
		summary := obj.([]*ServiceListingSummary)
		require.Len(t, summary, 7)

		// internal accounting that users don't see can be blown away
		for _, sum := range summary {
			sum.transparentProxySet = false
			sum.externalSourceSet = nil
			sum.checks = nil
		}

		expected := []*ServiceListingSummary{
			{
				ServiceSummary: ServiceSummary{
					Kind:           structs.ServiceKindTypical,
					Name:           "api",
					Datacenter:     "dc1",
					Tags:           []string{"tag1", "tag2"},
					Nodes:          []string{"foo"},
					InstanceCount:  1,
					ChecksPassing:  2,
					ChecksWarning:  1,
					ChecksCritical: 0,
					EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
				},
				ConnectedWithProxy:   true,
				ConnectedWithGateway: true,
			},
			{
				ServiceSummary: ServiceSummary{
					Kind:            structs.ServiceKindConnectProxy,
					Name:            "api-proxy",
					Datacenter:      "dc1",
					Tags:            nil,
					Nodes:           []string{"foo"},
					InstanceCount:   1,
					ChecksPassing:   2,
					ChecksWarning:   0,
					ChecksCritical:  0,
					ExternalSources: []string{"k8s"},
					EnterpriseMeta:  *structs.DefaultEnterpriseMetaInDefaultPartition(),
				},
			},
			{
				ServiceSummary: ServiceSummary{
					Kind:           structs.ServiceKindTypical,
					Name:           "cache",
					Datacenter:     "dc1",
					Tags:           nil,
					Nodes:          []string{"zip"},
					InstanceCount:  1,
					ChecksPassing:  0,
					ChecksWarning:  0,
					ChecksCritical: 0,
					EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
				},
				ConnectedWithGateway: true,
			},
			{
				ServiceSummary: ServiceSummary{
					Kind:           structs.ServiceKindTypical,
					Name:           "consul",
					Datacenter:     "dc1",
					Tags:           nil,
					Nodes:          []string{a.Config.NodeName},
					InstanceCount:  1,
					ChecksPassing:  1,
					ChecksWarning:  0,
					ChecksCritical: 0,
					EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
				},
			},
			{
				ServiceSummary: ServiceSummary{
					Kind:           structs.ServiceKindTerminatingGateway,
					Name:           "terminating-gateway",
					Datacenter:     "dc1",
					Tags:           nil,
					Nodes:          []string{"foo"},
					InstanceCount:  1,
					ChecksPassing:  1,
					ChecksWarning:  0,
					ChecksCritical: 0,
					GatewayConfig:  GatewayConfig{AssociatedServiceCount: 2},
					EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
				},
			},
			{
				ServiceSummary: ServiceSummary{
					Kind:            structs.ServiceKindTypical,
					Name:            "web",
					Datacenter:      "dc1",
					Tags:            nil,
					Nodes:           []string{"bar"},
					InstanceCount:   1,
					ChecksPassing:   0,
					ChecksWarning:   0,
					ChecksCritical:  1,
					ExternalSources: []string{"k8s"},
					EnterpriseMeta:  *structs.DefaultEnterpriseMetaInDefaultPartition(),
				},
			},
			{
				ServiceSummary: ServiceSummary{
					Kind:           structs.ServiceKindTypical,
					Name:           "service",
					Datacenter:     "dc1",
					Tags:           nil,
					Nodes:          []string{"foo"},
					InstanceCount:  1,
					ChecksPassing:  0,
					ChecksWarning:  0,
					ChecksCritical: 0,
					EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
					PeerName:       "peer1",
				},
			},
		}
		require.ElementsMatch(t, expected, summary)
	})

	t.Run("Filtered", func(t *testing.T) {
		filterQuery := url.QueryEscape("Service.Service == web or Service.Service == api")
		req, _ := http.NewRequest("GET", "/v1/internal/ui/services?filter="+filterQuery, nil)
		resp := httptest.NewRecorder()
		obj, err := a.srv.UIServices(resp, req)
		require.NoError(t, err)
		assertIndex(t, resp)

		// Should be 2 service summaries, and all the empty lists should be non-nil
		summary := obj.([]*ServiceListingSummary)
		require.Len(t, summary, 2)

		// internal accounting that users don't see can be blown away
		for _, sum := range summary {
			sum.externalSourceSet = nil
			sum.checks = nil
		}

		expected := []*ServiceListingSummary{
			{
				ServiceSummary: ServiceSummary{
					Kind:           structs.ServiceKindTypical,
					Name:           "api",
					Datacenter:     "dc1",
					Tags:           []string{"tag1", "tag2"},
					Nodes:          []string{"foo"},
					InstanceCount:  1,
					ChecksPassing:  1,
					ChecksWarning:  1,
					ChecksCritical: 0,
					EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
				},
				ConnectedWithProxy:   false,
				ConnectedWithGateway: false,
			},
			{
				ServiceSummary: ServiceSummary{
					Kind:            structs.ServiceKindTypical,
					Name:            "web",
					Datacenter:      "dc1",
					Tags:            nil,
					Nodes:           []string{"bar"},
					InstanceCount:   1,
					ChecksPassing:   0,
					ChecksWarning:   0,
					ChecksCritical:  1,
					ExternalSources: []string{"k8s"},
					EnterpriseMeta:  *structs.DefaultEnterpriseMetaInDefaultPartition(),
				},
			},
		}
		require.ElementsMatch(t, expected, summary)
	})

	t.Run("Filtered without results", func(t *testing.T) {
		filterQuery := url.QueryEscape("Service.Service == absent")
		req, _ := http.NewRequest("GET", "/v1/internal/ui/services?filter="+filterQuery, nil)
		resp := httptest.NewRecorder()
		obj, err := a.srv.UIServices(resp, req)
		require.NoError(t, err)
		assertIndex(t, resp)

		// Ensure the ServiceSummary doesn't output a `null` response when there
		// are no matching summaries
		require.NotNil(t, obj)

		summary := obj.([]*ServiceListingSummary)
		require.Len(t, summary, 0)
	})
}

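// TestUIExportedServices verifies that /v1/internal/ui/exported-services
// returns only the services exported to the peer named in the ?peer query
// parameter.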
func TestUIExportedServices(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	a := StartTestAgent(t, TestAgent{Overrides: `peering = { test_allow_peer_registrations = true }`})
	defer a.Shutdown()

	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	requests := []*structs.RegisterRequest{
		// register api service
		{
			Datacenter:     "dc1",
			Node:           "node",
			SkipNodeUpdate: true,
			Service: &structs.NodeService{
				Kind:    structs.ServiceKindTypical,
				Service: "api",
				ID:      "api-1",
			},
			Checks: structs.HealthChecks{
				&structs.HealthCheck{
					Node:        "node",
					Name:        "api svc check",
					ServiceName: "api",
					ServiceID:   "api-1",
					Status:      api.HealthWarning,
				},
			},
		},
		// register api-proxy svc
		{
			Datacenter:     "dc1",
			Node:           "node",
			SkipNodeUpdate: true,
			Service: &structs.NodeService{
				Kind:    structs.ServiceKindConnectProxy,
				Service: "api-proxy",
				ID:      "api-proxy-1",
				Tags:    []string{},
				Meta:    map[string]string{structs.MetaExternalSource: "k8s"},
				Port:    1234,
				Proxy: structs.ConnectProxyConfig{
					DestinationServiceName: "api",
				},
			},
			Checks: structs.HealthChecks{
				&structs.HealthCheck{
					Node:        "node",
					Name:        "api proxy listening",
					ServiceName: "api-proxy",
					ServiceID:   "api-proxy-1",
					Status:      api.HealthPassing,
				},
			},
		},
		// register service web
		{
			Datacenter: "dc1",
			Node:       "bar",
			Address:    "127.0.0.2",
			Service: &structs.NodeService{
				Kind:    structs.ServiceKindTypical,
				Service: "web",
				ID:      "web-1",
				Tags:    []string{},
				Meta:    map[string]string{structs.MetaExternalSource: "k8s"},
				Port:    1234,
			},
			Checks: []*structs.HealthCheck{
				{
					Node:        "bar",
					Name:        "web svc check",
					Status:      api.HealthCritical,
					ServiceName: "web",
					ServiceID:   "web-1",
				},
			},
		},
	}

	for _, args := range requests {
		var out struct{}
		require.NoError(t, a.RPC(context.Background(), "Catalog.Register", args, &out))
	}

	// establish "peer1"
	{
		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()

		req := &pbpeering.GenerateTokenRequest{
			PeerName: "peer1",
		}
		_, err := a.rpcClientPeering.GenerateToken(ctx, req)
		require.NoError(t, err)
	}

	{
		// Register exported services
		args := &structs.ExportedServicesConfigEntry{
			Name: "default",
			Services: []structs.ExportedService{
				{
					Name: "api",
					Consumers: []structs.ServiceConsumer{
						{
							Peer: "peer1",
						},
					},
				},
			},
		}
		req := structs.ConfigEntryRequest{
			Op:         structs.ConfigEntryUpsert,
			Datacenter: "dc1",
			Entry:      args,
		}
		var configOutput bool
		require.NoError(t, a.RPC(context.Background(), "ConfigEntry.Apply", &req, &configOutput))
		require.True(t, configOutput)
	}

	t.Run("valid peer", func(t *testing.T) {
		t.Parallel()
		req, _ := http.NewRequest("GET", "/v1/internal/ui/exported-services?peer=peer1", nil)
		resp := httptest.NewRecorder()
		a.srv.h.ServeHTTP(resp, req)
		require.Equal(t, http.StatusOK, resp.Code)

		decoder := json.NewDecoder(resp.Body)
		var summary []*ServiceListingSummary
		require.NoError(t, decoder.Decode(&summary))
		assertIndex(t, resp)

		require.Len(t, summary, 1)

		// internal accounting that users don't see can be blown away
		for _, sum := range summary {
			sum.transparentProxySet = false
			sum.externalSourceSet = nil
			sum.checks = nil
		}

		expected := []*ServiceListingSummary{
			{
				ServiceSummary: ServiceSummary{
					Kind:           structs.ServiceKindTypical,
					Name:           "api",
					Datacenter:     "dc1",
					EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
				},
			},
		}
		require.Equal(t, expected, summary)
	})

	t.Run("invalid peer", func(t *testing.T) {
		t.Parallel()
		req, _ := http.NewRequest("GET", "/v1/internal/ui/exported-services?peer=peer2", nil)
		resp := httptest.NewRecorder()
		a.srv.h.ServeHTTP(resp, req)
		require.Equal(t, http.StatusOK, resp.Code)

		decoder := json.NewDecoder(resp.Body)
		var summary []*ServiceListingSummary
		require.NoError(t, decoder.Decode(&summary))
		assertIndex(t, resp)

		require.Len(t, summary, 0)
	})
}

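// TestUIGatewayServiceNodes_Terminating verifies the gateway-services-nodes
// endpoint for a terminating gateway, including a linked service ("redis")
// that is never registered in the catalog.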
func TestUIGatewayServiceNodes_Terminating(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()

	a := NewTestAgent(t, "")
	defer a.Shutdown()

	// Register terminating gateway and a service that will be associated with it
	{
		arg := structs.RegisterRequest{
			Datacenter: "dc1",
			Node:       "foo",
			Address:    "127.0.0.1",
			Service: &structs.NodeService{
				ID:      "terminating-gateway",
				Service: "terminating-gateway",
				Kind:    structs.ServiceKindTerminatingGateway,
				Port:    443,
			},
			Check: &structs.HealthCheck{
				Name:      "terminating connect",
				Status:    api.HealthPassing,
				ServiceID: "terminating-gateway",
			},
		}
		var regOutput struct{}
		require.NoError(t, a.RPC(context.Background(), "Catalog.Register", &arg, &regOutput))

		arg = structs.RegisterRequest{
			Datacenter: "dc1",
			Node:       "bar",
			Address:    "127.0.0.2",
			Service: &structs.NodeService{
				ID:      "db",
				Service: "db",
				Tags:    []string{"primary"},
			},
			Check: &structs.HealthCheck{
				Name:      "db-warning",
				Status:    api.HealthWarning,
				ServiceID: "db",
			},
		}
		require.NoError(t, a.RPC(context.Background(), "Catalog.Register", &arg, &regOutput))

		arg = structs.RegisterRequest{
			Datacenter: "dc1",
			Node:       "baz",
			Address:    "127.0.0.3",
			Service: &structs.NodeService{
				ID:      "db2",
				Service: "db",
				Tags:    []string{"backup"},
			},
			Check: &structs.HealthCheck{
				Name:      "db2-passing",
				Status:    api.HealthPassing,
				ServiceID: "db2",
			},
		}
		require.NoError(t, a.RPC(context.Background(), "Catalog.Register", &arg, &regOutput))
	}

	{
		// Request without having registered the config-entry, shouldn't respond with null
		req, _ := http.NewRequest("GET", "/v1/internal/ui/gateway-services-nodes/terminating-gateway", nil)
		resp := httptest.NewRecorder()
		obj, err := a.srv.UIGatewayServicesNodes(resp, req)
		require.Nil(t, err)
		require.NotNil(t, obj)
	}

	{
		// Register terminating-gateway config entry, linking it to db and redis (does not exist)
		args := &structs.TerminatingGatewayConfigEntry{
			Name: "terminating-gateway",
			Kind: structs.TerminatingGateway,
			Services: []structs.LinkedService{
				{
					Name: "db",
				},
				{
					Name:     "redis",
					CAFile:   "/etc/certs/ca.pem",
					CertFile: "/etc/certs/cert.pem",
					KeyFile:  "/etc/certs/key.pem",
				},
			},
		}

		req := structs.ConfigEntryRequest{
			Op:         structs.ConfigEntryUpsert,
			Datacenter: "dc1",
			Entry:      args,
		}
		var configOutput bool
		require.NoError(t, a.RPC(context.Background(), "ConfigEntry.Apply", &req, &configOutput))
		require.True(t, configOutput)
	}

	// Request
	req, _ := http.NewRequest("GET", "/v1/internal/ui/gateway-services-nodes/terminating-gateway", nil)
	resp := httptest.NewRecorder()
	obj, err := a.srv.UIGatewayServicesNodes(resp, req)
	require.Nil(t, err)
	assertIndex(t, resp)

	summary := obj.([]*ServiceSummary)

	// internal accounting that users don't see can be blown away
	for _, sum := range summary {
		sum.externalSourceSet = nil
		sum.checks = nil
	}

	expect := []*ServiceSummary{
		{
			Name:           "redis",
			EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
		},
		{
			Name:           "db",
			Datacenter:     "dc1",
			Tags:           []string{"backup", "primary"},
			Nodes:          []string{"bar", "baz"},
			InstanceCount:  2,
			ChecksPassing:  1,
			ChecksWarning:  1,
			ChecksCritical: 0,
			EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
		},
	}
	require.ElementsMatch(t, expect, summary)
}

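// TestUIGatewayServiceNodes_Ingress verifies the gateway-services-nodes
// endpoint for an ingress gateway, including the listener addresses built
// from the primary and alternate DNS domains and from custom hosts.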
func TestUIGatewayServiceNodes_Ingress(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()

	a := NewTestAgent(t, `alt_domain = "alt.consul."`)
	defer a.Shutdown()

	// Register ingress gateway and a service that will be associated with it
	{
		arg := structs.RegisterRequest{
			Datacenter: "dc1",
			Node:       "foo",
			Address:    "127.0.0.1",
			Service: &structs.NodeService{
				ID:      "ingress-gateway",
				Service: "ingress-gateway",
				Kind:    structs.ServiceKindIngressGateway,
				Port:    8443,
			},
			Check: &structs.HealthCheck{
				Name:      "ingress connect",
				Status:    api.HealthPassing,
				ServiceID: "ingress-gateway",
			},
		}
		var regOutput struct{}
		require.NoError(t, a.RPC(context.Background(), "Catalog.Register", &arg, &regOutput))

		arg = structs.RegisterRequest{
			Datacenter: "dc1",
			Node:       "bar",
			Address:    "127.0.0.2",
			Service: &structs.NodeService{
				ID:      "db",
				Service: "db",
				Tags:    []string{"primary"},
			},
			Check: &structs.HealthCheck{
				Name:      "db-warning",
				Status:    api.HealthWarning,
				ServiceID: "db",
			},
		}
		require.NoError(t, a.RPC(context.Background(), "Catalog.Register", &arg, &regOutput))

		arg = structs.RegisterRequest{
			Datacenter: "dc1",
			Node:       "baz",
			Address:    "127.0.0.3",
			Service: &structs.NodeService{
				ID:      "db2",
				Service: "db",
				Tags:    []string{"backup"},
			},
			Check: &structs.HealthCheck{
				Name:      "db2-passing",
				Status:    api.HealthPassing,
				ServiceID: "db2",
			},
		}
		require.NoError(t, a.RPC(context.Background(), "Catalog.Register", &arg, &regOutput))

		// Set web protocol to http
		svcDefaultsReq := structs.ConfigEntryRequest{
			Datacenter: "dc1",
			Entry: &structs.ServiceConfigEntry{
				Name:     "web",
				Protocol: "http",
			},
		}
		var configOutput bool
		require.NoError(t, a.RPC(context.Background(), "ConfigEntry.Apply", &svcDefaultsReq, &configOutput))
		require.True(t, configOutput)

		// Register ingress-gateway config entry, linking it to db and redis (does not exist)
		args := &structs.IngressGatewayConfigEntry{
			Name: "ingress-gateway",
			Kind: structs.IngressGateway,
			Listeners: []structs.IngressListener{
				{
					Port:     8888,
					Protocol: "tcp",
					Services: []structs.IngressService{
						{
							Name: "db",
						},
					},
				},
				{
					Port:     8080,
					Protocol: "http",
					Services: []structs.IngressService{
						{
							Name: "web",
						},
					},
				},
				{
					Port:     8081,
					Protocol: "http",
					Services: []structs.IngressService{
						{
							Name:  "web",
							Hosts: []string{"*.test.example.com"},
						},
					},
				},
			},
		}

		req := structs.ConfigEntryRequest{
			Op:         structs.ConfigEntryUpsert,
			Datacenter: "dc1",
			Entry:      args,
		}
		require.NoError(t, a.RPC(context.Background(), "ConfigEntry.Apply", &req, &configOutput))
		require.True(t, configOutput)
	}

	// Request
	req, _ := http.NewRequest("GET", "/v1/internal/ui/gateway-services-nodes/ingress-gateway", nil)
	resp := httptest.NewRecorder()
	obj, err := a.srv.UIGatewayServicesNodes(resp, req)
	require.Nil(t, err)
	assertIndex(t, resp)

	// Construct expected addresses so that differences between CE/Ent are
	// handled by code. We specifically don't include the trailing DNS . here as
	// we are constructing what we are expecting, not the actual value
	webDNS := serviceIngressDNSName("web", "dc1", "consul", structs.DefaultEnterpriseMetaInDefaultPartition())
	webDNSAlt := serviceIngressDNSName("web", "dc1", "alt.consul", structs.DefaultEnterpriseMetaInDefaultPartition())
	dbDNS := serviceIngressDNSName("db", "dc1", "consul", structs.DefaultEnterpriseMetaInDefaultPartition())
	dbDNSAlt := serviceIngressDNSName("db", "dc1", "alt.consul", structs.DefaultEnterpriseMetaInDefaultPartition())

	dump := obj.([]*ServiceSummary)
	expect := []*ServiceSummary{
		{
			Name: "web",
			GatewayConfig: GatewayConfig{
				Addresses: []string{
					fmt.Sprintf("%s:8080", webDNS),
					fmt.Sprintf("%s:8080", webDNSAlt),
					"*.test.example.com:8081",
				},
			},
			EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
		},
		{
			Name:           "db",
			Datacenter:     "dc1",
			Tags:           []string{"backup", "primary"},
			Nodes:          []string{"bar", "baz"},
			InstanceCount:  2,
			ChecksPassing:  1,
			ChecksWarning:  1,
			ChecksCritical: 0,
			GatewayConfig: GatewayConfig{
				Addresses: []string{
					fmt.Sprintf("%s:8888", dbDNS),
					fmt.Sprintf("%s:8888", dbDNSAlt),
				},
			},
			EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
		},
	}

	// internal accounting that users don't see can be blown away
	for _, sum := range dump {
		sum.GatewayConfig.addressesSet = nil
		sum.checks = nil
	}
	require.ElementsMatch(t, expect, dump)
}

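// TestUIGatewayIntentions verifies that gateway-intentions returns only
// intentions whose destination is a service linked to the gateway, with
// wildcard matches deduplicated.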
func TestUIGatewayIntentions(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()

	a := NewTestAgent(t, "")
	defer a.Shutdown()
	testrpc.WaitForServiceIntentions(t, a.RPC, "dc1")

	// Register terminating gateway and config entry linking it to postgres + redis
	{
		arg := structs.RegisterRequest{
			Datacenter: "dc1",
			Node:       "foo",
			Address:    "127.0.0.1",
			Service: &structs.NodeService{
				ID:      "terminating-gateway",
				Service: "terminating-gateway",
				Kind:    structs.ServiceKindTerminatingGateway,
				Port:    443,
			},
			Check: &structs.HealthCheck{
				Name:      "terminating connect",
				Status:    api.HealthPassing,
				ServiceID: "terminating-gateway",
			},
		}
		var regOutput struct{}
		require.NoError(t, a.RPC(context.Background(), "Catalog.Register", &arg, &regOutput))

		args := &structs.TerminatingGatewayConfigEntry{
			Name: "terminating-gateway",
			Kind: structs.TerminatingGateway,
			Services: []structs.LinkedService{
				{
					Name: "postgres",
				},
				{
					Name:     "redis",
					CAFile:   "/etc/certs/ca.pem",
					CertFile: "/etc/certs/cert.pem",
					KeyFile:  "/etc/certs/key.pem",
				},
			},
		}

		req := structs.ConfigEntryRequest{
			Op:         structs.ConfigEntryUpsert,
			Datacenter: "dc1",
			Entry:      args,
		}
		var configOutput bool
		require.NoError(t, a.RPC(context.Background(), "ConfigEntry.Apply", &req, &configOutput))
		require.True(t, configOutput)
	}

	// create some symmetric intentions to ensure we are only matching on destination
	{
		for _, v := range []string{"*", "mysql", "redis", "postgres"} {
			req := structs.IntentionRequest{
				Datacenter: "dc1",
				Op:         structs.IntentionOpCreate,
				Intention:  structs.TestIntention(t),
			}
			req.Intention.SourceName = "api"
			req.Intention.DestinationName = v

			var reply string
			require.NoError(t, a.RPC(context.Background(), "Intention.Apply", &req, &reply))

			req = structs.IntentionRequest{
				Datacenter: "dc1",
				Op:         structs.IntentionOpCreate,
				Intention:  structs.TestIntention(t),
			}
			req.Intention.SourceName = v
			req.Intention.DestinationName = "api"
			require.NoError(t, a.RPC(context.Background(), "Intention.Apply", &req, &reply))
		}
	}

	// Request intentions matching the gateway named "terminating-gateway"
	req, _ := http.NewRequest("GET", "/v1/internal/ui/gateway-intentions/terminating-gateway", nil)
	resp := httptest.NewRecorder()
	obj, err := a.srv.UIGatewayIntentions(resp, req)
	require.Nil(t, err)
	assertIndex(t, resp)

	intentions := obj.(structs.Intentions)
	require.Len(t, intentions, 3)

	// Only intentions with linked services as a destination should be returned, and wildcard matches should be deduped
	expected := []string{"postgres", "*", "redis"}
	actual := []string{
		intentions[0].DestinationName,
		intentions[1].DestinationName,
		intentions[2].DestinationName,
	}
	require.ElementsMatch(t, expected, actual)
}

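// TestUIEndpoint_modifySummaryForGatewayService_UseRequestedDCInsteadOfConfigured
// checks that the gateway address is built from the datacenter named in the
// request rather than the agent's configured datacenter.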
func TestUIEndpoint_modifySummaryForGatewayService_UseRequestedDCInsteadOfConfigured(t *testing.T) {
	dc := "dc2"
	cfg := config.RuntimeConfig{Datacenter: "dc1", DNSDomain: "consul"}
	sum := ServiceSummary{GatewayConfig: GatewayConfig{}}
	gwsvc := structs.GatewayService{Service: structs.ServiceName{Name: "test"}, Port: 42}
	modifySummaryForGatewayService(&cfg, dc, &sum, &gwsvc)
	expected := serviceCanonicalDNSName("test", "ingress", "dc2", "consul", nil) + ":42"
	require.Equal(t, expected, sum.GatewayConfig.Addresses[0])
}

func TestUIServiceTopology(t *testing.T) {
|
2020-12-07 18:42:55 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
2020-09-30 14:23:19 +00:00
|
|
|
t.Parallel()
|
|
|
|
|
|
|
|
a := NewTestAgent(t, "")
|
|
|
|
defer a.Shutdown()
|
|
|
|
|
2020-10-08 23:31:54 +00:00
|
|
|
// Register ingress -> api -> web -> redis
|
2020-09-30 14:23:19 +00:00
|
|
|
{
|
|
|
|
registrations := map[string]*structs.RegisterRequest{
|
2020-10-08 15:47:09 +00:00
|
|
|
"Node edge": {
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: "edge",
|
|
|
|
Address: "127.0.0.20",
|
|
|
|
Checks: structs.HealthChecks{
|
|
|
|
&structs.HealthCheck{
|
|
|
|
Node: "edge",
|
|
|
|
CheckID: "edge:alive",
|
|
|
|
Name: "edge-liveness",
|
|
|
|
Status: api.HealthPassing,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
"Ingress gateway on edge": {
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: "edge",
|
|
|
|
SkipNodeUpdate: true,
|
|
|
|
Service: &structs.NodeService{
|
|
|
|
Kind: structs.ServiceKindIngressGateway,
|
|
|
|
ID: "ingress",
|
|
|
|
Service: "ingress",
|
|
|
|
Port: 443,
|
|
|
|
Address: "198.18.1.20",
|
|
|
|
},
|
|
|
|
},
|
2020-09-30 14:23:19 +00:00
|
|
|
"Node foo": {
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: "foo",
|
|
|
|
Address: "127.0.0.2",
|
|
|
|
Checks: structs.HealthChecks{
|
|
|
|
&structs.HealthCheck{
|
|
|
|
Node: "foo",
|
|
|
|
CheckID: "foo:alive",
|
|
|
|
Name: "foo-liveness",
|
|
|
|
Status: api.HealthPassing,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
"Service api on foo": {
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: "foo",
|
|
|
|
SkipNodeUpdate: true,
|
|
|
|
Service: &structs.NodeService{
|
|
|
|
Kind: structs.ServiceKindTypical,
|
|
|
|
ID: "api",
|
|
|
|
Service: "api",
|
|
|
|
Port: 9090,
|
|
|
|
Address: "198.18.1.2",
|
|
|
|
},
|
|
|
|
Checks: structs.HealthChecks{
|
|
|
|
&structs.HealthCheck{
|
|
|
|
Node: "foo",
|
|
|
|
CheckID: "foo:api",
|
|
|
|
Name: "api-liveness",
|
|
|
|
Status: api.HealthPassing,
|
|
|
|
ServiceID: "api",
|
|
|
|
ServiceName: "api",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
"Service api-proxy": {
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: "foo",
|
|
|
|
SkipNodeUpdate: true,
|
|
|
|
Service: &structs.NodeService{
|
|
|
|
Kind: structs.ServiceKindConnectProxy,
|
|
|
|
ID: "api-proxy",
|
|
|
|
Service: "api-proxy",
|
|
|
|
Port: 8443,
|
|
|
|
Address: "198.18.1.2",
|
|
|
|
Proxy: structs.ConnectProxyConfig{
|
|
|
|
DestinationServiceName: "api",
|
2021-04-13 16:12:13 +00:00
|
|
|
Mode: structs.ProxyModeTransparent,
|
2020-09-30 14:23:19 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
Checks: structs.HealthChecks{
|
|
|
|
&structs.HealthCheck{
|
|
|
|
Node: "foo",
|
|
|
|
CheckID: "foo:api-proxy",
|
|
|
|
Name: "api proxy listening",
|
|
|
|
Status: api.HealthPassing,
|
|
|
|
ServiceID: "api-proxy",
|
|
|
|
ServiceName: "api-proxy",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
"Node bar": {
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: "bar",
|
|
|
|
Address: "127.0.0.3",
|
|
|
|
Checks: structs.HealthChecks{
|
|
|
|
&structs.HealthCheck{
|
|
|
|
Node: "bar",
|
|
|
|
CheckID: "bar:alive",
|
|
|
|
Name: "bar-liveness",
|
|
|
|
Status: api.HealthPassing,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
"Service web on bar": {
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: "bar",
|
|
|
|
SkipNodeUpdate: true,
|
|
|
|
Service: &structs.NodeService{
|
|
|
|
Kind: structs.ServiceKindTypical,
|
|
|
|
ID: "web",
|
|
|
|
Service: "web",
|
|
|
|
Port: 80,
|
|
|
|
Address: "198.18.1.20",
|
|
|
|
},
|
|
|
|
Checks: structs.HealthChecks{
|
|
|
|
&structs.HealthCheck{
|
|
|
|
Node: "bar",
|
|
|
|
CheckID: "bar:web",
|
|
|
|
Name: "web-liveness",
|
|
|
|
Status: api.HealthWarning,
|
|
|
|
ServiceID: "web",
|
|
|
|
ServiceName: "web",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
"Service web-proxy on bar": {
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: "bar",
|
|
|
|
SkipNodeUpdate: true,
|
|
|
|
Service: &structs.NodeService{
|
|
|
|
Kind: structs.ServiceKindConnectProxy,
|
|
|
|
ID: "web-proxy",
|
|
|
|
Service: "web-proxy",
|
|
|
|
Port: 8443,
|
|
|
|
Address: "198.18.1.20",
|
|
|
|
Proxy: structs.ConnectProxyConfig{
|
|
|
|
DestinationServiceName: "web",
|
|
|
|
Upstreams: structs.Upstreams{
|
|
|
|
{
|
|
|
|
DestinationName: "redis",
|
|
|
|
LocalBindPort: 123,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
Checks: structs.HealthChecks{
|
|
|
|
&structs.HealthCheck{
|
|
|
|
Node: "bar",
|
|
|
|
CheckID: "bar:web-proxy",
|
|
|
|
Name: "web proxy listening",
|
|
|
|
Status: api.HealthCritical,
|
|
|
|
ServiceID: "web-proxy",
|
|
|
|
ServiceName: "web-proxy",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
"Node baz": {
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: "baz",
|
|
|
|
Address: "127.0.0.4",
|
|
|
|
Checks: structs.HealthChecks{
|
|
|
|
&structs.HealthCheck{
|
|
|
|
Node: "baz",
|
|
|
|
CheckID: "baz:alive",
|
|
|
|
Name: "baz-liveness",
|
|
|
|
Status: api.HealthPassing,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
"Service web on baz": {
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: "baz",
|
|
|
|
SkipNodeUpdate: true,
|
|
|
|
Service: &structs.NodeService{
|
|
|
|
Kind: structs.ServiceKindTypical,
|
|
|
|
ID: "web",
|
|
|
|
Service: "web",
|
|
|
|
Port: 80,
|
|
|
|
Address: "198.18.1.40",
|
|
|
|
},
|
|
|
|
Checks: structs.HealthChecks{
|
|
|
|
&structs.HealthCheck{
|
|
|
|
Node: "baz",
|
|
|
|
CheckID: "baz:web",
|
|
|
|
Name: "web-liveness",
|
|
|
|
Status: api.HealthPassing,
|
|
|
|
ServiceID: "web",
|
|
|
|
ServiceName: "web",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
"Service web-proxy on baz": {
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: "baz",
|
|
|
|
SkipNodeUpdate: true,
|
|
|
|
Service: &structs.NodeService{
|
|
|
|
Kind: structs.ServiceKindConnectProxy,
|
|
|
|
ID: "web-proxy",
|
|
|
|
Service: "web-proxy",
|
|
|
|
Port: 8443,
|
|
|
|
Address: "198.18.1.40",
|
|
|
|
Proxy: structs.ConnectProxyConfig{
|
|
|
|
DestinationServiceName: "web",
|
2021-04-13 16:12:13 +00:00
|
|
|
Mode: structs.ProxyModeTransparent,
|
2020-09-30 14:23:19 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
Checks: structs.HealthChecks{
|
|
|
|
&structs.HealthCheck{
|
|
|
|
Node: "baz",
|
|
|
|
CheckID: "baz:web-proxy",
|
|
|
|
Name: "web proxy listening",
|
|
|
|
Status: api.HealthCritical,
|
|
|
|
ServiceID: "web-proxy",
|
|
|
|
ServiceName: "web-proxy",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
"Node zip": {
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: "zip",
|
|
|
|
Address: "127.0.0.5",
|
|
|
|
Checks: structs.HealthChecks{
|
|
|
|
&structs.HealthCheck{
|
|
|
|
Node: "zip",
|
|
|
|
CheckID: "zip:alive",
|
|
|
|
Name: "zip-liveness",
|
|
|
|
Status: api.HealthPassing,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
"Service redis on zip": {
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: "zip",
|
|
|
|
SkipNodeUpdate: true,
|
|
|
|
Service: &structs.NodeService{
|
|
|
|
Kind: structs.ServiceKindTypical,
|
|
|
|
ID: "redis",
|
|
|
|
Service: "redis",
|
|
|
|
Port: 6379,
|
|
|
|
Address: "198.18.1.60",
|
|
|
|
},
|
|
|
|
Checks: structs.HealthChecks{
|
|
|
|
&structs.HealthCheck{
|
|
|
|
Node: "zip",
|
|
|
|
CheckID: "zip:redis",
|
|
|
|
Name: "redis-liveness",
|
|
|
|
Status: api.HealthPassing,
|
|
|
|
ServiceID: "redis",
|
|
|
|
ServiceName: "redis",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
"Service redis-proxy on zip": {
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: "zip",
|
|
|
|
SkipNodeUpdate: true,
|
|
|
|
Service: &structs.NodeService{
|
|
|
|
Kind: structs.ServiceKindConnectProxy,
|
|
|
|
ID: "redis-proxy",
|
|
|
|
Service: "redis-proxy",
|
|
|
|
Port: 8443,
|
|
|
|
Address: "198.18.1.60",
|
|
|
|
Proxy: structs.ConnectProxyConfig{
|
|
|
|
DestinationServiceName: "redis",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
Checks: structs.HealthChecks{
|
|
|
|
&structs.HealthCheck{
|
|
|
|
Node: "zip",
|
|
|
|
CheckID: "zip:redis-proxy",
|
|
|
|
Name: "redis proxy listening",
|
|
|
|
Status: api.HealthCritical,
|
|
|
|
ServiceID: "redis-proxy",
|
|
|
|
ServiceName: "redis-proxy",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
2022-02-16 21:51:54 +00:00
|
|
|
"Node cnative": {
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: "cnative",
|
|
|
|
Address: "127.0.0.6",
|
|
|
|
Checks: structs.HealthChecks{
|
|
|
|
&structs.HealthCheck{
|
|
|
|
Node: "cnative",
|
|
|
|
CheckID: "cnative:alive",
|
|
|
|
Name: "cnative-liveness",
|
|
|
|
Status: api.HealthPassing,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
"Service cbackend on cnative": {
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: "cnative",
|
|
|
|
SkipNodeUpdate: true,
|
|
|
|
Service: &structs.NodeService{
|
|
|
|
Kind: structs.ServiceKindTypical,
|
|
|
|
ID: "cbackend",
|
|
|
|
Service: "cbackend",
|
|
|
|
Port: 8080,
|
|
|
|
Address: "198.18.1.70",
|
|
|
|
},
|
|
|
|
Checks: structs.HealthChecks{
|
|
|
|
&structs.HealthCheck{
|
|
|
|
Node: "cnative",
|
|
|
|
CheckID: "cnative:cbackend",
|
|
|
|
Name: "cbackend-liveness",
|
|
|
|
Status: api.HealthPassing,
|
|
|
|
ServiceID: "cbackend",
|
|
|
|
ServiceName: "cbackend",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
"Service cbackend-proxy on cnative": {
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: "cnative",
|
|
|
|
SkipNodeUpdate: true,
|
|
|
|
Service: &structs.NodeService{
|
|
|
|
Kind: structs.ServiceKindConnectProxy,
|
|
|
|
ID: "cbackend-proxy",
|
|
|
|
Service: "cbackend-proxy",
|
|
|
|
Port: 8443,
|
|
|
|
Address: "198.18.1.70",
|
|
|
|
Proxy: structs.ConnectProxyConfig{
|
|
|
|
DestinationServiceName: "cbackend",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
Checks: structs.HealthChecks{
|
|
|
|
&structs.HealthCheck{
|
|
|
|
Node: "cnative",
|
|
|
|
CheckID: "cnative:cbackend-proxy",
|
|
|
|
Name: "cbackend proxy listening",
|
|
|
|
Status: api.HealthCritical,
|
|
|
|
ServiceID: "cbackend-proxy",
|
|
|
|
ServiceName: "cbackend-proxy",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
"Service cfrontend on cnative": {
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: "cnative",
|
|
|
|
SkipNodeUpdate: true,
|
|
|
|
Service: &structs.NodeService{
|
|
|
|
Kind: structs.ServiceKindTypical,
|
|
|
|
ID: "cfrontend",
|
|
|
|
Service: "cfrontend",
|
|
|
|
Port: 9080,
|
|
|
|
Address: "198.18.1.70",
|
|
|
|
},
|
|
|
|
Checks: structs.HealthChecks{
|
|
|
|
&structs.HealthCheck{
|
|
|
|
Node: "cnative",
|
|
|
|
CheckID: "cnative:cfrontend",
|
|
|
|
Name: "cfrontend-liveness",
|
|
|
|
Status: api.HealthPassing,
|
|
|
|
ServiceID: "cfrontend",
|
|
|
|
ServiceName: "cfrontend",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
"Service cfrontend-proxy on cnative": {
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: "cnative",
|
|
|
|
SkipNodeUpdate: true,
|
|
|
|
Service: &structs.NodeService{
|
|
|
|
Kind: structs.ServiceKindConnectProxy,
|
|
|
|
ID: "cfrontend-proxy",
|
|
|
|
Service: "cfrontend-proxy",
|
|
|
|
Port: 9443,
|
|
|
|
Address: "198.18.1.70",
|
|
|
|
Proxy: structs.ConnectProxyConfig{
|
|
|
|
DestinationServiceName: "cfrontend",
|
|
|
|
Upstreams: structs.Upstreams{
|
|
|
|
{
|
|
|
|
DestinationName: "cproxy",
|
|
|
|
LocalBindPort: 123,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
Checks: structs.HealthChecks{
|
|
|
|
&structs.HealthCheck{
|
|
|
|
Node: "cnative",
|
|
|
|
CheckID: "cnative:cfrontend-proxy",
|
|
|
|
Name: "cfrontend proxy listening",
|
|
|
|
Status: api.HealthCritical,
|
|
|
|
ServiceID: "cfrontend-proxy",
|
|
|
|
ServiceName: "cfrontend-proxy",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
"Service cproxy on cnative": {
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: "cnative",
|
|
|
|
SkipNodeUpdate: true,
|
|
|
|
Service: &structs.NodeService{
|
|
|
|
Kind: structs.ServiceKindTypical,
|
2023-07-31 14:00:40 +00:00
|
|
|
ID: "cproxy-https",
|
2022-02-16 21:51:54 +00:00
|
|
|
Service: "cproxy",
|
|
|
|
Port: 1111,
|
|
|
|
Address: "198.18.1.70",
|
2023-07-31 14:00:40 +00:00
|
|
|
Tags: []string{"https"},
|
2022-02-16 21:51:54 +00:00
|
|
|
Connect: structs.ServiceConnect{Native: true},
|
|
|
|
},
|
|
|
|
Checks: structs.HealthChecks{
|
|
|
|
&structs.HealthCheck{
|
|
|
|
Node: "cnative",
|
2023-07-31 14:00:40 +00:00
|
|
|
CheckID: "cnative:cproxy-https",
|
2022-02-16 21:51:54 +00:00
|
|
|
Name: "cproxy-liveness",
|
|
|
|
Status: api.HealthPassing,
|
2023-07-31 14:00:40 +00:00
|
|
|
ServiceID: "cproxy-https",
|
|
|
|
ServiceName: "cproxy",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
"Service cproxy/http on cnative": {
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: "cnative",
|
|
|
|
SkipNodeUpdate: true,
|
|
|
|
Service: &structs.NodeService{
|
|
|
|
Kind: structs.ServiceKindTypical,
|
|
|
|
ID: "cproxy-http",
|
|
|
|
Service: "cproxy",
|
|
|
|
Port: 1112,
|
|
|
|
Address: "198.18.1.70",
|
|
|
|
Tags: []string{"http"},
|
|
|
|
},
|
|
|
|
Checks: structs.HealthChecks{
|
|
|
|
&structs.HealthCheck{
|
|
|
|
Node: "cnative",
|
|
|
|
CheckID: "cnative:cproxy-http",
|
|
|
|
Name: "cproxy-liveness",
|
|
|
|
Status: api.HealthPassing,
|
|
|
|
ServiceID: "cproxy-http",
|
2022-02-16 21:51:54 +00:00
|
|
|
ServiceName: "cproxy",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
2020-09-30 14:23:19 +00:00
|
|
|
}
|
|
|
|
for _, args := range registrations {
|
|
|
|
var out struct{}
|
2022-12-14 15:24:22 +00:00
|
|
|
require.NoError(t, a.RPC(context.Background(), "Catalog.Register", args, &out))
|
2020-09-30 14:23:19 +00:00
|
|
|
}
|
|
|
|
}
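
	// Apply the config entries and intentions that shape the topology: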
	// ingress -> api gateway config entry (but no intention)
	// wildcard deny intention
	// api -> web exact intention
	// web -> redis exact intention
	// cfrontend -> cproxy exact intention
	// cproxy -> cbackend exact intention
	{
		entries := []structs.ConfigEntryRequest{
			{
				Datacenter: "dc1",
				Entry: &structs.ProxyConfigEntry{
					Kind: structs.ProxyDefaults,
					Name: structs.ProxyConfigGlobal,
					Config: map[string]interface{}{
						"protocol": "http",
					},
				},
			},
			{
				Datacenter: "dc1",
				Entry: &structs.ServiceConfigEntry{
					Kind:     structs.ServiceDefaults,
					Name:     "api",
					Protocol: "tcp",
				},
			},
			{
				Datacenter: "dc1",
				Entry: &structs.ServiceIntentionsConfigEntry{
					Kind: structs.ServiceIntentions,
					Name: "*",
					Meta: map[string]string{structs.MetaExternalSource: "nomad"},
					Sources: []*structs.SourceIntention{
						{
							Name:   "*",
							Action: structs.IntentionActionDeny,
						},
					},
				},
			},
			{
				Datacenter: "dc1",
				Entry: &structs.ServiceIntentionsConfigEntry{
					Kind: structs.ServiceIntentions,
					Name: "redis",
					Sources: []*structs.SourceIntention{
						{
							Name: "web",
							Permissions: []*structs.IntentionPermission{
								{
									Action: structs.IntentionActionAllow,
									HTTP: &structs.IntentionHTTPPermission{
										Methods: []string{"GET"},
									},
								},
							},
						},
					},
				},
			},
			{
				Datacenter: "dc1",
				Entry: &structs.ServiceIntentionsConfigEntry{
					Kind: structs.ServiceIntentions,
					Name: "web",
					Sources: []*structs.SourceIntention{
						{
							Action: structs.IntentionActionAllow,
							Name:   "api",
						},
					},
				},
			},
			{
				Datacenter: "dc1",
				Entry: &structs.ServiceIntentionsConfigEntry{
					Kind: structs.ServiceIntentions,
					Name: "api",
					Sources: []*structs.SourceIntention{
						{
							Name:   "ingress",
							Action: structs.IntentionActionAllow,
						},
					},
				},
			},
			{
				Datacenter: "dc1",
				Entry: &structs.IngressGatewayConfigEntry{
					Kind: "ingress-gateway",
					Name: "ingress",
					Listeners: []structs.IngressListener{
						{
							Port:     1111,
							Protocol: "tcp",
							Services: []structs.IngressService{
								{
									Name:           "api",
									EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
								},
							},
						},
					},
				},
			},
			{
				Datacenter: "dc1",
				Entry: &structs.ServiceIntentionsConfigEntry{
					Kind: structs.ServiceIntentions,
					Name: "cproxy",
					Sources: []*structs.SourceIntention{
						{
							Action: structs.IntentionActionAllow,
							Name:   "cfrontend",
						},
					},
				},
			},
			{
				Datacenter: "dc1",
				Entry: &structs.ServiceIntentionsConfigEntry{
					Kind: structs.ServiceIntentions,
					Name: "cbackend",
					Sources: []*structs.SourceIntention{
						{
							Action: structs.IntentionActionAllow,
							Name:   "cproxy",
						},
					},
				},
			},
		}
		for _, req := range entries {
			out := false
			require.NoError(t, a.RPC(context.Background(), "ConfigEntry.Apply", &req, &out))
		}
	}
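
	// Each case queries the topology endpoint for one service and compares the
	// summarized upstream/downstream view, retrying until the agent's catalog
	// and intention state have converged.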
	type testCase struct {
		name    string
		httpReq *http.Request
		want    *ServiceTopology
		wantErr string
	}

	run := func(t *testing.T, tc testCase) {
		retry.Run(t, func(r *retry.R) {
			resp := httptest.NewRecorder()
			obj, err := a.srv.UIServiceTopology(resp, tc.httpReq)

			if tc.wantErr != "" {
				assert.NotNil(r, err)
				assert.Nil(r, tc.want) // should not define a non-nil want
				require.Contains(r, err.Error(), tc.wantErr)
				require.Nil(r, obj)
				return
			}
			assert.Nil(r, err)

			require.NoError(r, checkIndex(resp))
			require.NotNil(r, obj)
			result := obj.(ServiceTopology)
			clearUnexportedFields(result)

			require.Equal(r, *tc.want, result)
		})
	}

	tcs := []testCase{
		{
			name: "request without kind",
			httpReq: func() *http.Request {
				req, _ := http.NewRequest("GET", "/v1/internal/ui/service-topology/ingress", nil)
				return req
			}(),
			wantErr: "Missing service kind",
		},
		{
			name: "request with unsupported kind",
			httpReq: func() *http.Request {
				req, _ := http.NewRequest("GET", "/v1/internal/ui/service-topology/ingress?kind=not-a-kind", nil)
				return req
			}(),
			wantErr: `Unsupported service kind "not-a-kind"`,
		},
		{
			name: "ingress",
			httpReq: func() *http.Request {
				req, _ := http.NewRequest("GET", "/v1/internal/ui/service-topology/ingress?kind=ingress-gateway", nil)
				return req
			}(),
			want: &ServiceTopology{
				Protocol:         "tcp",
				TransparentProxy: false,
				Upstreams: []*ServiceTopologySummary{
					{
						ServiceSummary: ServiceSummary{
							Name:             "api",
							Datacenter:       "dc1",
							Nodes:            []string{"foo"},
							InstanceCount:    1,
							ChecksPassing:    3,
							EnterpriseMeta:   *structs.DefaultEnterpriseMetaInDefaultPartition(),
							TransparentProxy: true,
						},
						Intention: structs.IntentionDecisionSummary{
							DefaultAllow:   true,
							Allowed:        true,
							HasPermissions: false,
							HasExact:       true,
						},
						Source: structs.TopologySourceRegistration,
					},
				},
				Downstreams:    []*ServiceTopologySummary{},
				FilteredByACLs: false,
			},
		},
		{
			name: "api",
			httpReq: func() *http.Request {
				req, _ := http.NewRequest("GET", "/v1/internal/ui/service-topology/api?kind=", nil)
				return req
			}(),
			want: &ServiceTopology{
				Protocol:         "tcp",
				TransparentProxy: true,
				Downstreams: []*ServiceTopologySummary{
					{
						ServiceSummary: ServiceSummary{
							Name:             "ingress",
							Kind:             structs.ServiceKindIngressGateway,
							Datacenter:       "dc1",
							Nodes:            []string{"edge"},
							InstanceCount:    1,
							ChecksPassing:    1,
							EnterpriseMeta:   *structs.DefaultEnterpriseMetaInDefaultPartition(),
							TransparentProxy: false,
						},
						Intention: structs.IntentionDecisionSummary{
							DefaultAllow:   true,
							Allowed:        true,
							HasPermissions: false,
							HasExact:       true,
						},
						Source: structs.TopologySourceRegistration,
					},
				},
				Upstreams: []*ServiceTopologySummary{
					{
						ServiceSummary: ServiceSummary{
							Name:             "web",
							Datacenter:       "dc1",
							Nodes:            []string{"bar", "baz"},
							InstanceCount:    2,
							ChecksPassing:    3,
							ChecksWarning:    1,
							ChecksCritical:   2,
							EnterpriseMeta:   *structs.DefaultEnterpriseMetaInDefaultPartition(),
							TransparentProxy: false,
						},
						Intention: structs.IntentionDecisionSummary{
							DefaultAllow:   true,
							Allowed:        true,
							HasPermissions: false,
							HasExact:       true,
						},
						Source: structs.TopologySourceSpecificIntention,
					},
				},
				FilteredByACLs: false,
			},
		},
		{
			name: "web",
			httpReq: func() *http.Request {
				req, _ := http.NewRequest("GET", "/v1/internal/ui/service-topology/web?kind=", nil)
				return req
			}(),
			want: &ServiceTopology{
				Protocol:         "http",
				TransparentProxy: false,
				Upstreams: []*ServiceTopologySummary{
					{
						ServiceSummary: ServiceSummary{
							Name:             "redis",
							Datacenter:       "dc1",
							Nodes:            []string{"zip"},
							InstanceCount:    1,
							ChecksPassing:    2,
							ChecksCritical:   1,
							EnterpriseMeta:   *structs.DefaultEnterpriseMetaInDefaultPartition(),
							TransparentProxy: false,
						},
						Intention: structs.IntentionDecisionSummary{
							DefaultAllow:   true,
							Allowed:        false,
							HasPermissions: true,
							HasExact:       true,
						},
						Source: structs.TopologySourceRegistration,
					},
				},
				Downstreams: []*ServiceTopologySummary{
					{
						ServiceSummary: ServiceSummary{
							Name:             "api",
							Datacenter:       "dc1",
							Nodes:            []string{"foo"},
							InstanceCount:    1,
							ChecksPassing:    3,
							EnterpriseMeta:   *structs.DefaultEnterpriseMetaInDefaultPartition(),
							TransparentProxy: true,
						},
						Intention: structs.IntentionDecisionSummary{
							DefaultAllow:   true,
							Allowed:        true,
							HasPermissions: false,
							HasExact:       true,
						},
						Source: structs.TopologySourceSpecificIntention,
					},
				},
				FilteredByACLs: false,
			},
		},
		{
			name: "redis",
			httpReq: func() *http.Request {
				req, _ := http.NewRequest("GET", "/v1/internal/ui/service-topology/redis?kind=", nil)
				return req
			}(),
			want: &ServiceTopology{
				Protocol:         "http",
				TransparentProxy: false,
				Upstreams:        []*ServiceTopologySummary{},
				Downstreams: []*ServiceTopologySummary{
					{
						ServiceSummary: ServiceSummary{
							Name:           "web",
							Datacenter:     "dc1",
							Nodes:          []string{"bar", "baz"},
							InstanceCount:  2,
							ChecksPassing:  3,
							ChecksWarning:  1,
							ChecksCritical: 2,
							EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
						},
						Intention: structs.IntentionDecisionSummary{
							DefaultAllow:   true,
							Allowed:        false,
							HasPermissions: true,
							HasExact:       true,
						},
						Source: structs.TopologySourceRegistration,
					},
				},
				FilteredByACLs: false,
			},
		},
		{
			name: "cproxy",
			httpReq: func() *http.Request {
				req, _ := http.NewRequest("GET", "/v1/internal/ui/service-topology/cproxy?kind=", nil)
				return req
			}(),
			want: &ServiceTopology{
				Protocol:         "http",
				TransparentProxy: false,
				Upstreams: []*ServiceTopologySummary{
					{
						ServiceSummary: ServiceSummary{
							Name:           "cbackend",
							Datacenter:     "dc1",
							Nodes:          []string{"cnative"},
							InstanceCount:  1,
							ChecksPassing:  2,
							ChecksWarning:  0,
							ChecksCritical: 1,
							EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
						},
						Intention: structs.IntentionDecisionSummary{
							DefaultAllow:   true,
							Allowed:        true,
							HasPermissions: false,
							HasExact:       true,
						},
						Source: structs.TopologySourceSpecificIntention,
					},
				},
				Downstreams: []*ServiceTopologySummary{
					{
						ServiceSummary: ServiceSummary{
							Name:           "cfrontend",
							Datacenter:     "dc1",
							Nodes:          []string{"cnative"},
							InstanceCount:  1,
							ChecksPassing:  2,
							ChecksWarning:  0,
							ChecksCritical: 1,
							EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
						},
						Intention: structs.IntentionDecisionSummary{
							DefaultAllow:   true,
							Allowed:        true,
							HasPermissions: false,
							HasExact:       true,
						},
						Source: structs.TopologySourceRegistration,
					},
				},
				FilteredByACLs: false,
			},
		},
		{
			name: "cbackend",
			httpReq: func() *http.Request {
				req, _ := http.NewRequest("GET", "/v1/internal/ui/service-topology/cbackend?kind=", nil)
				return req
			}(),
			want: &ServiceTopology{
				Protocol:         "http",
				TransparentProxy: false,
				Upstreams:        []*ServiceTopologySummary{},
				Downstreams: []*ServiceTopologySummary{
					{
						ServiceSummary: ServiceSummary{
							Name:           "cproxy",
							Datacenter:     "dc1",
							Tags:           []string{"http", "https"},
							Nodes:          []string{"cnative"},
							InstanceCount:  2,
							ChecksPassing:  3,
							ChecksWarning:  0,
							ChecksCritical: 0,
							ConnectNative:  true,
							EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
						},
						Intention: structs.IntentionDecisionSummary{
							DefaultAllow:   true,
							Allowed:        true,
							HasPermissions: false,
							HasExact:       true,
						},
						Source: structs.TopologySourceSpecificIntention,
					},
				},
				FilteredByACLs: false,
			},
		},
	}

	for _, tc := range tcs {
		t.Run(tc.name, func(t *testing.T) {
			run(t, tc)
		})
	}
}

// clearUnexportedFields sets unexported members of ServiceTopology to their
// type defaults, since the fields are not marshalled in the JSON response.
func clearUnexportedFields(result ServiceTopology) {
	for _, u := range result.Upstreams {
		u.transparentProxySet = false
		u.externalSourceSet = nil
		u.checks = nil
	}
	for _, d := range result.Downstreams {
		d.transparentProxySet = false
		d.externalSourceSet = nil
		d.checks = nil
	}
}
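
// TestUIServiceTopology_RoutingConfigs verifies topology results when an
// upstream is a routing config (service-router) rather than a registered
// service.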
func TestUIServiceTopology_RoutingConfigs(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()

	a := NewTestAgent(t, "")
	defer a.Shutdown()

	// Register dashboard -> routing-config -> { counting, counting-v2 }
	{
		registrations := map[string]*structs.RegisterRequest{
			"Service dashboard": {
				Datacenter:     "dc1",
				Node:           "foo",
				SkipNodeUpdate: true,
				Service: &structs.NodeService{
					Kind:    structs.ServiceKindTypical,
					ID:      "dashboard",
					Service: "dashboard",
					Port:    9002,
				},
				Checks: structs.HealthChecks{
					&structs.HealthCheck{
						Node:        "foo",
						CheckID:     "foo:dashboard",
						Status:      api.HealthPassing,
						ServiceID:   "dashboard",
						ServiceName: "dashboard",
					},
				},
			},
			"Service dashboard-proxy": {
				Datacenter:     "dc1",
				Node:           "foo",
				SkipNodeUpdate: true,
				Service: &structs.NodeService{
					Kind:    structs.ServiceKindConnectProxy,
					ID:      "dashboard-sidecar-proxy",
					Service: "dashboard-sidecar-proxy",
					Port:    5000,
					Address: "198.18.1.0",
					Proxy: structs.ConnectProxyConfig{
						DestinationServiceName: "dashboard",
						DestinationServiceID:   "dashboard",
						LocalServiceAddress:    "127.0.0.1",
						LocalServicePort:       9002,
						Upstreams: []structs.Upstream{
							{
								DestinationType: "service",
								DestinationName: "routing-config",
								LocalBindPort:   5000,
							},
						},
					},
					LocallyRegisteredAsSidecar: true,
				},
			},
			"Service counting": {
				Datacenter:     "dc1",
				Node:           "foo",
				SkipNodeUpdate: true,
				Service: &structs.NodeService{
					Kind:    structs.ServiceKindTypical,
					ID:      "counting",
					Service: "counting",
					Port:    9003,
					Address: "198.18.1.1",
				},
				Checks: structs.HealthChecks{
					&structs.HealthCheck{
						Node:        "foo",
						CheckID:     "foo:api",
						Status:      api.HealthPassing,
						ServiceID:   "counting",
						ServiceName: "counting",
					},
				},
			},
			"Service counting-proxy": {
				Datacenter:     "dc1",
				Node:           "foo",
				SkipNodeUpdate: true,
				Service: &structs.NodeService{
					Kind:    structs.ServiceKindConnectProxy,
					ID:      "counting-proxy",
					Service: "counting-proxy",
					Port:    5001,
					Address: "198.18.1.1",
					Proxy: structs.ConnectProxyConfig{
						DestinationServiceName: "counting",
					},
					LocallyRegisteredAsSidecar: true,
				},
				Checks: structs.HealthChecks{
					&structs.HealthCheck{
						Node:        "foo",
						CheckID:     "foo:counting-proxy",
						Status:      api.HealthPassing,
						ServiceID:   "counting-proxy",
						ServiceName: "counting-proxy",
					},
				},
			},
			"Service counting-v2": {
				Datacenter:     "dc1",
				Node:           "foo",
				SkipNodeUpdate: true,
				Service: &structs.NodeService{
					Kind:    structs.ServiceKindTypical,
					ID:      "counting-v2",
					Service: "counting-v2",
					Port:    9004,
					Address: "198.18.1.2",
				},
				Checks: structs.HealthChecks{
					&structs.HealthCheck{
						Node:        "foo",
						CheckID:     "foo:api",
						Status:      api.HealthPassing,
						ServiceID:   "counting-v2",
						ServiceName: "counting-v2",
					},
				},
			},
			"Service counting-v2-proxy": {
				Datacenter:     "dc1",
				Node:           "foo",
				SkipNodeUpdate: true,
				Service: &structs.NodeService{
					Kind:    structs.ServiceKindConnectProxy,
					ID:      "counting-v2-proxy",
					Service: "counting-v2-proxy",
					Port:    5002,
					Address: "198.18.1.2",
					Proxy: structs.ConnectProxyConfig{
						DestinationServiceName: "counting-v2",
					},
					LocallyRegisteredAsSidecar: true,
				},
				Checks: structs.HealthChecks{
					&structs.HealthCheck{
						Node:        "foo",
						CheckID:     "foo:counting-v2-proxy",
						Status:      api.HealthPassing,
						ServiceID:   "counting-v2-proxy",
						ServiceName: "counting-v2-proxy",
					},
				},
			},
		}
		for _, args := range registrations {
			var out struct{}
			require.NoError(t, a.RPC(context.Background(), "Catalog.Register", args, &out))
		}
	}
	{
		entries := []structs.ConfigEntryRequest{
			{
				Datacenter: "dc1",
				Entry: &structs.ProxyConfigEntry{
					Kind: structs.ProxyDefaults,
					Name: structs.ProxyConfigGlobal,
					Config: map[string]interface{}{
						"protocol": "http",
					},
				},
			},
			{
				Datacenter: "dc1",
				Entry: &structs.ServiceRouterConfigEntry{
					Kind: structs.ServiceRouter,
					Name: "routing-config",
					Routes: []structs.ServiceRoute{
						{
							Match: &structs.ServiceRouteMatch{
								HTTP: &structs.ServiceRouteHTTPMatch{
									PathPrefix: "/v2",
								},
							},
							Destination: &structs.ServiceRouteDestination{
								Service: "counting-v2",
							},
						},
						{
							Match: &structs.ServiceRouteMatch{
								HTTP: &structs.ServiceRouteHTTPMatch{
									PathPrefix: "/",
								},
							},
							Destination: &structs.ServiceRouteDestination{
								Service: "counting",
							},
						},
					},
				},
			},
		}
		for _, req := range entries {
			out := false
			require.NoError(t, a.RPC(context.Background(), "ConfigEntry.Apply", &req, &out))
		}
	}

	type testCase struct {
		name    string
		httpReq *http.Request
		want    *ServiceTopology
		wantErr string
	}

	run := func(t *testing.T, tc testCase) {
		retry.Run(t, func(r *retry.R) {
			resp := httptest.NewRecorder()
			obj, err := a.srv.UIServiceTopology(resp, tc.httpReq)
			assert.Nil(r, err)

			if tc.wantErr != "" {
				assert.Nil(r, tc.want) // should not define a non-nil want
				require.Equal(r, tc.wantErr, resp.Body.String())
				require.Nil(r, obj)
				return
			}

			require.NoError(r, checkIndex(resp))
			require.NotNil(r, obj)
			result := obj.(ServiceTopology)
			clearUnexportedFields(result)

			require.Equal(r, *tc.want, result)
		})
	}

	tcs := []testCase{
		{
			name: "dashboard has upstream routing-config",
			httpReq: func() *http.Request {
				req, _ := http.NewRequest("GET", "/v1/internal/ui/service-topology/dashboard?kind=", nil)
				return req
			}(),
			want: &ServiceTopology{
				Protocol:    "http",
				Downstreams: []*ServiceTopologySummary{},
				Upstreams: []*ServiceTopologySummary{
					{
						ServiceSummary: ServiceSummary{
							Name:             "routing-config",
							Datacenter:       "dc1",
							EnterpriseMeta:   *structs.DefaultEnterpriseMetaInDefaultPartition(),
							TransparentProxy: false,
						},
						Intention: structs.IntentionDecisionSummary{
							DefaultAllow: true,
							Allowed:      true,
						},
						Source: structs.TopologySourceRoutingConfig,
					},
				},
			},
		},
		{
			name: "counting has downstream dashboard",
			httpReq: func() *http.Request {
				req, _ := http.NewRequest("GET", "/v1/internal/ui/service-topology/counting?kind=", nil)
				return req
			}(),
			want: &ServiceTopology{
				Protocol:  "http",
				Upstreams: []*ServiceTopologySummary{},
				Downstreams: []*ServiceTopologySummary{
					{
						ServiceSummary: ServiceSummary{
							Name:           "dashboard",
							Datacenter:     "dc1",
							Nodes:          []string{"foo"},
							EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
							InstanceCount:  1,
							ChecksPassing:  1,
						},
						Source: "proxy-registration",
						Intention: structs.IntentionDecisionSummary{
							Allowed:      true,
							DefaultAllow: true,
						},
					},
				},
			},
		},
	}

	for _, tc := range tcs {
		t.Run(tc.name, func(t *testing.T) {
			run(t, tc)
		})
	}
}
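
// TestUIEndpoint_MetricsProxy covers the UI metrics proxy endpoint against a
// stub backend: enabling it via config reload, path allowlisting, header
// injection, query-string passthrough, and path-traversal handling.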
func TestUIEndpoint_MetricsProxy(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()

	var lastHeadersSent atomic.Value

	backendH := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		lastHeadersSent.Store(r.Header)
		if r.URL.Path == "/some/prefix/ok" {
			w.Header().Set("X-Random-Header", "Foo")
			w.Write([]byte("OK"))
			return
		}
		if r.URL.Path == "/some/prefix/query-echo" {
			w.Write([]byte("RawQuery: " + r.URL.RawQuery))
			return
		}
		if r.URL.Path == "/.passwd" {
			w.Write([]byte("SECRETS!"))
			return
		}
		http.Error(w, "not found on backend", http.StatusNotFound)
	})

	backend := httptest.NewServer(backendH)
	defer backend.Close()

	backendURL := backend.URL + "/some/prefix"

	// Share one agent for all these test cases. This has a few nice side-effects:
	// 1. it's cheaper
	// 2. it implicitly tests that config reloading works between cases
	//
	// Note we can't test the case where UI is disabled though as that's not
	// reloadable so we'll do that in a separate test below rather than have many
	// new tests all with a new agent. response headers also aren't reloadable
	// currently due to the way we wrap API endpoints at startup.
	a := NewTestAgent(t, `
		ui_config {
			enabled = true
		}
		http_config {
			response_headers {
				"Access-Control-Allow-Origin" = "*"
			}
		}
	`)
	defer a.Shutdown()

	endpointPath := "/v1/internal/ui/metrics-proxy"

	cases := []struct {
		name            string
		config          config.UIMetricsProxy
		path            string
		wantCode        int
		wantContains    string
		wantHeaders     map[string]string
		wantHeadersSent map[string]string
	}{
		{
			name:     "disabled",
			config:   config.UIMetricsProxy{},
			path:     endpointPath + "/ok",
			wantCode: http.StatusNotFound,
		},
		{
			name: "blocked path",
			config: config.UIMetricsProxy{
				BaseURL:       backendURL,
				PathAllowlist: []string{"/some/other-prefix/ok"},
			},
			path:     endpointPath + "/ok",
			wantCode: http.StatusForbidden,
		},
		{
			name: "allowed path",
			config: config.UIMetricsProxy{
				BaseURL:       backendURL,
				PathAllowlist: []string{"/some/prefix/ok"},
			},
			path:         endpointPath + "/ok",
			wantCode:     http.StatusOK,
			wantContains: "OK",
		},
		{
			name: "basic proxying",
			config: config.UIMetricsProxy{
				BaseURL: backendURL,
			},
			path:         endpointPath + "/ok",
			wantCode:     http.StatusOK,
			wantContains: "OK",
			wantHeaders: map[string]string{
				"X-Random-Header": "Foo",
			},
		},
		{
			name: "404 on backend",
			config: config.UIMetricsProxy{
				BaseURL: backendURL,
			},
			path:         endpointPath + "/random-path",
			wantCode:     http.StatusNotFound,
			wantContains: "not found on backend",
		},
		{
			// Note that this case actually doesn't exercise our validation logic at
			// all since the top level API mux resolves this to /v1/internal/.passwd
			// and it never hits our handler at all. I left it in though as this
			// wasn't obvious and it's worth knowing if we change something in our mux
			// that might affect path traversal opportunity here. In fact this makes
			// our path traversal handling somewhat redundant because any traversal
			// that goes "back" far enough to traverse up from the BaseURL of the
			// proxy target will in fact miss our handler entirely. It's still better
			// to be safe than sorry though.
			name: "path traversal should fail - api mux",
			config: config.UIMetricsProxy{
				BaseURL: backendURL,
			},
			path:         endpointPath + "/../../.passwd",
			wantCode:     http.StatusMovedPermanently,
			wantContains: "Moved Permanently",
		},
		{
			name: "adding auth header",
			config: config.UIMetricsProxy{
				BaseURL: backendURL,
				AddHeaders: []config.UIMetricsProxyAddHeader{
					{
						Name:  "Authorization",
						Value: "SECRET_KEY",
					},
					{
						Name:  "X-Some-Other-Header",
						Value: "foo",
					},
				},
			},
			path:         endpointPath + "/ok",
			wantCode:     http.StatusOK,
			wantContains: "OK",
			wantHeaders: map[string]string{
				"X-Random-Header": "Foo",
			},
			wantHeadersSent: map[string]string{
				"X-Some-Other-Header": "foo",
				"Authorization":       "SECRET_KEY",
			},
		},
		{
			name: "passes through query params",
			config: config.UIMetricsProxy{
				BaseURL: backendURL,
			},
			// encoded=test[0]&&test[1]==!@£$%^
			path:         endpointPath + "/query-echo?foo=bar&encoded=test%5B0%5D%26%26test%5B1%5D%3D%3D%21%40%C2%A3%24%25%5E",
			wantCode:     http.StatusOK,
			wantContains: "RawQuery: foo=bar&encoded=test%5B0%5D%26%26test%5B1%5D%3D%3D%21%40%C2%A3%24%25%5E",
		},
	}

	for _, tc := range cases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			// Reload the agent config with the desired UI config by making a copy and
			// using internal reload.
			cfg := *a.Agent.config

			// Modify the UIConfig part (this is a copy remember and that struct is
			// not a pointer)
			cfg.UIConfig.MetricsProxy = tc.config

			require.NoError(t, a.Agent.reloadConfigInternal(&cfg))

			// Now fetch the API handler to run requests against
			a.enableDebug.Store(true)

			h := a.srv.handler()

			req := httptest.NewRequest("GET", tc.path, nil)
			rec := httptest.NewRecorder()

			h.ServeHTTP(rec, req)

			require.Equal(t, tc.wantCode, rec.Code,
				"Wrong status code. Body = %s", rec.Body.String())
			require.Contains(t, rec.Body.String(), tc.wantContains)
			for k, v := range tc.wantHeaders {
				// Headers are a slice of values, just assert that one of the values is
				// the one we want.
				require.Contains(t, rec.Result().Header[k], v)
			}
			if len(tc.wantHeadersSent) > 0 {
				headersSent, ok := lastHeadersSent.Load().(http.Header)
				require.True(t, ok, "backend not called")
				for k, v := range tc.wantHeadersSent {
					require.Contains(t, headersSent[k], v,
						"header %s doesn't have the right value set", k)
				}
			}
		})
	}
}