Add integration test for cleaning up resolvers

pull/16498/head
Andrew Stucki 2 years ago
parent e14b4301fa
commit 525501337d

@@ -0,0 +1,4 @@
#!/bin/bash
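# Capture Envoy admin state from the terminating gateway and the s1 sidecar so
# failed runs can be debugged.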
snapshot_envoy_admin localhost:20000 terminating-gateway primary || true
snapshot_envoy_admin localhost:19000 s1 primary || true

@@ -0,0 +1,9 @@
services {
  name = "terminating-gateway"
  kind = "terminating-gateway"
  port = 8443

  meta {
    version = "v1"
  }
}

@@ -0,0 +1,9 @@
services {
  id = "s2-v1"
  name = "s2"
  port = 8182

  meta {
    version = "v1"
  }
}

@@ -0,0 +1,9 @@
services {
  id = "s2-v2"
  name = "s2"
  port = 8183

  meta {
    version = "v2"
  }
}

@@ -0,0 +1,41 @@
#!/bin/bash
set -euo pipefail
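
# The terminating gateway fronts the sidecar-less s2 service for the rest of the mesh.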
upsert_config_entry primary '
kind = "terminating-gateway"
name = "terminating-gateway"
services = [
  {
    name = "s2"
  }
]
'
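
# Default every service in the mesh to the http protocol.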
upsert_config_entry primary '
kind = "proxy-defaults"
name = "global"
config {
  protocol = "http"
}
'
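
# Split s2 into v1/v2 subsets keyed on Service.Meta.version; deleting this entry
# later is the cleanup behaviour the test exercises.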
upsert_config_entry primary '
kind = "service-resolver"
name = "s2"
default_subset = "v1"
subsets = {
"v1" = {
filter = "Service.Meta.version == v1"
}
"v2" = {
filter = "Service.Meta.version == v2"
}
}
'

register_services primary

# terminating gateway will act as s2's proxy
gen_envoy_bootstrap s1 19000
gen_envoy_bootstrap terminating-gateway 20000 primary true

@@ -0,0 +1,8 @@
#!/bin/bash
# There is no sidecar proxy for s2-v1, since the terminating gateway acts as the proxy
export REQUIRED_SERVICES="
s1 s1-sidecar-proxy
s2-v1
terminating-gateway-primary
"

@@ -0,0 +1,50 @@
#!/usr/bin/env bats
load helpers
@test "s1 proxy admin is up on :19000" {
retry_default curl -f -s localhost:19000/stats -o /dev/null
}
@test "terminating proxy admin is up on :20000" {
retry_default curl -f -s localhost:20000/stats -o /dev/null
}
@test "terminating-gateway-primary listener is up on :8443" {
retry_default nc -z localhost:8443
}
@test "s1 proxy listener should be up and have right cert" {
assert_proxy_presents_cert_uri localhost:21000 s1
}
@test "s1 upstream should have healthy endpoints for v1.s2" {
assert_upstream_has_endpoints_in_status 127.0.0.1:19000 v1.s2 HEALTHY 1
}
@test "terminating-gateway should have healthy endpoints for v1.s2" {
assert_upstream_has_endpoints_in_status 127.0.0.1:20000 v1.s2 HEALTHY 1
}
@test "terminating-gateway should have healthy endpoints for v2.s2" {
assert_upstream_has_endpoints_in_status 127.0.0.1:20000 v2.s2 HEALTHY 1
}
@test "deleting the service-resolver should be possible" {
delete_config_entry service-resolver s2
}
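
# After the resolver is gone the terminating gateway should drop the per-subset
# (v1.s2 / v2.s2) clusters but keep serving the plain s2 service.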
@test "terminating gateway should no longer have v1.s2 endpoints" {
assert_upstream_missing 127.0.0.1:20000 v1.s2
}
@test "terminating gateway should no longer have v2.s2 endpoints" {
assert_upstream_missing 127.0.0.1:20000 v2.s2
}
@test "terminating gateway should still have s2 endpoints" {
# expected 3 nodes here due to s1, s2, s2-v1, and s2-v2, the latter
# all starting with "s2"
assert_upstream_has_endpoints_in_status 127.0.0.1:20000 s2 HEALTHY 3
}

@@ -383,15 +383,27 @@ function assert_upstream_has_endpoints_in_status_once {
  GOT_COUNT=$(get_upstream_endpoint_in_status_count $HOSTPORT $CLUSTER_NAME $HEALTH_STATUS)
  echo "GOT: $GOT_COUNT"
  [ "$GOT_COUNT" -eq $EXPECT_COUNT ]
}
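
# Passes when the Envoy admin API answers but reports no endpoints at all for
# the given cluster.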
function assert_upstream_missing_once {
  local HOSTPORT=$1
  local CLUSTER_NAME=$2
  run get_upstream_endpoint $HOSTPORT $CLUSTER_NAME
  [ "$status" -eq 0 ]
  echo "$output"
  [ "" == "$output" ]
}
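
# Retries with retry_long until the cluster's endpoints have been cleaned up.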
function assert_upstream_missing {
  local HOSTPORT=$1
  local CLUSTER_NAME=$2
  run retry_long assert_upstream_missing_once $HOSTPORT $CLUSTER_NAME
  echo "OUTPUT: $output $status"
  [ "$status" -eq 0 ]
}

function assert_upstream_has_endpoints_in_status {
@@ -400,6 +412,8 @@ function assert_upstream_has_endpoints_in_status {
  local HEALTH_STATUS=$3
  local EXPECT_COUNT=$4
  run retry_long assert_upstream_has_endpoints_in_status_once $HOSTPORT $CLUSTER_NAME $HEALTH_STATUS $EXPECT_COUNT
  echo "$output"
  [ "$status" -eq 0 ]
}
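
Note: the delete_config_entry helper exercised in verify.bats is assumed here to
boil down to a single Consul config-entry deletion; a minimal stand-alone sketch
of that step (the helper's exact transport is an assumption, but only documented
Consul interfaces are used) is:

#!/bin/bash
set -euo pipefail
# Hypothetical stand-alone equivalent of `delete_config_entry service-resolver s2`,
# assuming a local Consul agent for the primary datacenter on the default address.
consul config delete -kind service-resolver -name s2
# The same deletion via the HTTP API would be:
#   curl -sS -X DELETE http://127.0.0.1:8500/v1/config/service-resolver/s2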
