Add hardened cluster test

Signed-off-by: Brad Davidson <brad.davidson@rancher.com>
pull/6294/head
Authored and committed by Brad Davidson (2 years ago)
parent 4d3912a0f8
commit 3b0c6ff320

@@ -0,0 +1,18 @@
apiVersion: apiserver.config.k8s.io/v1
kind: AdmissionConfiguration
plugins:
- name: PodSecurity
configuration:
apiVersion: pod-security.admission.config.k8s.io/v1beta1
kind: PodSecurityConfiguration
defaults:
enforce: "privileged"
enforce-version: "latest"
audit: "baseline"
audit-version: "latest"
warn: "baseline"
warn-version: "latest"
exemptions:
usernames: []
runtimeClasses: []
namespaces: [kube-system]

@@ -0,0 +1,128 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: example
namespace: default
labels:
app.kubernetes.io: example
spec:
selector:
matchLabels:
app.kubernetes.io/name: example
template:
metadata:
labels:
app.kubernetes.io/name: example
spec:
automountServiceAccountToken: false
securityContext:
runAsUser: 405
runAsGroup: 100
containers:
- name: socat
image: docker.io/alpine/socat:1.7.4.3-r1
args:
- "TCP-LISTEN:8080,reuseaddr,fork"
- "EXEC:echo -e 'HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n$(NODE_IP) $(POD_NAMESPACE)/$(POD_NAME)\r\n'"
ports:
- containerPort: 8080
name: http
env:
- name: NODE_IP
valueFrom:
fieldRef:
fieldPath: status.hostIP
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
readinessProbe:
initialDelaySeconds: 2
periodSeconds: 10
httpGet:
path: /
port: 8080
---
apiVersion: v1
kind: Service
metadata:
name: example
namespace: default
spec:
type: NodePort
selector:
app.kubernetes.io/name: example
ports:
- name: http
protocol: TCP
port: 80
nodePort: 30096
targetPort: http
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: example
spec:
rules:
- host: "example.com"
http:
paths:
- pathType: Prefix
path: "/"
backend:
service:
name: example
port:
name: http
---
# Allow access to example backend from traefik ingress
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: ingress-to-backend-example
namespace: default
spec:
podSelector:
matchLabels:
app.kubernetes.io/name: example
ingress:
- ports:
- port: 8080
protocol: TCP
- from:
- namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: kube-system
podSelector:
matchLabels:
app.kubernetes.io/name: traefik
policyTypes:
- Ingress
---
# Allow access to example backend from outside the cluster via nodeport service
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: nodeport-to-backend-example
namespace: default
spec:
podSelector:
matchLabels:
app.kubernetes.io/name: example
ingress:
- ports:
- port: 8080
protocol: TCP
- from:
- ipBlock:
cidr: 0.0.0.0/0
except:
- 10.42.0.0/16
- 10.43.0.0/16
policyTypes:
- Ingress

@@ -0,0 +1,120 @@
---
# Allow all traffic within the kube-system namespace; block all other access
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
name: intra-namespace
namespace: kube-system
spec:
podSelector: {}
ingress:
- from:
- namespaceSelector:
matchLabels:
name: kube-system
policyTypes:
- Ingress
---
# Allow all traffic within the default namespace; block all other access
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
name: intra-namespace
namespace: default
spec:
podSelector: {}
ingress:
- from:
- namespaceSelector:
matchLabels:
name: default
policyTypes:
- Ingress
---
# Allow traffic within the kube-public namespace; block all other access
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
name: intra-namespace
namespace: kube-public
spec:
podSelector: {}
ingress:
- from:
- namespaceSelector:
matchLabels:
name: kube-public
policyTypes:
- Ingress
---
# Allow all access to metrics-server
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: allow-all-metrics-server
namespace: kube-system
spec:
podSelector:
matchLabels:
k8s-app: metrics-server
ingress:
- {}
policyTypes:
- Ingress
---
# Allow all access to coredns DNS ports
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: default-network-dns-policy
namespace: kube-system
spec:
ingress:
- ports:
- port: 53
protocol: TCP
- port: 53
protocol: UDP
podSelector:
matchLabels:
k8s-app: kube-dns
policyTypes:
- Ingress
---
# Allow all access to the the servicelb traefik HTTP/HTTPS ports
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: allow-all-svclb-traefik
namespace: kube-system
spec:
podSelector:
matchLabels:
svccontroller.k3s.cattle.io/svcname: traefik
ingress:
- ports:
- port: 80
protocol: TCP
- port: 443
protocol: TCP
policyTypes:
- Ingress
---
# Allow all access to traefik HTTP/HTTPS ports
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: allow-all-traefik
namespace: kube-system
spec:
podSelector:
matchLabels:
app.kubernetes.io/name: traefik
ingress:
- ports:
- port: 8000
protocol: TCP
- port: 8443
protocol: TCP
policyTypes:
- Ingress

@@ -0,0 +1,98 @@
#!/bin/bash
all_services=(
coredns
local-path-provisioner
metrics-server
traefik
)
export NUM_SERVERS=1
export NUM_AGENTS=1
export WAIT_SERVICES="${all_services[@]}"
export AGENT_ARGS="--selinux=true \
--protect-kernel-defaults=true \
--kubelet-arg=streaming-connection-idle-timeout=5m \
--kubelet-arg=make-iptables-util-chains=true"
export SERVER_ARGS="--selinux=true \
--protect-kernel-defaults=true \
--kubelet-arg=streaming-connection-idle-timeout=5m \
--kubelet-arg=make-iptables-util-chains=true \
--secrets-encryption=true \
--kube-apiserver-arg=audit-log-path=/tmp/audit-log \
--kube-apiserver-arg=audit-log-maxage=30 \
--kube-apiserver-arg=audit-log-maxbackup=10 \
--kube-apiserver-arg=audit-log-maxsize=100 \
--kube-apiserver-arg=request-timeout=300s \
--kube-apiserver-arg=service-account-lookup=true \
--kube-apiserver-arg=enable-admission-plugins=NodeRestriction,NamespaceLifecycle,ServiceAccount \
--kube-apiserver-arg=admission-control-config-file=/opt/rancher/k3s/cluster-level-pss.yaml \
--kube-controller-manager-arg=terminated-pod-gc-threshold=10 \
--kube-controller-manager-arg=use-service-account-credentials=true"
# -- This test runs in docker mounting the docker socket,
# -- so we can't directly mount files into the test containers. Instead we have to
# -- run a dummy container with a volume, copy files into that volume, and then
# -- share it with the other containers that need the file.
cluster-pre-hook() {
mkdir -p $TEST_DIR/pause/0/metadata
local testID=$(basename $TEST_DIR)
local name=$(echo "k3s-pause-0-${testID,,}" | tee $TEST_DIR/pause/0/metadata/name)
export SERVER_DOCKER_ARGS="--mount type=volume,src=$name,dst=/opt/rancher/k3s"
docker run \
-d --name $name \
--hostname $name \
${SERVER_DOCKER_ARGS} \
rancher/mirrored-pause:3.6 \
>/dev/null
docker cp scripts/hardened/cluster-level-pss.yaml $name:/opt/rancher/k3s/cluster-level-pss.yaml
}
export -f cluster-pre-hook
# -- deploy and wait for a daemonset to run on all nodes, then wait a couple more
# -- seconds for traefik to see the service endpoints before testing.
start-test() {
find ./scripts/hardened/ -name 'hardened-k3s-*.yaml' -printf '-f\0%p\0' | xargs -tr0 kubectl create
kubectl rollout status daemonset/example --watch --timeout=5m
sleep 15
verify-ingress
verify-nodeport
}
export -f start-test
test-cleanup-hook(){
local testID=$(basename $TEST_DIR)
docker volume ls -q | grep -F ${testID,,} | xargs -r docker volume rm
}
export -f test-cleanup-hook
# -- confirm we can make a request through the ingress
verify-ingress() {
local ips=$(cat $TEST_DIR/{servers,agents}/*/metadata/ip)
local schemes="http https"
for ip in $ips; do
for scheme in $schemes; do
curl -vksf -H 'Host: example.com' ${scheme}://${ip}/
done
done
}
export -f verify-ingress
# -- confirm we can make a request through the nodeport service
verify-nodeport() {
local ips=$(cat $TEST_DIR/{servers,agents}/*/metadata/ip)
local ports=$(kubectl get service/example -o 'jsonpath={.spec.ports[*].nodePort}')
for ip in $ips; do
for port in $ports; do
curl -vksf -H 'Host: example.com' http://${ip}:${port}
done
done
}
export -f verify-nodeport
# --- create a basic cluster and check for functionality
LABEL=HARDENED run-test
cleanup-test-env
Loading…
Cancel
Save