e2e tests: cover WebAssembly integration

Add an e2e test that runs some demo WebAssembly applications
using the dedicated containerd shims.

Note: this is not an integration test because we need to install some
binaries (the special containerd shims) on the host.

Signed-off-by: Flavio Castelli <fcastelli@suse.com>
pull/9648/head
Flavio Castelli, committed by Brad Davidson
parent 64e4f0e6e7
commit f82d438f39
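
For context: the workload manifest below schedules pods with runtimeClassName: slight and runtimeClassName: spin, so the cluster must contain matching RuntimeClass objects whose handler names line up with the containerd runtimes that k3s registers after discovering the shim binaries on the node. A minimal sketch of what those objects look like (the handler names here are an assumption; they must match the runtime names actually registered in containerd):

# Hypothetical RuntimeClass definitions; handler must match the
# containerd runtime name registered for each shim
apiVersion: node.k8s.io/v1
kind: RuntimeClass
metadata:
  name: slight
handler: slight
---
apiVersion: node.k8s.io/v1
kind: RuntimeClass
metadata:
  name: spin
handler: spin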

wasm-workloads.yaml (new file)
@@ -0,0 +1,126 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: wasm-slight
  namespace: default
spec:
  replicas: 1
  selector:
    matchLabels:
      app: wasm-slight
  template:
    metadata:
      labels:
        app: wasm-slight
    spec:
      runtimeClassName: slight
      containers:
      - name: slight-hello
        image: ghcr.io/deislabs/containerd-wasm-shims/examples/slight-rust-hello:v0.9.1
        command: ["/"]
        resources:
          limits:
            cpu: 100m
            memory: 128Mi
          requests:
            cpu: 100m
            memory: 128Mi
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: wasm-spin
  namespace: default
spec:
  replicas: 1
  selector:
    matchLabels:
      app: wasm-spin
  template:
    metadata:
      labels:
        app: wasm-spin
    spec:
      runtimeClassName: spin
      containers:
      - name: spin-hello
        image: ghcr.io/deislabs/containerd-wasm-shims/examples/spin-rust-hello:v0.11.1
        command: ["/"]
        resources:
          limits:
            cpu: 100m
            memory: 128Mi
          requests:
            cpu: 100m
            memory: 128Mi
---
# create a traefik middleware that strips the /spin and /slight path prefixes
apiVersion: traefik.containo.us/v1alpha1
kind: Middleware
metadata:
  name: strip-prefix
  namespace: default
spec:
  stripPrefix:
    forceSlash: false
    prefixes:
      - /spin
      - /slight
---
# define the slight service
apiVersion: v1
kind: Service
metadata:
  name: wasm-slight
  namespace: default
spec:
  ports:
    - protocol: TCP
      port: 80
      targetPort: 3000
  selector:
    app: wasm-slight
---
# define the spin service
apiVersion: v1
kind: Service
metadata:
  name: wasm-spin
  namespace: default
spec:
  ports:
    - protocol: TCP
      port: 80
      targetPort: 80
  selector:
    app: wasm-spin
---
# define a single ingress that exposes both services
# using path-based routing
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: ingress
  namespace: default
  annotations:
    ingress.kubernetes.io/ssl-redirect: "false"
    kubernetes.io/ingress.class: traefik
    traefik.ingress.kubernetes.io/router.middlewares: default-strip-prefix@kubernetescrd
spec:
  rules:
  - http:
      paths:
      - path: /slight
        pathType: Prefix
        backend:
          service:
            name: wasm-slight
            port:
              number: 80
      - path: /spin
        pathType: Prefix
        backend:
          service:
            name: wasm-spin
            port:
              number: 80
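
Once this manifest is applied and the ingress has an address, both applications should be reachable through the single ingress, with the middleware stripping the /slight and /spin prefixes before requests reach the pods. A quick manual smoke test (the ingress IP is a placeholder; these are the same endpoints the Go test below probes):

curl -sfv http://<ingress-ip>/slight/hello
curl -sfv http://<ingress-ip>/spin/hello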

Vagrantfile (new file)
@@ -0,0 +1,96 @@
ENV['VAGRANT_NO_PARALLEL'] = 'no'
NODE_ROLES = (ENV['E2E_NODE_ROLES'] || ["server-0"])
NODE_BOXES = (ENV['E2E_NODE_BOXES'] || ['generic/ubuntu2004'])
GITHUB_BRANCH = (ENV['E2E_GITHUB_BRANCH'] || "master")
RELEASE_VERSION = (ENV['E2E_RELEASE_VERSION'] || "")
GOCOVER = (ENV['E2E_GOCOVER'] || "")
NODE_CPUS = (ENV['E2E_NODE_CPUS'] || 2).to_i
NODE_MEMORY = (ENV['E2E_NODE_MEMORY'] || 2048).to_i
# Virtualbox >= 6.1.28 requires `/etc/vbox/networks.conf` for expanded private networks
NETWORK_PREFIX = "10.10.10"
install_type = ""

SPIN_DOWNLOAD_URL = "https://github.com/deislabs/containerd-wasm-shims/releases/download/v0.11.1/containerd-wasm-shims-v2-spin-linux-x86_64.tar.gz"
SLIGHT_DOWNLOAD_URL = "https://github.com/deislabs/containerd-wasm-shims/releases/download/v0.11.1/containerd-wasm-shims-v1-slight-linux-x86_64.tar.gz"

# Download the spin and slight containerd shims and place them on the PATH,
# where the containerd embedded in k3s can find them
INSTALL_WASM_SHIMS = <<-SCRIPT
curl -fsSL -o spin.tar.gz #{SPIN_DOWNLOAD_URL}
tar xf spin.tar.gz
curl -fsSL -o slight.tar.gz #{SLIGHT_DOWNLOAD_URL}
tar xf slight.tar.gz
mv containerd-shim-* /usr/bin
rm *tar.gz
SCRIPT

def provision(vm, role, role_num, node_num)
  vm.box = NODE_BOXES[node_num]
  vm.hostname = role
  # An expanded netmask is required to allow VM<-->VM communication, virtualbox defaults to /32
  node_ip = "#{NETWORK_PREFIX}.#{100+node_num}"
  vm.network "private_network", ip: node_ip, netmask: "255.255.255.0"

  scripts_location = Dir.exists?("./scripts") ? "./scripts" : "../scripts"
  vagrant_defaults = File.exists?("./vagrantdefaults.rb") ? "./vagrantdefaults.rb" : "../vagrantdefaults.rb"
  load vagrant_defaults

  defaultOSConfigure(vm)
  addCoverageDir(vm, role, GOCOVER)
  install_type = getInstallType(vm, RELEASE_VERSION, GITHUB_BRANCH)

  vm.provision "shell", inline: "ping -c 2 k3s.io" # Ensure basic network connectivity
  vm.provision "Install run-wasi containerd shims", type: "shell", inline: INSTALL_WASM_SHIMS

  if role.include?("server") && role_num == 0
    dockerInstall(vm)
    vm.provision 'k3s-primary-server', type: 'k3s', run: 'once' do |k3s|
      k3s.args = "server "
      k3s.config = <<~YAML
        cluster-init: true
        node-external-ip: #{NETWORK_PREFIX}.100
        token: vagrant
        flannel-iface: eth1
      YAML
      k3s.env = %W[K3S_KUBECONFIG_MODE=0644 #{install_type}]
      k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
    end
  end

  if vm.box.to_s.include?("microos")
    vm.provision 'k3s-reload', type: 'reload', run: 'once'
  end
end

Vagrant.configure("2") do |config|
  config.vagrant.plugins = ["vagrant-k3s", "vagrant-reload"]
  # Default provider is libvirt, virtualbox is only provided as a backup
  config.vm.provider "libvirt" do |v|
    v.cpus = NODE_CPUS
    v.memory = NODE_MEMORY
  end
  config.vm.provider "virtualbox" do |v|
    v.cpus = NODE_CPUS
    v.memory = NODE_MEMORY
  end

  if NODE_ROLES.kind_of?(String)
    NODE_ROLES = NODE_ROLES.split(" ", -1)
  end
  if NODE_BOXES.kind_of?(String)
    NODE_BOXES = NODE_BOXES.split(" ", -1)
  end

  # Must iterate on the index, vagrant does not understand iterating
  # over the node roles themselves
  NODE_ROLES.length.times do |i|
    name = NODE_ROLES[i]
    role_num = name.split("-", -1).pop.to_i
    config.vm.define name do |node|
      provision(node.vm, name, role_num, i)
    end
  end
end

Ginkgo test suite, package wasm (new file)
@@ -0,0 +1,145 @@
package wasm

import (
	"flag"
	"fmt"
	"os"
	"strings"
	"testing"

	"github.com/k3s-io/k3s/tests/e2e"
	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

// Valid nodeOS: generic/ubuntu2004, opensuse/Leap-15.3.x86_64
var nodeOS = flag.String("nodeOS", "generic/ubuntu2004", "VM operating system")
var serverCount = flag.Int("serverCount", 1, "number of server nodes")
var agentCount = flag.Int("agentCount", 0, "number of agent nodes")
var ci = flag.Bool("ci", false, "running on CI")
var local = flag.Bool("local", false, "deploy a locally built K3s binary")

func Test_E2EWasm(t *testing.T) {
	flag.Parse()
	RegisterFailHandler(Fail)
	suiteConfig, reporterConfig := GinkgoConfiguration()
	RunSpecs(t, "Run WebAssembly Workloads Test Suite", suiteConfig, reporterConfig)
}

var (
	kubeConfigFile  string
	serverNodeNames []string
	agentNodeNames  []string
)

var _ = ReportAfterEach(e2e.GenReport)

var _ = Describe("Verify Can run Wasm workloads", Ordered, func() {
	It("Starts up with no issues", func() {
		var err error
		if *local {
			serverNodeNames, agentNodeNames, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount)
		} else {
			serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
		}
		Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
		fmt.Println("CLUSTER CONFIG")
		fmt.Println("OS:", *nodeOS)
		fmt.Println("Server Nodes:", serverNodeNames)
		fmt.Println("Agent Nodes:", agentNodeNames)
		kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0])
		Expect(err).NotTo(HaveOccurred())
	})

	// Server node needs to be ready before we continue
	It("Checks Node and Pod Status", func() {
		fmt.Printf("\nFetching node status\n")
		Eventually(func(g Gomega) {
			nodes, err := e2e.ParseNodes(kubeConfigFile, false)
			g.Expect(err).NotTo(HaveOccurred())
			for _, node := range nodes {
				g.Expect(node.Status).Should(Equal("Ready"))
			}
		}, "620s", "5s").Should(Succeed())
		_, _ = e2e.ParseNodes(kubeConfigFile, true)

		fmt.Printf("\nFetching Pods status\n")
		Eventually(func(g Gomega) {
			pods, err := e2e.ParsePods(kubeConfigFile, false)
			g.Expect(err).NotTo(HaveOccurred())
			for _, pod := range pods {
				if strings.Contains(pod.Name, "helm-install") {
					g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
				} else {
					g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
				}
			}
		}, "620s", "5s").Should(Succeed())
		_, _ = e2e.ParsePods(kubeConfigFile, true)
	})

	It("Verify wasm-related containerd shims are installed", func() {
		expectedShims := []string{"containerd-shim-spin-v2", "containerd-shim-slight-v1"}
		for _, node := range append(serverNodeNames, agentNodeNames...) {
			for _, shim := range expectedShims {
				cmd := fmt.Sprintf("which %s", shim)
				_, err := e2e.RunCmdOnNode(cmd, node)
				Expect(err).NotTo(HaveOccurred())
			}
		}
	})

	Context("Verify Wasm workloads can run on the cluster", func() {
		It("Deploy Wasm workloads", func() {
			out, err := e2e.DeployWorkload("wasm-workloads.yaml", kubeConfigFile, false)
			Expect(err).NotTo(HaveOccurred(), out)
		})

		It("Wait for slight Pod to be up and running", func() {
			Eventually(func() (string, error) {
				cmd := "kubectl get pods -o=name -l app=wasm-slight --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
				return e2e.RunCommand(cmd)
			}, "240s", "5s").Should(ContainSubstring("pod/wasm-slight"))
		})

		It("Wait for spin Pod to be up and running", func() {
			Eventually(func() (string, error) {
				cmd := "kubectl get pods -o=name -l app=wasm-spin --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
				return e2e.RunCommand(cmd)
			}, "120s", "5s").Should(ContainSubstring("pod/wasm-spin"))
		})

		It("Interact with Wasm applications", func() {
			ingressIPs, err := e2e.FetchIngressIP(kubeConfigFile)
			Expect(err).NotTo(HaveOccurred())
			Expect(ingressIPs).To(HaveLen(1))

			endpoints := []string{"slight/hello", "spin/go-hello", "spin/hello"}
			for _, endpoint := range endpoints {
				url := fmt.Sprintf("http://%s/%s", ingressIPs[0], endpoint)
				fmt.Printf("Connecting to Wasm web application at: %s\n", url)
				cmd := "curl -sfv " + url

				Eventually(func() (string, error) {
					return e2e.RunCommand(cmd)
				}, "120s", "5s").Should(ContainSubstring("200 OK"))
			}
		})
	})
})

var failed bool
var _ = AfterEach(func() {
	failed = failed || CurrentSpecReport().Failed()
})

var _ = AfterSuite(func() {
	if failed && !*ci {
		fmt.Println("FAILED!")
	} else {
		Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed())
		Expect(e2e.DestroyCluster()).To(Succeed())
		Expect(os.Remove(kubeConfigFile)).To(Succeed())
	}
})
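
The suite is driven through go test, and the flags declared at the top of the file (nodeOS, serverCount, agentCount, ci, local) are parsed by flag.Parse() inside Test_E2EWasm, so they can be passed on the command line. A sketch of a local invocation, assuming it is run from the directory holding this test and the Vagrantfile above:

go test -v -timeout=30m ./... -nodeOS=generic/ubuntu2004 -serverCount=1 -agentCount=0

Adding -local would deploy a locally built K3s binary instead of one fetched per RELEASE_VERSION / GITHUB_BRANCH.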