[Release-1.25] Consolidate E2E tests (#6887)

* Consolidate E2E tests and GH Actions (#6772)

* Consolidate cluster reset and snapshot E2E tests
* Add more context to secrets-encryption test
* Reuse build workflow (see the sketch below)
* Convert updatecli to job level permissions
* Remove dweomer microos from E2E and install testing

Signed-off-by: Derek Nola <derek.nola@suse.com>
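
The "Reuse build workflow" change above applies the GitHub Actions reusable-workflow pattern: each caller drops its inline build job in favor of a `uses:` reference, and the shared workflow must declare a `workflow_call` trigger. A minimal sketch of such a shared workflow, reconstructed from the inline job this commit deletes (the real contents of build-k3s.yaml are not part of this diff):

# .github/workflows/build-k3s.yaml (illustrative sketch, not the committed file)
name: Build K3s
on:
  workflow_call: {}   # required so other workflows in this repo can call it
jobs:
  build:
    name: Build
    runs-on: ubuntu-20.04
    timeout-minutes: 20
    steps:
      - name: "Checkout"
        uses: actions/checkout@v3
        with: { fetch-depth: 1 }
      - name: "Make"
        run: DOCKER_BUILDKIT=1 SKIP_VALIDATE=1 make
      - name: "Upload k3s binary"
        uses: actions/upload-artifact@v3
        with: { name: k3s, path: dist/artifacts/k3s }

Callers then shrink the job to a single `uses: ./.github/workflows/build-k3s.yaml` line, as the install and snapshotter workflow diffs below show.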

* E2E: Consolidate docker and prefer bundled tests into new startup test (#6851)

* Convert docker E2E to startup E2E
* Move preferbundled into the E2E startup test

Signed-off-by: Derek Nola <derek.nola@suse.com>

* Update nightly channel

Signed-off-by: Derek Nola <derek.nola@suse.com>

---------

Signed-off-by: Derek Nola <derek.nola@suse.com>
Derek Nola 2023-02-06 14:58:32 -08:00 committed by GitHub
parent f4a75678d8
commit 8fc229521a
17 changed files with 453 additions and 752 deletions

View File

@@ -6,29 +6,18 @@ on:
- "channel.yaml"
- "install.sh"
- "tests/install/**"
- ".github/workflows/install.yaml"
pull_request:
branches: [main, master]
paths:
- "install.sh"
- "tests/install/**"
- ".github/workflows/install.yaml"
workflow_dispatch: {}
jobs:
build:
name: Build
runs-on: ubuntu-20.04
timeout-minutes: 20
steps:
- name: "Checkout"
uses: actions/checkout@v3
with:
fetch-depth: 1
- name: "Make"
run: DOCKER_BUILDKIT=1 SKIP_VALIDATE=1 make
- name: "Upload k3s binary"
uses: actions/upload-artifact@v3
with:
name: k3s
path: ./dist/artifacts/k3s
uses: ./.github/workflows/build-k3s.yaml
test:
name: "Smoke Test"
needs: build
@@ -37,7 +26,7 @@ jobs:
strategy:
fail-fast: false
matrix:
vm: [centos-7, rocky-8, fedora, opensuse-leap, opensuse-microos, ubuntu-focal]
vm: [centos-7, rocky-8, fedora, opensuse-leap, ubuntu-focal]
max-parallel: 2
defaults:
run:

View File

@@ -11,8 +11,8 @@ jobs:
strategy:
fail-fast: false
matrix:
channel: [stable]
vm: [centos-7, rocky-8, fedora, opensuse-leap, opensuse-microos, ubuntu-focal]
channel: [v1.25]
vm: [centos-7, rocky-8, fedora, opensuse-leap, ubuntu-focal]
include:
- {channel: latest, vm: rocky-8}
- {channel: latest, vm: ubuntu-focal}

View File

@@ -20,22 +20,11 @@ on:
- "!.github/workflows/snapshotter.yaml"
workflow_dispatch: {}
jobs:
prep:
name: "Prepare"
runs-on: ubuntu-20.04
timeout-minutes: 40
steps:
- name: "Checkout"
uses: actions/checkout@v3
with: { fetch-depth: 1 }
- name: "Build"
run: DOCKER_BUILDKIT=1 SKIP_VALIDATE=1 make
- name: "Upload Binary"
uses: actions/upload-artifact@v3
with: { name: k3s, path: dist/artifacts/k3s }
build:
uses: ./.github/workflows/build-k3s.yaml
test:
name: "Smoke Test"
needs: prep
needs: build
# nested virtualization is only available on macOS hosts
runs-on: macos-12
timeout-minutes: 40

View File

@@ -1,136 +0,0 @@
ENV['VAGRANT_NO_PARALLEL'] = 'no'
NODE_ROLES = (ENV['E2E_NODE_ROLES'] ||
["server-0", "server-1", "server-2", "agent-0", "agent-1"])
NODE_BOXES = (ENV['E2E_NODE_BOXES'] ||
['generic/ubuntu2004', 'generic/ubuntu2004', 'generic/ubuntu2004', 'generic/ubuntu2004', 'generic/ubuntu2004'])
GITHUB_BRANCH = (ENV['E2E_GITHUB_BRANCH'] || "master")
RELEASE_VERSION = (ENV['E2E_RELEASE_VERSION'] || "")
EXTERNAL_DB = (ENV['E2E_EXTERNAL_DB'] || "etcd")
HARDENED = (ENV['E2E_HARDENED'] || "")
NODE_CPUS = (ENV['E2E_NODE_CPUS'] || 2).to_i
NODE_MEMORY = (ENV['E2E_NODE_MEMORY'] || 1024).to_i
# VirtualBox >= 6.1.28 requires `/etc/vbox/network.conf` for expanded private networks
NETWORK_PREFIX = "10.10.10"
install_type = ""
hardened_arg = ""
def provision(vm, role, role_num, node_num)
vm.box = NODE_BOXES[node_num]
vm.hostname = role
# An expanded netmask is required to allow VM<-->VM communication, virtualbox defaults to /32
node_ip = "#{NETWORK_PREFIX}.#{100+node_num}"
vm.network "private_network", ip: node_ip, netmask: "255.255.255.0"
scripts_location = Dir.exists?("./scripts") ? "./scripts" : "../scripts"
vagrant_defaults = File.exists?("./vagrantdefaults.rb") ? "./vagrantdefaults.rb" : "../vagrantdefaults.rb"
load vagrant_defaults
defaultOSConfigure(vm)
install_type = getInstallType(vm, RELEASE_VERSION, GITHUB_BRANCH)
vm.provision "shell", inline: "ping -c 2 k3s.io"
db_type = getDBType(role, role_num, vm)
if !HARDENED.empty?
vm.provision "Set kernel parameters", type: "shell", path: scripts_location + "/harden.sh"
hardened_arg = "protect-kernel-defaults: true\nkube-apiserver-arg: \"enable-admission-plugins=NodeRestriction,PodSecurityPolicy,ServiceAccount\""
end
if role.include?("server") && role_num == 0
vm.provision 'k3s-primary-server', type: 'k3s', run: 'once' do |k3s|
k3s.args = "server "
k3s.config = <<~YAML
token: vagrant
node-external-ip: #{NETWORK_PREFIX}.100
flannel-iface: eth1
tls-san: #{NETWORK_PREFIX}.100.nip.io
#{db_type}
#{hardened_arg}
YAML
k3s.env = %W[K3S_KUBECONFIG_MODE=0644 #{install_type}]
k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
end
elsif role.include?("server") && role_num != 0
vm.provision 'k3s-secondary-server', type: 'k3s', run: 'once' do |k3s|
k3s.args = "server"
k3s.config = <<~YAML
server: "https://#{NETWORK_PREFIX}.100:6443"
token: vagrant
node-external-ip: #{node_ip}
flannel-iface: eth1
#{db_type}
#{hardened_arg}
YAML
k3s.env = %W[K3S_KUBECONFIG_MODE=0644 K3S_TOKEN=vagrant #{install_type}]
k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
end
end
if role.include?("agent")
vm.provision 'k3s-agent', type: 'k3s', run: 'once' do |k3s|
k3s.args = "agent"
k3s.config = <<~YAML
server: "https://#{NETWORK_PREFIX}.100:6443"
token: vagrant
node-external-ip: #{node_ip}
flannel-iface: eth1
#{db_type}
#{hardened_arg}
YAML
k3s.env = %W[K3S_KUBECONFIG_MODE=0644 #{install_type}]
k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
end
end
if vm.box.to_s.include?("microos")
vm.provision 'k3s-reload', type: 'reload', run: 'once'
if !EXTERNAL_DB.empty?
vm.provision "shell", inline: "docker start #{EXTERNAL_DB}"
end
end
end
def getDBType(role, role_num, vm)
if ( EXTERNAL_DB == "" || EXTERNAL_DB == "etcd" )
if role.include?("server") && role_num == 0
return "cluster-init: true"
end
elsif ( EXTERNAL_DB == "none" )
# Use internal sqlite, only valid for single node clusters
else
puts "Unknown EXTERNAL_DB: " + EXTERNAL_DB
abort
end
return ""
end
Vagrant.configure("2") do |config|
config.vagrant.plugins = ["vagrant-k3s", "vagrant-reload"]
# Default provider is libvirt, virtualbox is only provided as a backup
config.vm.provider "libvirt" do |v|
v.cpus = NODE_CPUS
v.memory = NODE_MEMORY
end
config.vm.provider "virtualbox" do |v|
v.cpus = NODE_CPUS
v.memory = NODE_MEMORY
end
if NODE_ROLES.kind_of?(String)
NODE_ROLES = NODE_ROLES.split(" ", -1)
end
if NODE_BOXES.kind_of?(String)
NODE_BOXES = NODE_BOXES.split(" ", -1)
end
# Must iterate on the index, vagrant does not understand iterating
# over the node roles themselves
NODE_ROLES.length.times do |i|
name = NODE_ROLES[i]
role_num = name.split("-", -1).pop.to_i
config.vm.define name do |node|
provision(node.vm, name, role_num, i)
end
end
end

View File

@@ -1,187 +0,0 @@
package clusterreset
import (
"flag"
"fmt"
"os"
"strings"
"testing"
"github.com/k3s-io/k3s/tests/e2e"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
// Valid nodeOS:
// generic/ubuntu2004, generic/centos7, generic/rocky8,
// opensuse/Leap-15.3.x86_64, dweomer/microos.amd64
var nodeOS = flag.String("nodeOS", "generic/ubuntu2004", "VM operating system")
var serverCount = flag.Int("serverCount", 3, "number of server nodes")
var agentCount = flag.Int("agentCount", 1, "number of agent nodes")
var hardened = flag.Bool("hardened", false, "true or false")
var ci = flag.Bool("ci", false, "running on CI")
var local = flag.Bool("local", false, "deploy a locally built K3s binary")
// Environment Variables Info:
// E2E_EXTERNAL_DB: mysql, postgres, etcd (default: etcd)
// E2E_RELEASE_VERSION=v1.23.1+k3s2 (default: latest commit from master)
func Test_E2EClusterReset(t *testing.T) {
RegisterFailHandler(Fail)
flag.Parse()
suiteConfig, reporterConfig := GinkgoConfiguration()
RunSpecs(t, "ClusterReset Test Suite", suiteConfig, reporterConfig)
}
var (
kubeConfigFile string
serverNodeNames []string
agentNodeNames []string
)
var _ = ReportAfterEach(e2e.GenReport)
var _ = Describe("Verify Create", Ordered, func() {
Context("Cluster :", func() {
It("Starts up with no issues", func() {
var err error
if *local {
serverNodeNames, agentNodeNames, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount)
} else {
serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
}
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
fmt.Println("CLUSTER CONFIG")
fmt.Println("OS:", *nodeOS)
fmt.Println("Server Nodes:", serverNodeNames)
fmt.Println("Agent Nodes:", agentNodeNames)
kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0])
Expect(err).NotTo(HaveOccurred())
})
It("Checks Node and Pod Status", func() {
fmt.Printf("\nFetching node status\n")
Eventually(func(g Gomega) {
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, node := range nodes {
g.Expect(node.Status).Should(Equal("Ready"))
}
}, "420s", "5s").Should(Succeed())
_, _ = e2e.ParseNodes(kubeConfigFile, true)
fmt.Printf("\nFetching Pods status\n")
Eventually(func(g Gomega) {
pods, err := e2e.ParsePods(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, pod := range pods {
if strings.Contains(pod.Name, "helm-install") {
g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
} else {
g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
}
}
}, "420s", "5s").Should(Succeed())
_, _ = e2e.ParsePods(kubeConfigFile, true)
})
It("Verifies ClusterReset Functionality", func() {
Eventually(func(g Gomega) {
for _, nodeName := range serverNodeNames {
if nodeName != "server-0" {
cmd := "sudo systemctl stop k3s"
_, err := e2e.RunCmdOnNode(cmd, nodeName)
Expect(err).NotTo(HaveOccurred())
}
}
cmd := "sudo systemctl stop k3s"
_, err := e2e.RunCmdOnNode(cmd, "server-0")
Expect(err).NotTo(HaveOccurred())
cmd = "sudo k3s server --cluster-reset"
res, err := e2e.RunCmdOnNode(cmd, "server-0")
Expect(err).NotTo(HaveOccurred())
Expect(res).Should(ContainSubstring("Managed etcd cluster membership has been reset, restart without --cluster-reset flag now"))
cmd = "sudo systemctl start k3s"
_, err = e2e.RunCmdOnNode(cmd, "server-0")
Expect(err).NotTo(HaveOccurred())
fmt.Printf("\nFetching node status\n")
Eventually(func(g Gomega) {
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, node := range nodes {
if strings.Contains(node.Name, "server-0") || strings.Contains(node.Name, "agent-") {
g.Expect(node.Status).Should(Equal("Ready"))
} else {
g.Expect(node.Status).Should(Equal("NotReady"))
}
}
}, "480s", "5s").Should(Succeed())
_, _ = e2e.ParseNodes(kubeConfigFile, true)
fmt.Printf("\nFetching Pods status\n")
Eventually(func(g Gomega) {
pods, err := e2e.ParsePods(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, pod := range pods {
if strings.Contains(pod.Name, "helm-install") {
g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
} else {
g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
}
}
}, "420s", "5s").Should(Succeed())
_, _ = e2e.ParsePods(kubeConfigFile, true)
for _, nodeName := range serverNodeNames {
if nodeName != "server-0" {
cmd := "sudo rm -rf /var/lib/rancher/k3s/server/db"
_, err := e2e.RunCmdOnNode(cmd, nodeName)
Expect(err).NotTo(HaveOccurred())
cmd = "sudo systemctl restart k3s"
_, err = e2e.RunCmdOnNode(cmd, nodeName)
Expect(err).NotTo(HaveOccurred())
}
}
Eventually(func(g Gomega) {
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, node := range nodes {
g.Expect(node.Status).Should(Equal("Ready"))
}
}, "420s", "5s").Should(Succeed())
_, _ = e2e.ParseNodes(kubeConfigFile, true)
fmt.Printf("\nFetching Pods status\n")
Eventually(func(g Gomega) {
pods, err := e2e.ParsePods(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, pod := range pods {
if strings.Contains(pod.Name, "helm-install") {
g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
} else {
g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
}
}
}, "420s", "5s").Should(Succeed())
_, _ = e2e.ParsePods(kubeConfigFile, true)
}, "240s", "5s").Should(Succeed())
})
})
})
var failed bool
var _ = AfterEach(func() {
failed = failed || CurrentSpecReport().Failed()
})
var _ = AfterSuite(func() {
if failed && !*ci {
fmt.Println("FAILED!")
} else {
Expect(e2e.DestroyCluster()).To(Succeed())
Expect(os.Remove(kubeConfigFile)).To(Succeed())
}
})

View File

@@ -1,76 +0,0 @@
ENV['VAGRANT_NO_PARALLEL'] = 'no'
NODE_ROLES = (ENV['NODE_ROLES'] ||
["server-0", "agent-0"])
NODE_BOXES = (ENV['NODE_BOXES'] ||
['generic/ubuntu2004', 'generic/ubuntu2004', 'generic/ubuntu2004'])
GITHUB_BRANCH = (ENV['GITHUB_BRANCH'] || "master")
RELEASE_VERSION = (ENV['RELEASE_VERSION'] || "")
NODE_CPUS = (ENV['NODE_CPUS'] || 2).to_i
NODE_MEMORY = (ENV['NODE_MEMORY'] || 1024).to_i
# VirtualBox >= 6.1.28 requires `/etc/vbox/network.conf` for expanded private networks
NETWORK_PREFIX = "10.10.10"
install_type = ""
def provision(vm, role, role_num, node_num)
vm.box = NODE_BOXES[node_num]
vm.hostname = role
# An expanded netmask is required to allow VM<-->VM communication, virtualbox defaults to /32
vm.network "private_network", ip: "#{NETWORK_PREFIX}.#{100+node_num}", netmask: "255.255.255.0"
vagrant_defaults = '../vagrantdefaults.rb'
load vagrant_defaults if File.exists?(vagrant_defaults)
defaultOSConfigure(vm)
dockerInstall(vm)
install_type = getInstallType(vm, RELEASE_VERSION, GITHUB_BRANCH)
vm.provision "shell", inline: "ping -c 2 k3s.io"
if role.include?("server")
vm.provision 'k3s-install', type: 'k3s', run: 'once' do |k3s|
k3s.args = %W[server --node-external-ip=#{NETWORK_PREFIX}.100 --flannel-iface=eth1 --docker]
k3s.env = %W[K3S_KUBECONFIG_MODE=0644 K3S_TOKEN=vagrant #{install_type}]
k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
end
elsif role.include?("agent")
vm.provision 'k3s-install', type: 'k3s', run: 'once' do |k3s|
k3s.args = %W[agent --server https://#{NETWORK_PREFIX}.100:6443 --flannel-iface=eth1 --docker]
k3s.env = %W[K3S_KUBECONFIG_MODE=0644 K3S_TOKEN=vagrant #{install_type}]
k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
end
end
if vm.box.to_s.include?("microos")
vm.provision 'k3s-reload', type: 'reload', run: 'once'
end
end
Vagrant.configure("2") do |config|
config.vagrant.plugins = ["vagrant-k3s", "vagrant-reload"]
# Default provider is libvirt, virtualbox is only provided as a backup
config.vm.provider "libvirt" do |v|
v.cpus = NODE_CPUS
v.memory = NODE_MEMORY
end
config.vm.provider "virtualbox" do |v|
v.cpus = NODE_CPUS
v.memory = NODE_MEMORY
end
if NODE_ROLES.kind_of?(String)
NODE_ROLES = NODE_ROLES.split(" ", -1)
end
if NODE_BOXES.kind_of?(String)
NODE_BOXES = NODE_BOXES.split(" ", -1)
end
# Must iterate on the index, vagrant does not understand iterating
# over the node roles themselves
NODE_ROLES.length.times do |i|
name = NODE_ROLES[i]
role_num = name.split("-", -1).pop.to_i
config.vm.define name do |node|
provision(node.vm, name, role_num, i)
end
end
end

View File

@@ -1,92 +0,0 @@
package docker
import (
"flag"
"fmt"
"os"
"strings"
"testing"
"github.com/k3s-io/k3s/tests/e2e"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
// Valid nodeOS: generic/ubuntu2004, opensuse/Leap-15.3.x86_64, dweomer/microos.amd64
var nodeOS = flag.String("nodeOS", "generic/ubuntu2004", "VM operating system")
var serverCount = flag.Int("serverCount", 1, "number of server nodes")
var agentCount = flag.Int("agentCount", 1, "number of agent nodes")
// Environment Variables Info:
// E2E_RELEASE_VERSION=v1.23.1+k3s2 or nil for latest commit from master
func Test_E2EDockerCRIValidation(t *testing.T) {
RegisterFailHandler(Fail)
flag.Parse()
suiteConfig, reporterConfig := GinkgoConfiguration()
RunSpecs(t, "Docker CRI Test Suite", suiteConfig, reporterConfig)
}
var (
kubeConfigFile string
serverNodeNames []string
agentNodeNames []string
)
var _ = ReportAfterEach(e2e.GenReport)
var _ = Describe("Verify CRI-Dockerd", Ordered, func() {
Context("Cluster :", func() {
It("Starts up with no issues", func() {
var err error
serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
fmt.Println("CLUSTER CONFIG")
fmt.Println("OS:", *nodeOS)
fmt.Println("Server Nodes:", serverNodeNames)
fmt.Println("Agent Nodes:", agentNodeNames)
kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0])
Expect(err).NotTo(HaveOccurred())
})
It("Checks node and pod status", func() {
fmt.Printf("\nFetching node status\n")
Eventually(func(g Gomega) {
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, node := range nodes {
g.Expect(node.Status).Should(Equal("Ready"))
}
}, "420s", "5s").Should(Succeed())
_, _ = e2e.ParseNodes(kubeConfigFile, true)
fmt.Printf("\nFetching pods status\n")
Eventually(func(g Gomega) {
pods, err := e2e.ParsePods(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, pod := range pods {
if strings.Contains(pod.Name, "helm-install") {
g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
} else {
g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
}
}
}, "420s", "5s").Should(Succeed())
_, _ = e2e.ParsePods(kubeConfigFile, true)
})
})
})
var failed bool
var _ = AfterEach(func() {
failed = failed || CurrentSpecReport().Failed()
})
var _ = AfterSuite(func() {
if failed {
fmt.Println("FAILED!")
} else {
Expect(e2e.DestroyCluster()).To(Succeed())
Expect(os.Remove(kubeConfigFile)).To(Succeed())
}
})

View File

@@ -1,92 +0,0 @@
package preferbundled
import (
"flag"
"fmt"
"os"
"strings"
"testing"
"github.com/k3s-io/k3s/tests/e2e"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
// Valid nodeOS: generic/ubuntu2004, opensuse/Leap-15.3.x86_64, dweomer/microos.amd64
var nodeOS = flag.String("nodeOS", "generic/ubuntu2004", "VM operating system")
var serverCount = flag.Int("serverCount", 1, "number of server nodes")
var agentCount = flag.Int("agentCount", 1, "number of agent nodes")
// Environment Variables Info:
// E2E_RELEASE_VERSION=v1.23.1+k3s2 or nil for latest commit from master
func Test_E2EPreferBundled(t *testing.T) {
RegisterFailHandler(Fail)
flag.Parse()
suiteConfig, reporterConfig := GinkgoConfiguration()
RunSpecs(t, "Prefer Bundled Binaries Test Suite", suiteConfig, reporterConfig)
}
var (
kubeConfigFile string
serverNodeNames []string
agentNodeNames []string
)
var _ = ReportAfterEach(e2e.GenReport)
var _ = Describe("Verify prefer-bundled-bin flag", Ordered, func() {
Context("Cluster :", func() {
It("Starts up with no issues", func() {
var err error
serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
fmt.Println("CLUSTER CONFIG")
fmt.Println("OS:", *nodeOS)
fmt.Println("Server Nodes:", serverNodeNames)
fmt.Println("Agent Nodes:", agentNodeNames)
kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0])
Expect(err).NotTo(HaveOccurred())
})
It("Checks node and pod status", func() {
fmt.Printf("\nFetching node status\n")
Eventually(func(g Gomega) {
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, node := range nodes {
g.Expect(node.Status).Should(Equal("Ready"))
}
}, "420s", "5s").Should(Succeed())
_, _ = e2e.ParseNodes(kubeConfigFile, true)
fmt.Printf("\nFetching pods status\n")
Eventually(func(g Gomega) {
pods, err := e2e.ParsePods(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, pod := range pods {
if strings.Contains(pod.Name, "helm-install") {
g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
} else {
g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
}
}
}, "420s", "5s").Should(Succeed())
_, _ = e2e.ParsePods(kubeConfigFile, true)
})
})
})
var failed bool
var _ = AfterEach(func() {
failed = failed || CurrentSpecReport().Failed()
})
var _ = AfterSuite(func() {
if failed {
fmt.Println("FAILED!")
} else {
Expect(e2e.DestroyCluster()).To(Succeed())
Expect(os.Remove(kubeConfigFile)).To(Succeed())
}
})

View File

@@ -1,57 +1,49 @@
#!/bin/bash
servercount=${5:-3}
agentcount=${6:-1}
db=${7:-"etcd"}
k3s_version=${k3s_version}
k3s_channel=${k3s_channel:-"commit"}
hardened=${8:-""}
nodeOS=${1:-"generic/ubuntu2004"}
servercount=${2:-3}
agentcount=${3:-1}
db=${4:-"etcd"}
hardened=${5:-""}
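# Example invocation using the new positional arguments (script path hypothetical):
#   ./run_tests.sh generic/ubuntu2004 3 1 etcd ""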
E2E_EXTERNAL_DB=$db && export E2E_EXTERNAL_DB
E2E_REGISTRY=true && export E2E_REGISTRY
eval openvpn --daemon --config external.ovpn &>/dev/null &
sleep 10
cd
cd k3s && git pull --rebase origin master
/usr/local/go/bin/go mod tidy
ssh -i "$1" -o "StrictHostKeyChecking no" $2@$3 'cd k3s && git pull --rebase origin master'
ssh -i "$1" -o "StrictHostKeyChecking no" $2@$3 'cd k3s && /usr/local/go/bin/go mod tidy'
cd tests/e2e
OS=$(echo "$nodeOS"|cut -d'/' -f2)
echo "$OS"
ssh -i "$1" -o "StrictHostKeyChecking no" $2@$3 "cd k3s/tests/e2e/dualstack && vagrant global-status | awk '/running/'|cut -c1-7| xargs -r -d '\n' -n 1 -- vagrant destroy -f"
vagrant global-status | awk '/running/'|cut -c1-7| xargs -r -d '\n' -n 1 -- vagrant destroy -f
E2E_RELEASE_CHANNEL="commit" && export E2E_RELEASE_CHANNEL
echo 'RUNNING DUALSTACK VALIDATION TEST'
ssh -i "$1" -o "StrictHostKeyChecking no" $2@$3 "cd k3s/tests/e2e && E2E_HARDENED="$hardened" /usr/local/go/bin/go test -v dualstack/dualstack_test.go -nodeOS="$4" -serverCount=$((servercount)) -agentCount=$((agentcount)) -timeout=30m -json" | tee -a testreport.log
ssh -i "$1" -o "StrictHostKeyChecking no" $2@$3 'cd k3s/tests/e2e/dualstack && vagrant destroy -f'
echo 'RUNNING DUALSTACK TEST'
E2E_HARDENED="$hardened" /usr/local/go/bin/go test -v dualstack/dualstack_test.go -nodeOS="$nodeOS" -serverCount=1 -agentCount=1 -timeout=30m -json -ci |tee k3s_"$OS".log
echo 'RUNNING CLUSTER VALIDATION TEST'
ssh -i "$1" -o "StrictHostKeyChecking no" $2@$3 "cd k3s/tests/e2e && E2E_REGISTRY=true E2E_HARDENED="$hardened" /usr/local/go/bin/go test -v validatecluster/validatecluster_test.go -nodeOS="$4" -serverCount=$((servercount)) -agentCount=$((agentcount)) -timeout=30m -json" | tee -a testreport.log
ssh -i "$1" -o "StrictHostKeyChecking no" $2@$3 'cd k3s/tests/e2e/validatecluster && vagrant destroy -f'
E2E_REGISTRY=true E2E_HARDENED="$hardened" /usr/local/go/bin/go test -v validatecluster/validatecluster_test.go -nodeOS="$nodeOS" -serverCount=$((servercount)) -agentCount=$((agentcount)) -timeout=30m -json -ci | tee -a k3s_"$OS".log
echo 'RUNNING SECRETS ENCRYPTION TEST'
ssh -i "$1" -o "StrictHostKeyChecking no" $2@$3 "cd k3s/tests/e2e && /usr/local/go/bin/go test -v secretsencryption/secretsencryption_test.go -nodeOS="$4" -serverCount=$((servercount)) -timeout=30m -json" | tee -a testreport.log
ssh -i "$1" -o "StrictHostKeyChecking no" $2@$3 'cd k3s/tests/e2e/secretsencryption && vagrant destroy -f'
echo 'RUNNING SNAPSHOT RESTORE TEST'
ssh -i "$1" -o "StrictHostKeyChecking no" $2@$3 "cd k3s/tests/e2e && /usr/local/go/bin/go test -v snapshotrestore/snapshotrestore_test.go -nodeOS="$4" -serverCount=$((servercount)) -agentCount=$((agentcount)) -timeout=30m -json" | tee -a testreport.log
ssh -i "$1" -o "StrictHostKeyChecking no" $2@$3 'cd k3s/tests/e2e/secretsencryption && vagrant destroy -f'
echo 'RUNNING SPLIT SERVER VALIDATION TEST'
ssh -i "$1" -o "StrictHostKeyChecking no" $2@$3 "cd k3s/tests/e2e && E2E_HARDENED="$hardened" /usr/local/go/bin/go test -v splitserver/splitserver_test.go -nodeOS="$4" -timeout=30m -json" | tee -a testreport.log
ssh -i "$1" -o "StrictHostKeyChecking no" $2@$3 'cd k3s/tests/e2e/splitserver && vagrant destroy -f'
E2E_RELEASE_VERSION=$k3s_version && export E2E_RELEASE_VERSION
E2E_RELEASE_CHANNEL=$k3s_channel && export E2E_RELEASE_CHANNEL
echo 'RUNNING CLUSTER UPGRADE TEST'
ssh -i "$1" -o "StrictHostKeyChecking no" $2@$3 "cd k3s/tests/e2e && E2E_REGISTRY=true /usr/local/go/bin/go test -v upgradecluster/upgradecluster_test.go -nodeOS="$4" -serverCount=$((servercount)) -agentCount=$((agentcount)) -timeout=30m -json" | tee -a testreport.log
ssh -i "$1" -o "StrictHostKeyChecking no" $2@$3 'cd k3s/tests/e2e/upgradecluster && vagrant destroy -f'
/usr/local/go/bin/go test -v secretsencryption/secretsencryption_test.go -nodeOS="$nodeOS" -serverCount=$((servercount)) -timeout=1h -json -ci | tee -a k3s_"$OS".log
echo 'RUN CLUSTER RESET TEST'
ssh -i "$1" -o "StrictHostKeyChecking no" $2@$3 "cd k3s/tests/e2e && /usr/local/go/bin/go test -v clusterreset/clusterreset_test.go -nodeOS="$4" -serverCount=$((servercount)) -agentCount=$((agentcount)) -timeout=30m -json" | tee -a testreport.log
ssh -i "$1" -o "StrictHostKeyChecking no" $2@$3 'cd k3s/tests/e2e/clusterreset && vagrant destroy -f'
/usr/local/go/bin/go test -v clusterreset/clusterreset_test.go -nodeOS="$nodeOS" -serverCount=3 -agentCount=1 -timeout=30m -json -ci | tee -a createreport/k3s_"$OS".log
echo 'RUNNING DOCKER CRI VALIDATION TEST'
ssh -i "$1" -o "StrictHostKeyChecking no" $2@$3 "cd k3s/tests/e2e && /usr/local/go/bin/go test -v docker/docker_test.go -nodeOS="$4" -serverCount=1 -agentCount=1 -timeout=30m -json" | tee -a testreport.log
ssh -i "$1" -o "StrictHostKeyChecking no" $2@$3 'cd k3s/tests/e2e/docker && vagrant destroy -f'
echo 'RUNNING SPLIT SERVER VALIDATION TEST'
E2E_HARDENED="$hardened" /usr/local/go/bin/go test -v splitserver/splitserver_test.go -nodeOS="$nodeOS" -timeout=30m -json -ci | tee -a k3s_"$OS".log
echo 'RUNNING EXTERNALIP VALIDATION TEST'
ssh -i "$1" -o "StrictHostKeyChecking no" $2@$3 "cd k3s/tests/e2e && E2E_HARDENED="$hardened" /usr/local/go/bin/go test -v externalip/externalip_test.go -nodeOS="$4" -serverCount=1 -agentCount=1 -timeout=30m -json" | tee -a testreport.log
ssh -i "$1" -o "StrictHostKeyChecking no" $2@$3 'cd k3s/tests/e2e/dualstack && vagrant destroy -f'
echo 'RUNNING STARTUP VALIDATION TEST'
/usr/local/go/bin/go test -v startup/startup_test.go -nodeOS="$nodeOS" -timeout=30m -json -ci | tee -a k3s_"$OS".log
echo 'RUNNING EXTERNAL IP TEST'
/usr/local/go/bin/go test -v externalip/externalip_test.go -nodeOS="$nodeOS" -serverCount=1 -agentCount=1 -timeout=30m -json -ci | tee -a k3s_"$OS".log
echo 'RUNNING SNAPSHOT AND RESTORE TEST'
/usr/local/go/bin/go test -v snapshotrestore/snapshotrestore_test.go -nodeOS="$nodeOS" -serverCount=1 -agentCount=1 -timeout=30m -json -ci | tee -a k3s_"$OS".log
E2E_RELEASE_CHANNEL="latest" && export E2E_RELEASE_CHANNEL
echo 'RUNNING CLUSTER UPGRADE TEST'
E2E_REGISTRY=true /usr/local/go/bin/go test -v upgradecluster/upgradecluster_test.go -nodeOS="$nodeOS" -serverCount=$((servercount)) -agentCount=$((agentcount)) -timeout=1h -json -ci | tee -a k3s_"$OS".log

View File

@@ -12,7 +12,7 @@ import (
. "github.com/onsi/gomega"
)
// Valid nodeOS: generic/ubuntu2004, opensuse/Leap-15.3.x86_64, dweomer/microos.amd64
// Valid nodeOS: generic/ubuntu2004, opensuse/Leap-15.3.x86_64
var nodeOS = flag.String("nodeOS", "generic/ubuntu2004", "VM operating system")
var serverCount = flag.Int("serverCount", 3, "number of server nodes")
var hardened = flag.Bool("hardened", false, "true or false")
@@ -35,7 +35,7 @@ var (
var _ = ReportAfterEach(e2e.GenReport)
var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() {
Context("Cluster :", func() {
Context("Secrets Keys are rotated:", func() {
It("Starts up with no issues", func() {
var err error
serverNodeNames, _, err = e2e.CreateCluster(*nodeOS, *serverCount, 0)
@@ -218,73 +218,78 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() {
})
})
It("Disables encryption", func() {
cmd := "sudo k3s secrets-encrypt disable"
res, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0])
Expect(err).NotTo(HaveOccurred(), res)
Context("Disabling Secrets-Encryption", func() {
It("Disables encryption", func() {
cmd := "sudo k3s secrets-encrypt disable"
res, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0])
Expect(err).NotTo(HaveOccurred(), res)
cmd = "sudo k3s secrets-encrypt reencrypt -f --skip"
res, err = e2e.RunCmdOnNode(cmd, serverNodeNames[0])
Expect(err).NotTo(HaveOccurred(), res)
cmd = "sudo k3s secrets-encrypt reencrypt -f --skip"
res, err = e2e.RunCmdOnNode(cmd, serverNodeNames[0])
Expect(err).NotTo(HaveOccurred(), res)
cmd = "sudo k3s secrets-encrypt status"
Eventually(func() (string, error) {
return e2e.RunCmdOnNode(cmd, serverNodeNames[0])
}, "180s", "5s").Should(ContainSubstring("Current Rotation Stage: reencrypt_finished"))
cmd = "sudo k3s secrets-encrypt status"
Eventually(func() (string, error) {
return e2e.RunCmdOnNode(cmd, serverNodeNames[0])
}, "180s", "5s").Should(ContainSubstring("Current Rotation Stage: reencrypt_finished"))
for i, nodeName := range serverNodeNames {
Eventually(func(g Gomega) {
res, err := e2e.RunCmdOnNode(cmd, nodeName)
g.Expect(err).NotTo(HaveOccurred(), res)
if i == 0 {
g.Expect(res).Should(ContainSubstring("Encryption Status: Disabled"))
} else {
g.Expect(res).Should(ContainSubstring("Encryption Status: Enabled"))
}
}, "420s", "2s").Should(Succeed())
}
})
It("Restarts K3s servers", func() {
Expect(e2e.RestartCluster(serverNodeNames)).To(Succeed())
})
It("Verifies encryption disabled on all nodes", func() {
cmd := "sudo k3s secrets-encrypt status"
for _, nodeName := range serverNodeNames {
Eventually(func(g Gomega) {
g.Expect(e2e.RunCmdOnNode(cmd, nodeName)).Should(ContainSubstring("Encryption Status: Disabled"))
}, "420s", "2s").Should(Succeed())
}
})
for i, nodeName := range serverNodeNames {
Eventually(func(g Gomega) {
res, err := e2e.RunCmdOnNode(cmd, nodeName)
g.Expect(err).NotTo(HaveOccurred(), res)
if i == 0 {
g.Expect(res).Should(ContainSubstring("Encryption Status: Disabled"))
} else {
g.Expect(res).Should(ContainSubstring("Encryption Status: Enabled"))
}
}, "420s", "2s").Should(Succeed())
}
})
It("Restarts K3s servers", func() {
Expect(e2e.RestartCluster(serverNodeNames)).To(Succeed())
})
Context("Enabling Secrets-Encryption", func() {
It("Enables encryption", func() {
cmd := "sudo k3s secrets-encrypt enable"
res, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0])
Expect(err).NotTo(HaveOccurred(), res)
It("Verifies encryption disabled on all nodes", func() {
cmd := "sudo k3s secrets-encrypt status"
for _, nodeName := range serverNodeNames {
Eventually(func(g Gomega) {
g.Expect(e2e.RunCmdOnNode(cmd, nodeName)).Should(ContainSubstring("Encryption Status: Disabled"))
}, "420s", "2s").Should(Succeed())
}
})
cmd = "sudo k3s secrets-encrypt reencrypt -f --skip"
res, err = e2e.RunCmdOnNode(cmd, serverNodeNames[0])
Expect(err).NotTo(HaveOccurred(), res)
It("Enables encryption", func() {
cmd := "sudo k3s secrets-encrypt enable"
res, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0])
Expect(err).NotTo(HaveOccurred(), res)
cmd = "sudo k3s secrets-encrypt status"
Eventually(func() (string, error) {
return e2e.RunCmdOnNode(cmd, serverNodeNames[0])
}, "180s", "5s").Should(ContainSubstring("Current Rotation Stage: reencrypt_finished"))
})
cmd = "sudo k3s secrets-encrypt reencrypt -f --skip"
res, err = e2e.RunCmdOnNode(cmd, serverNodeNames[0])
Expect(err).NotTo(HaveOccurred(), res)
It("Restarts K3s servers", func() {
Expect(e2e.RestartCluster(serverNodeNames)).To(Succeed())
})
cmd = "sudo k3s secrets-encrypt status"
Eventually(func() (string, error) {
return e2e.RunCmdOnNode(cmd, serverNodeNames[0])
}, "180s", "5s").Should(ContainSubstring("Current Rotation Stage: reencrypt_finished"))
})
It("Restarts K3s servers", func() {
Expect(e2e.RestartCluster(serverNodeNames)).To(Succeed())
})
It("Verifies encryption enabled on all nodes", func() {
cmd := "sudo k3s secrets-encrypt status"
for _, nodeName := range serverNodeNames {
Eventually(func(g Gomega) {
g.Expect(e2e.RunCmdOnNode(cmd, nodeName)).Should(ContainSubstring("Encryption Status: Enabled"))
}, "420s", "2s").Should(Succeed())
}
It("Verifies encryption enabled on all nodes", func() {
cmd := "sudo k3s secrets-encrypt status"
for _, nodeName := range serverNodeNames {
Eventually(func(g Gomega) {
g.Expect(e2e.RunCmdOnNode(cmd, nodeName)).Should(ContainSubstring("Encryption Status: Enabled"))
}, "420s", "2s").Should(Succeed())
}
})
})
})

View File

@@ -14,7 +14,7 @@ import (
// Valid nodeOS:
// generic/ubuntu2004, generic/centos7, generic/rocky8,
// opensuse/Leap-15.3.x86_64, dweomer/microos.amd64
// opensuse/Leap-15.3.x86_64
var nodeOS = flag.String("nodeOS", "generic/ubuntu2004", "VM operating system")
var serverCount = flag.Int("serverCount", 3, "number of server nodes")
@@ -43,7 +43,7 @@ var (
var _ = ReportAfterEach(e2e.GenReport)
var _ = Describe("Verify Create", Ordered, func() {
var _ = Describe("Verify snapshots and cluster restores work", Ordered, func() {
Context("Cluster :", func() {
It("Starts up with no issues", func() {
var err error
@@ -122,26 +122,139 @@ var _ = Describe("Verify Create", Ordered, func() {
}, "240s", "5s").Should(Succeed())
})
It("Verifies snapshot is restored successfully and validates only test workload1 is present", func() {
It("Resets the cluster", func() {
for _, nodeName := range serverNodeNames {
cmd := "sudo systemctl stop k3s"
Expect(e2e.RunCmdOnNode(cmd, nodeName)).Error().NotTo(HaveOccurred())
if nodeName != serverNodeNames[0] {
cmd = "k3s-killall.sh"
Expect(e2e.RunCmdOnNode(cmd, nodeName)).Error().NotTo(HaveOccurred())
}
}
cmd := "sudo k3s server --cluster-reset"
res, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0])
Expect(err).NotTo(HaveOccurred())
Expect(res).Should(ContainSubstring("Managed etcd cluster membership has been reset, restart without --cluster-reset flag now"))
cmd = "sudo systemctl start k3s"
Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).Error().NotTo(HaveOccurred())
})
It("Checks that other servers are not ready", func() {
fmt.Printf("\nFetching node status\n")
Eventually(func(g Gomega) {
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, node := range nodes {
if strings.Contains(node.Name, serverNodeNames[0]) || strings.Contains(node.Name, "agent-") {
g.Expect(node.Status).Should(Equal("Ready"))
} else {
g.Expect(node.Status).Should(Equal("NotReady"))
}
}
}, "240s", "5s").Should(Succeed())
_, _ = e2e.ParseNodes(kubeConfigFile, true)
})
It("Rejoins other servers to cluster", func() {
// We must remove the db directory on the other servers before restarting k3s
// otherwise the nodes may join the old cluster
for _, nodeName := range serverNodeNames[1:] {
cmd := "sudo rm -rf /var/lib/rancher/k3s/server/db"
Expect(e2e.RunCmdOnNode(cmd, nodeName)).Error().NotTo(HaveOccurred())
}
for _, nodeName := range serverNodeNames[1:] {
cmd := "sudo systemctl start k3s"
Expect(e2e.RunCmdOnNode(cmd, nodeName)).Error().NotTo(HaveOccurred())
}
})
It("Checks that all nodes and pods are ready", func() {
Eventually(func(g Gomega) {
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, node := range nodes {
g.Expect(node.Status).Should(Equal("Ready"))
}
}, "420s", "5s").Should(Succeed())
_, _ = e2e.ParseNodes(kubeConfigFile, true)
fmt.Printf("\nFetching Pods status\n")
Eventually(func(g Gomega) {
pods, err := e2e.ParsePods(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, pod := range pods {
if strings.Contains(pod.Name, "helm-install") {
g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
} else {
g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
}
}
}, "420s", "5s").Should(Succeed())
})
It("Verifies that workload1 and workload1 exist", func() {
cmd := "kubectl get pods --kubeconfig=" + kubeConfigFile
res, err := e2e.RunCommand(cmd)
Expect(err).NotTo(HaveOccurred())
Expect(res).Should(ContainSubstring("test-clusterip"))
Expect(res).Should(ContainSubstring("test-nodeport"))
})
It("Restores the snapshot", func() {
//Stop k3s on all nodes
for _, nodeName := range serverNodeNames {
cmd := "sudo systemctl stop k3s"
_, err := e2e.RunCmdOnNode(cmd, nodeName)
Expect(err).NotTo(HaveOccurred())
}
//Restores from snapshot on server-0
for _, nodeName := range serverNodeNames {
if nodeName == "server-0" {
cmd := "sudo k3s server --cluster-init --cluster-reset --cluster-reset-restore-path=/var/lib/rancher/k3s/server/db/snapshots/" + snapshotname
res, err := e2e.RunCmdOnNode(cmd, nodeName)
Expect(err).NotTo(HaveOccurred())
Expect(res).Should(ContainSubstring("Managed etcd cluster membership has been reset, restart without --cluster-reset flag now"))
cmd = "sudo systemctl start k3s"
_, err = e2e.RunCmdOnNode(cmd, nodeName)
Expect(err).NotTo(HaveOccurred())
Expect(e2e.RunCmdOnNode(cmd, nodeName)).Error().NotTo(HaveOccurred())
if nodeName != serverNodeNames[0] {
cmd = "k3s-killall.sh"
Expect(e2e.RunCmdOnNode(cmd, nodeName)).Error().NotTo(HaveOccurred())
}
}
//Restores from snapshot on server-0
cmd := "sudo k3s server --cluster-init --cluster-reset --cluster-reset-restore-path=/var/lib/rancher/k3s/server/db/snapshots/" + snapshotname
res, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0])
Expect(err).NotTo(HaveOccurred())
Expect(res).Should(ContainSubstring("Managed etcd cluster membership has been reset, restart without --cluster-reset flag now"))
cmd = "sudo systemctl start k3s"
Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).Error().NotTo(HaveOccurred())
})
It("Checks that other servers are not ready", func() {
fmt.Printf("\nFetching node status\n")
Eventually(func(g Gomega) {
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, node := range nodes {
if strings.Contains(node.Name, serverNodeNames[0]) || strings.Contains(node.Name, "agent-") {
g.Expect(node.Status).Should(Equal("Ready"))
} else {
g.Expect(node.Status).Should(Equal("NotReady"))
}
}
}, "240s", "5s").Should(Succeed())
_, _ = e2e.ParseNodes(kubeConfigFile, true)
})
It("Rejoins other servers to cluster", func() {
// We must remove the db directory on the other servers before restarting k3s
// otherwise the nodes may join the old cluster
for _, nodeName := range serverNodeNames[1:] {
cmd := "sudo rm -rf /var/lib/rancher/k3s/server/db"
Expect(e2e.RunCmdOnNode(cmd, nodeName)).Error().NotTo(HaveOccurred())
}
for _, nodeName := range serverNodeNames[1:] {
cmd := "sudo systemctl start k3s"
Expect(e2e.RunCmdOnNode(cmd, nodeName)).Error().NotTo(HaveOccurred())
}
})
It("Checks that all nodes and pods are ready", func() {
//Verifies node is up and pods running
fmt.Printf("\nFetching node status\n")
Eventually(func(g Gomega) {
@@ -166,8 +279,9 @@ var _ = Describe("Verify Create", Ordered, func() {
}
}, "620s", "5s").Should(Succeed())
_, _ = e2e.ParsePods(kubeConfigFile, true)
//Verifies test workload1 is present
//Verifies test workload2 is not present
})
It("Verifies that workload1 exists and workload2 does not", func() {
cmd := "kubectl get pods --kubeconfig=" + kubeConfigFile
res, err := e2e.RunCommand(cmd)
Expect(err).NotTo(HaveOccurred())

View File

@@ -13,7 +13,7 @@ import (
. "github.com/onsi/gomega"
)
// Valid nodeOS: generic/ubuntu2004, opensuse/Leap-15.3.x86_64, dweomer/microos.amd64
// Valid nodeOS: generic/ubuntu2004, opensuse/Leap-15.3.x86_64
var nodeOS = flag.String("nodeOS", "generic/ubuntu2004", "VM operating system")
var etcdCount = flag.Int("etcdCount", 1, "number of server nodes only deploying etcd")
var controlPlaneCount = flag.Int("controlPlaneCount", 1, "number of server nodes acting as control plane")

View File

@@ -1,12 +1,12 @@
ENV['VAGRANT_NO_PARALLEL'] = 'no'
NODE_ROLES = (ENV['NODE_ROLES'] ||
["server-0", "agent-0"])
NODE_BOXES = (ENV['NODE_BOXES'] ||
['generic/ubuntu2004', 'generic/ubuntu2004', 'generic/ubuntu2004'])
GITHUB_BRANCH = (ENV['GITHUB_BRANCH'] || "master")
RELEASE_VERSION = (ENV['RELEASE_VERSION'] || "")
NODE_CPUS = (ENV['NODE_CPUS'] || 2).to_i
NODE_MEMORY = (ENV['NODE_MEMORY'] || 1024).to_i
NODE_BOXES = (ENV['E2E_NODE_BOXES'] ||
['generic/ubuntu2004', 'generic/ubuntu2004'])
GITHUB_BRANCH = (ENV['E2E_GITHUB_BRANCH'] || "master")
RELEASE_VERSION = (ENV['E2E_RELEASE_VERSION'] || "")
NODE_CPUS = (ENV['E2E_NODE_CPUS'] || 2).to_i
NODE_MEMORY = (ENV['E2E_NODE_MEMORY'] || 2048).to_i
# VirtualBox >= 6.1.28 requires `/etc/vbox/network.conf` for expanded private networks
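# For example, an entry permitting the 10.10.10.0/24 range used below
# (illustrative; recent VirtualBox releases name the file /etc/vbox/networks.conf):
#   * 10.10.10.0/24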
NETWORK_PREFIX = "10.10.10"
install_type = ""
@@ -21,32 +21,33 @@ def provision(vm, role, role_num, node_num)
load vagrant_defaults if File.exists?(vagrant_defaults)
defaultOSConfigure(vm)
dockerInstall(vm)
install_type = getInstallType(vm, RELEASE_VERSION, GITHUB_BRANCH)
node_ip = "#{NETWORK_PREFIX}.#{100+node_num}"
vm.provision "shell", inline: "ping -c 2 k3s.io"
if role.include?("server")
vm.provision 'k3s-install', type: 'k3s', run: 'once' do |k3s|
k3s.args = "server"
k3s.env = %W[K3S_KUBECONFIG_MODE=0644 #{install_type}]
k3s.args = "server "
k3s.env = %W[K3S_KUBECONFIG_MODE=0644 #{install_type} INSTALL_K3S_SKIP_START=true]
k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
k3s.config = <<~YAML
token: vagrant
flannel-iface: eth1
node-external-ip: #{NETWORK_PREFIX}.100
prefer-bundled-bin: true
flannel-iface: eth1
YAML
end
elsif role.include?("agent")
vm.provision 'k3s-install', type: 'k3s', run: 'once' do |k3s|
k3s.args = "agent"
k3s.env = %W[K3S_KUBECONFIG_MODE=0644 K3S_TOKEN=vagrant #{install_type}]
k3s.args = "agent "
k3s.env = %W[K3S_KUBECONFIG_MODE=0644 #{install_type} INSTALL_K3S_SKIP_START=true]
k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
k3s.config = <<~YAML
server: "https://#{NETWORK_PREFIX}.100:6443"
token: vagrant
node-external-ip: #{node_ip}
flannel-iface: eth1
prefer-bundled-bin: true
YAML
end
end
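
Because both provisioners now pass INSTALL_K3S_SKIP_START=true, the installer writes the config and unit files but leaves K3s stopped; the startup test starts it later with scenario-specific additions. The seeded server config.yaml ends up roughly as follows (a sketch assuming the default NETWORK_PREFIX of 10.10.10; line order may differ):

token: vagrant
node-external-ip: 10.10.10.100
flannel-iface: eth1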

View File

@@ -0,0 +1,187 @@
package startup
import (
"flag"
"fmt"
"os"
"strings"
"testing"
"github.com/k3s-io/k3s/tests/e2e"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
// Valid nodeOS: generic/ubuntu2004, opensuse/Leap-15.3.x86_64
var nodeOS = flag.String("nodeOS", "generic/ubuntu2004", "VM operating system")
var ci = flag.Bool("ci", false, "running on CI")
// Environment Variables Info:
// E2E_RELEASE_VERSION=v1.23.1+k3s2 or nil for latest commit from master
// This test suite is used to verify that K3s can start up with dynamic configurations that require
// both server and agent nodes. It is unique in passing dynamic arguments to vagrant, unlike the
// rest of the E2E tests, which use static Vagrantfiles and cluster configurations.
// If you have a server-only flag, the startup integration test is a better place to test it.
func Test_E2EStartupValidation(t *testing.T) {
RegisterFailHandler(Fail)
flag.Parse()
suiteConfig, reporterConfig := GinkgoConfiguration()
RunSpecs(t, "Startup Test Suite", suiteConfig, reporterConfig)
}
var (
kubeConfigFile string
serverNodeNames []string
agentNodeNames []string
)
func StartK3sCluster(nodes []string, serverYAML string, agentYAML string) error {
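// The Vagrantfile seeds /etc/rancher/k3s/config.yaml with a fixed base
// config (three lines on servers, four on agents). resetCmd truncates the
// file back to that base, yamlCmd appends the scenario-specific YAML, and
// startCmd brings the service up against the freshly assembled config.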
for _, node := range nodes {
var yamlCmd string
var resetCmd string
var startCmd string
if strings.Contains(node, "server") {
resetCmd = "sudo head -n 3 /etc/rancher/k3s/config.yaml > /tmp/config.yaml && sudo mv /tmp/config.yaml /etc/rancher/k3s/config.yaml"
yamlCmd = fmt.Sprintf("sudo echo '%s' >> /etc/rancher/k3s/config.yaml", serverYAML)
startCmd = "sudo systemctl start k3s"
} else {
resetCmd = "sudo head -n 4 /etc/rancher/k3s/config.yaml > /tmp/config.yaml && sudo mv /tmp/config.yaml /etc/rancher/k3s/config.yaml"
yamlCmd = fmt.Sprintf("sudo echo '%s' >> /etc/rancher/k3s/config.yaml", agentYAML)
startCmd = "sudo systemctl start k3s-agent"
}
if _, err := e2e.RunCmdOnNode(resetCmd, node); err != nil {
return err
}
if _, err := e2e.RunCmdOnNode(yamlCmd, node); err != nil {
return err
}
if _, err := e2e.RunCmdOnNode(startCmd, node); err != nil {
return &e2e.NodeError{Node: node, Cmd: startCmd, Err: err}
}
}
return nil
}
func KillK3sCluster(nodes []string) error {
for _, node := range nodes {
if _, err := e2e.RunCmdOnNode("sudo k3s-killall.sh", node); err != nil {
return err
}
}
return nil
}
var _ = ReportAfterEach(e2e.GenReport)
var _ = Describe("Various Startup Configurations", Ordered, func() {
Context("Verify CRI-Dockerd :", func() {
It("Stands up the nodes", func() {
var err error
serverNodeNames, agentNodeNames, err = e2e.CreateLocalCluster(*nodeOS, 1, 1)
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
})
It("Starts K3s with no issues", func() {
dockerYAML := "docker: true"
err := StartK3sCluster(append(serverNodeNames, agentNodeNames...), dockerYAML, dockerYAML)
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
fmt.Println("CLUSTER CONFIG")
fmt.Println("OS:", *nodeOS)
fmt.Println("Server Nodes:", serverNodeNames)
fmt.Println("Agent Nodes:", agentNodeNames)
kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0])
Expect(err).NotTo(HaveOccurred())
})
It("Checks node and pod status", func() {
fmt.Printf("\nFetching node status\n")
Eventually(func(g Gomega) {
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, node := range nodes {
g.Expect(node.Status).Should(Equal("Ready"))
}
}, "620s", "5s").Should(Succeed())
_, _ = e2e.ParseNodes(kubeConfigFile, true)
fmt.Printf("\nFetching pods status\n")
Eventually(func(g Gomega) {
pods, err := e2e.ParsePods(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, pod := range pods {
if strings.Contains(pod.Name, "helm-install") {
g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
} else {
g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
}
}
}, "620s", "5s").Should(Succeed())
_, _ = e2e.ParsePods(kubeConfigFile, true)
})
It("Kills the cluster", func() {
err := KillK3sCluster(append(serverNodeNames, agentNodeNames...))
Expect(err).NotTo(HaveOccurred())
})
})
Context("Verify prefer-bundled-bin flag", func() {
It("Starts K3s with no issues", func() {
preferBundledYAML := "prefer-bundled-bin: true"
err := StartK3sCluster(append(serverNodeNames, agentNodeNames...), preferBundledYAML, preferBundledYAML)
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
fmt.Println("CLUSTER CONFIG")
fmt.Println("OS:", *nodeOS)
fmt.Println("Server Nodes:", serverNodeNames)
fmt.Println("Agent Nodes:", agentNodeNames)
kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0])
Expect(err).NotTo(HaveOccurred())
})
It("Checks node and pod status", func() {
fmt.Printf("\nFetching node status\n")
Eventually(func(g Gomega) {
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, node := range nodes {
g.Expect(node.Status).Should(Equal("Ready"))
}
}, "620s", "5s").Should(Succeed())
_, _ = e2e.ParseNodes(kubeConfigFile, true)
fmt.Printf("\nFetching pods status\n")
Eventually(func(g Gomega) {
pods, err := e2e.ParsePods(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, pod := range pods {
if strings.Contains(pod.Name, "helm-install") {
g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
} else {
g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
}
}
}, "620s", "5s").Should(Succeed())
_, _ = e2e.ParsePods(kubeConfigFile, true)
})
It("Kills the cluster", func() {
err := KillK3sCluster(append(serverNodeNames, agentNodeNames...))
Expect(err).NotTo(HaveOccurred())
})
})
})
var failed bool
var _ = AfterEach(func() {
failed = failed || CurrentSpecReport().Failed()
})
var _ = AfterSuite(func() {
if failed && !*ci {
fmt.Println("FAILED!")
} else {
Expect(e2e.DestroyCluster()).To(Succeed())
Expect(os.Remove(kubeConfigFile)).To(Succeed())
}
})

View File

@@ -123,7 +123,11 @@ func CreateCluster(nodeOS string, serverCount, agentCount int) ([]string, []stri
return nil
})
// We must wait a bit between provisioning nodes to avoid too many learners attempting to join the cluster
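// Agents register with the apiserver rather than joining etcd, so a short
// stagger suffices; new servers join as etcd learners and need more time.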
time.Sleep(20 * time.Second)
if strings.Contains(node, "agent") {
time.Sleep(5 * time.Second)
} else {
time.Sleep(30 * time.Second)
}
}
if err := errg.Wait(); err != nil {
return nil, nil, err
@@ -304,7 +308,7 @@ func GetVagrantLog(cErr error) string {
var nodeErr *NodeError
nodeJournal := ""
if errors.As(cErr, &nodeErr) {
nodeJournal, _ = RunCommand("vagrant ssh " + nodeErr.Node + " -c \"sudo journalctl -u k3s* --no-pager\"")
nodeJournal, _ = RunCmdOnNode("sudo journalctl -u k3s* --no-pager", nodeErr.Node)
nodeJournal = "\nNode Journal Logs:\n" + nodeJournal
}
@@ -364,6 +368,9 @@ func ParsePods(kubeConfig string, print bool) ([]Pod, error) {
split := strings.Split(res, "\n")
for _, rec := range split {
fields := strings.Fields(string(rec))
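// A well-formed row in the pod listing has at least 8 whitespace-separated
// columns; error out on anything shorter rather than indexing past the slice.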
if len(fields) < 8 {
return nil, fmt.Errorf("invalid pod record: %s", rec)
}
pod := Pod{
NameSpace: fields[0],
Name: fields[1],

View File

@@ -14,7 +14,7 @@ import (
// Valid nodeOS:
// generic/ubuntu2004, generic/centos7, generic/rocky8
// opensuse/Leap-15.3.x86_64, dweomer/microos.amd64
// opensuse/Leap-15.3.x86_64
var nodeOS = flag.String("nodeOS", "generic/ubuntu2004", "VM operating system")
var serverCount = flag.Int("serverCount", 3, "number of server nodes")
var agentCount = flag.Int("agentCount", 2, "number of agent nodes")

View File

@@ -14,7 +14,7 @@ import (
// Valid nodeOS:
// generic/ubuntu2004, generic/centos7, generic/rocky8,
// opensuse/Leap-15.3.x86_64, dweomer/microos.amd64
// opensuse/Leap-15.3.x86_64
var nodeOS = flag.String("nodeOS", "generic/ubuntu2004", "VM operating system")
var serverCount = flag.Int("serverCount", 3, "number of server nodes")
var agentCount = flag.Int("agentCount", 2, "number of agent nodes")