mirror of https://github.com/k3s-io/k3s
Ian Cardoso committed 1 year ago via GitHub · 3 changed files with 210 additions and 1 deletion
@@ -0,0 +1,95 @@
ENV['VAGRANT_NO_PARALLEL'] = 'no'
NODE_ROLES = (ENV['E2E_NODE_ROLES'] || ["server-0"])
NODE_BOXES = (ENV['E2E_NODE_BOXES'] || ['generic/ubuntu2004'])
GITHUB_BRANCH = (ENV['E2E_GITHUB_BRANCH'] || "master")
RELEASE_VERSION = (ENV['E2E_RELEASE_VERSION'] || "")
EXTERNAL_DB = (ENV['E2E_EXTERNAL_DB'] || "") # referenced by the microos reload block below
NODE_CPUS = (ENV['E2E_NODE_CPUS'] || 2).to_i
NODE_MEMORY = (ENV['E2E_NODE_MEMORY'] || 2048).to_i
# Virtualbox >= 6.1.28 requires `/etc/vbox/networks.conf` for expanded private networks
NETWORK_PREFIX = "10.10.10"
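# A minimal sketch of an /etc/vbox/networks.conf entry permitting the
# 10.10.10.0/24 range above (an assumption, not part of this commit):
#   echo "* 10.10.10.0/24" | sudo tee -a /etc/vbox/networks.conf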
install_type = ""

def provision(vm, role, role_num, node_num)
  vm.box = NODE_BOXES[node_num]
  vm.hostname = role
  # An expanded netmask is required to allow VM<-->VM communication; virtualbox defaults to /32
  node_ip = "#{NETWORK_PREFIX}.#{100+node_num}"
  vm.network "private_network", ip: node_ip, netmask: "255.255.255.0"

  scripts_location = Dir.exist?("./scripts") ? "./scripts" : "../scripts"
  vagrant_defaults = File.exist?("./vagrantdefaults.rb") ? "./vagrantdefaults.rb" : "../vagrantdefaults.rb"
  load vagrant_defaults

  defaultOSConfigure(vm)
  install_type = getInstallType(vm, RELEASE_VERSION, GITHUB_BRANCH)

  vm.provision "shell", inline: "ping -c 2 k3s.io"

  runS3mock = <<~'SCRIPT'
    docker run -p 9090:9090 -p 9191:9191 -d -e initialBuckets=test -e debug=true -t adobe/s3mock
  SCRIPT
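  # adobe/s3mock exposes an in-memory S3 API on port 9090 (HTTP) and 9191 (HTTPS);
  # initialBuckets=test pre-creates the bucket named by etcd-s3-bucket below.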

  if role.include?("server") && role_num == 0
    dockerInstall(vm)
    vm.provision "run-S3-mock", type: "shell", inline: runS3mock
    vm.provision 'k3s-primary-server', type: 'k3s', run: 'once' do |k3s|
      k3s.args = "server "
      k3s.config = <<~YAML
        token: vagrant
        node-external-ip: #{NETWORK_PREFIX}.100
        flannel-iface: eth1
        cluster-init: true
        etcd-s3-insecure: true
        etcd-s3-bucket: test
        etcd-s3: true
        etcd-s3-endpoint: localhost:9090
        etcd-s3-skip-ssl-verify: true
        etcd-s3-access-key: test
      YAML
      k3s.env = %W[K3S_KUBECONFIG_MODE=0644 #{install_type}]
      k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
    end
  end
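  # The etcd-s3-* keys above point snapshot uploads at the local s3mock
  # container; cluster-init enables embedded etcd, which snapshots require.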

  if vm.box.to_s.include?("microos")
    vm.provision 'k3s-reload', type: 'reload', run: 'once'
    if !EXTERNAL_DB.empty?
      vm.provision "shell", inline: "docker start #{EXTERNAL_DB}"
    end
  end
end

Vagrant.configure("2") do |config|
  config.vagrant.plugins = ["vagrant-k3s", "vagrant-reload"]
  # Default provider is libvirt, virtualbox is only provided as a backup
  config.vm.provider "libvirt" do |v|
    v.cpus = NODE_CPUS
    v.memory = NODE_MEMORY
  end
  config.vm.provider "virtualbox" do |v|
    v.cpus = NODE_CPUS
    v.memory = NODE_MEMORY
  end

  if NODE_ROLES.kind_of?(String)
    NODE_ROLES = NODE_ROLES.split(" ", -1)
  end
  if NODE_BOXES.kind_of?(String)
    NODE_BOXES = NODE_BOXES.split(" ", -1)
  end

  # Must iterate on the index, vagrant does not understand iterating
  # over the node roles themselves
  NODE_ROLES.length.times do |i|
    name = NODE_ROLES[i]
    role_num = name.split("-", -1).pop.to_i
    config.vm.define name do |node|
      provision(node.vm, name, role_num, i)
    end
  end
end
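# A hypothetical way to bring the node up by hand, run from the directory
# containing this Vagrantfile (the version string is an assumption):
#   E2E_RELEASE_VERSION=v1.24.4+k3s1 vagrant up server-0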
@@ -0,0 +1,114 @@
package s3

import (
	"flag"
	"fmt"
	"os"
	"strings"
	"testing"

	"github.com/k3s-io/k3s/tests/e2e"
	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

// Valid nodeOS:
// generic/ubuntu2004, generic/centos7, generic/rocky8,
// opensuse/Leap-15.3.x86_64
var nodeOS = flag.String("nodeOS", "generic/ubuntu2004", "VM operating system")
var ci = flag.Bool("ci", false, "running on CI")
var local = flag.Bool("local", false, "deploy a locally built K3s binary")

// Environment Variables Info:
// E2E_EXTERNAL_DB: mysql, postgres, etcd (default: etcd)
// E2E_RELEASE_VERSION=v1.23.1+k3s2 (default: latest commit from master)
// E2E_REGISTRY: true/false (default: false)
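// A hypothetical local invocation (the package path and version are
// assumptions, not part of this commit):
//
//	E2E_RELEASE_VERSION=v1.24.4+k3s1 go test -timeout=30m ./tests/e2e/s3/... -run E2ES3 -nodeOS="generic/ubuntu2004"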

func Test_E2ES3(t *testing.T) {
	RegisterFailHandler(Fail)
	flag.Parse()
	suiteConfig, reporterConfig := GinkgoConfiguration()
	RunSpecs(t, "Create Cluster Test Suite", suiteConfig, reporterConfig)
}

var (
	kubeConfigFile  string
	serverNodeNames []string
	agentNodeNames  []string
)

var _ = ReportAfterEach(e2e.GenReport)

var _ = Describe("Verify Create", Ordered, func() {
	Context("Cluster :", func() {
		It("Starts up with no issues", func() {
			var err error
			if *local {
				serverNodeNames, agentNodeNames, err = e2e.CreateLocalCluster(*nodeOS, 1, 0)
			} else {
				serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, 1, 0)
			}
			Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
			fmt.Println("CLUSTER CONFIG")
			fmt.Println("OS:", *nodeOS)
			fmt.Println("Server Nodes:", serverNodeNames)
			fmt.Println("Agent Nodes:", agentNodeNames)
			kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0])
			Expect(err).NotTo(HaveOccurred())
		})
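		// Eventually re-runs the checks below every 5s, for up to 620s, until every
		// node reports Ready and every pod is Running (or Completed, for the
		// one-shot helm-install jobs).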
It("Checks Node and Pod Status", func() { |
||||
fmt.Printf("\nFetching node status\n") |
||||
Eventually(func(g Gomega) { |
||||
nodes, err := e2e.ParseNodes(kubeConfigFile, false) |
||||
g.Expect(err).NotTo(HaveOccurred()) |
||||
for _, node := range nodes { |
||||
g.Expect(node.Status).Should(Equal("Ready")) |
||||
} |
||||
}, "620s", "5s").Should(Succeed()) |
||||
_, _ = e2e.ParseNodes(kubeConfigFile, true) |
||||
|
||||
fmt.Printf("\nFetching Pods status\n") |
||||
Eventually(func(g Gomega) { |
||||
pods, err := e2e.ParsePods(kubeConfigFile, false) |
||||
g.Expect(err).NotTo(HaveOccurred()) |
||||
for _, pod := range pods { |
||||
if strings.Contains(pod.Name, "helm-install") { |
||||
g.Expect(pod.Status).Should(Equal("Completed"), pod.Name) |
||||
} else { |
||||
g.Expect(pod.Status).Should(Equal("Running"), pod.Name) |
||||
} |
||||
} |
||||
}, "620s", "5s").Should(Succeed()) |
||||
_, _ = e2e.ParsePods(kubeConfigFile, true) |
||||
}) |
||||
|
||||
It("ensures s3 mock is working", func() { |
||||
a, err := e2e.RunCmdOnNode("sudo docker ps -a | grep mock\n", serverNodeNames[0]) |
||||
fmt.Println(a) |
||||
Expect(err).NotTo(HaveOccurred()) |
||||
}) |
||||
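		// `k3s etcd-snapshot save` picks up the etcd-s3-* settings from the server
		// config written by the Vagrantfile, so the on-demand snapshot below should
		// be uploaded straight to the s3mock bucket.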
It("save s3 snapshot", func() { |
||||
a, err := e2e.RunCmdOnNode("sudo k3s etcd-snapshot save", serverNodeNames[0]) |
||||
Expect(err).NotTo(HaveOccurred()) |
||||
Expect(strings.Contains(a, "S3 bucket test exists")).Should(Equal(true)) |
||||
Expect(strings.Contains(a, "Uploading snapshot")).Should(Equal(true)) |
||||
Expect(strings.Contains(a, "S3 upload complete for")).Should(Equal(true)) |
||||
|
||||
}) |
||||
}) |
||||
}) |

var failed bool
var _ = AfterEach(func() {
	failed = failed || CurrentSpecReport().Failed()
})

var _ = AfterSuite(func() {
	if failed && !*ci {
		fmt.Println("FAILED!")
	} else {
		Expect(e2e.DestroyCluster()).To(Succeed())
		Expect(os.Remove(kubeConfigFile)).To(Succeed())
	}
})