mirror of https://github.com/k3s-io/k3s
E2E Improvements and groundwork for test-pad tool (#5593)
* Add rancher install script, taints to cp/etcd roles
* Revert back to generic/ubuntu2004, libvirt networking is unreliable on opensuse
* Added support for alpine
* Rancher deployment script
* Refactor installType into function
* Cleanup splitserver test

Signed-off-by: Derek Nola <derek.nola@suse.com>
parent 168b14b08e
commit efab09bc1f
@@ -0,0 +1,63 @@
+#!/bin/bash
+node_ip=$1
+
+echo "Give K3s time to startup"
+sleep 10
+kubectl -n kube-system rollout status deploy/coredns
+kubectl -n kube-system rollout status deploy/local-path-provisioner
+
+cat << EOF > /var/lib/rancher/k3s/server/manifests/rancher.yaml
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: cert-manager
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: cattle-system
+---
+apiVersion: helm.cattle.io/v1
+kind: HelmChart
+metadata:
+  namespace: kube-system
+  name: cert-manager
+spec:
+  targetNamespace: cert-manager
+  version: v1.6.1
+  chart: cert-manager
+  repo: https://charts.jetstack.io
+  set:
+    installCRDs: "true"
+---
+apiVersion: helm.cattle.io/v1
+kind: HelmChart
+metadata:
+  namespace: kube-system
+  name: rancher
+spec:
+  targetNamespace: cattle-system
+  version: 2.6.5
+  chart: rancher
+  repo: https://releases.rancher.com/server-charts/latest
+  set:
+    ingress.tls.source: "rancher"
+    hostname: "$node_ip.nip.io"
+    replicas: 1
+EOF
+
+
+echo "Give Rancher time to startup"
+sleep 20
+kubectl -n cert-manager rollout status deploy/cert-manager
+while ! kubectl get secret --namespace cattle-system bootstrap-secret -o go-template='{{.data.bootstrapPassword|base64decode}}' &> /dev/null; do
+  ((iterations++))
+  if [ "$iterations" -ge 8 ]; then
+    echo "Unable to find bootstrap-secret"
+    exit 1
+  fi
+  echo "waiting for bootstrap-secret..."
+  sleep 20
+done
+echo https://"$node_ip".nip.io/dashboard/?setup=$(kubectl get secret --namespace cattle-system bootstrap-secret -o go-template='{{.data.bootstrapPassword|base64decode}}')
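The bootstrap-secret wait above is a capped retry loop: up to 8 polls, 20 seconds apart, before giving up. For tooling built on top of this commit (the test-pad groundwork the title mentions), the same pattern might be re-expressed in Go as in the sketch below; it is illustrative only, not part of this commit, and assumes kubectl is on PATH.

// Sketch only: the script's capped wait loop re-expressed in Go.
// Polls for the cattle-system/bootstrap-secret up to 8 times,
// 20 seconds apart, then gives up, mirroring rancher.sh above.
package main

import (
	"fmt"
	"os"
	"os/exec"
	"time"
)

func main() {
	args := []string{"get", "secret", "--namespace", "cattle-system", "bootstrap-secret",
		"-o", "go-template={{.data.bootstrapPassword|base64decode}}"}
	for i := 0; i < 8; i++ {
		// A successful kubectl call means the secret exists and is decodable.
		if out, err := exec.Command("kubectl", args...).Output(); err == nil {
			fmt.Printf("bootstrap password: %s\n", out)
			return
		}
		fmt.Println("waiting for bootstrap-secret...")
		time.Sleep(20 * time.Second)
	}
	fmt.Println("Unable to find bootstrap-secret")
	os.Exit(1)
}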
@@ -44,7 +44,7 @@ def provision(vm, role, role_num, node_num)
       k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
     end
   end
-  if vm.box.include?("microos")
+  if vm.box.to_s.include?("microos")
     vm.provision 'k3s-reload', type: 'reload', run: 'once'
   end
 end
@@ -9,7 +9,19 @@ NODE_CPUS = (ENV['E2E_NODE_CPUS'] || 2).to_i
 NODE_MEMORY = (ENV['E2E_NODE_MEMORY'] || 1024).to_i
 # Virtualbox >= 6.1.28 require `/etc/vbox/network.conf` for expanded private networks
 NETWORK_PREFIX = "10.10.10"
-install_type = ""
 
+def installType(vm)
+  if RELEASE_VERSION == "skip"
+    return "INSTALL_K3S_SKIP_DOWNLOAD=true"
+  elsif !RELEASE_VERSION.empty?
+    return "INSTALL_K3S_VERSION=#{RELEASE_VERSION}"
+  end
+  # Grabs the last 5 commit SHA's from the given branch, then purges any commits that do not have a passing CI build
+  # MicroOS requires it not be in a /tmp/ or other root system folder
+  scripts_location = Dir.exists?("./scripts") ? "./scripts" : "../scripts"
+  vm.provision "Acquire latest commit", type: "shell", path: scripts_location + "/latest_commit.sh", args: [GITHUB_BRANCH, "/home/vagrant/k3s_commits"]
+  return "INSTALL_K3S_COMMIT=$(head\ -n\ 1\ /home/vagrant/k3s_commits)"
+end
+
 def provision(vm, role, role_num, node_num)
   vm.box = NODE_BOXES[node_num]
@@ -17,20 +29,12 @@ def provision(vm, role, role_num, node_num)
   # An expanded netmask is required to allow VM<-->VM communication, virtualbox defaults to /32
   vm.network "private_network", ip: "#{NETWORK_PREFIX}.#{100+node_num}", netmask: "255.255.255.0"
 
-  scripts_location = Dir.exists?("./scripts") ? "./scripts" : "../scripts"
   vagrant_defaults = File.exists?("./vagrantdefaults.rb") ? "./vagrantdefaults.rb" : "../vagrantdefaults.rb"
   load vagrant_defaults
 
   defaultOSConfigure(vm)
 
-  if !RELEASE_VERSION.empty?
-    install_type = "INSTALL_K3S_VERSION=#{RELEASE_VERSION}"
-  else
-    # Grabs the last 5 commit SHA's from the given branch, then purges any commits that do not have a passing CI build
-    # MicroOS requires it not be in a /tmp/ or other root system folder
-    vm.provision "Acquire latest commit", type: "shell", path: scripts_location + "/latest_commit.sh", args: [GITHUB_BRANCH, "/home/vagrant/k3s_commits"]
-    install_type = "INSTALL_K3S_COMMIT=$(head\ -n\ 1\ /home/vagrant/k3s_commits)"
-  end
+  install_type = installType(vm)
   vm.provision "ping k3s.io", type: "shell", inline: "ping -c 2 k3s.io"
 
   if node_num == 0 && !role.include?("server") && !role.include?("etcd")
@@ -46,6 +50,8 @@ def provision(vm, role, role_num, node_num)
         disable-apiserver: true
         disable-controller-manager: true
         disable-scheduler: true
+        node-taint:
+        - node-role.kubernetes.io/etcd:NoExecute
       YAML
       k3s.env = %W[K3S_KUBECONFIG_MODE=0644 K3S_TOKEN=vagrant #{install_type}]
       k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
@@ -59,13 +65,33 @@ def provision(vm, role, role_num, node_num)
         disable-apiserver: true
         disable-controller-manager: true
         disable-scheduler: true
+        node-taint:
+        - node-role.kubernetes.io/etcd:NoExecute
+
       YAML
       k3s.env = %W[K3S_KUBECONFIG_MODE=0644 K3S_TOKEN=vagrant #{install_type}]
       k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
     end
   elsif role.include?("server") && role.include?("cp")
     vm.provision 'k3s-install', type: 'k3s', run: 'once' do |k3s|
-      k3s.args = "server --disable-etcd --server https://#{NETWORK_PREFIX}.100:6443 --flannel-iface=eth1"
+      k3s.args = "server"
+      k3s.config = <<~YAML
+        server: https://#{NETWORK_PREFIX}.100:6443
+        flannel-iface: eth1
+        disable-etcd: true
+        node-taint:
+        - node-role.kubernetes.io/control-plane:NoSchedule
+      YAML
+      k3s.env = %W[K3S_KUBECONFIG_MODE=0644 K3S_TOKEN=vagrant #{install_type}]
+      k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
+    end
+  elsif role.include?("server") && role.include?("all")
+    vm.provision 'k3s-install', type: 'k3s', run: 'once' do |k3s|
+      k3s.args = "server"
+      k3s.config = <<~YAML
+        server: https://#{NETWORK_PREFIX}.100:6443
+        flannel-iface: eth1
+      YAML
       k3s.env = %W[K3S_KUBECONFIG_MODE=0644 K3S_TOKEN=vagrant #{install_type}]
       k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
     end
@@ -77,7 +103,7 @@ def provision(vm, role, role_num, node_num)
       k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
     end
   end
-  if vm.box.include?("microos")
+  if vm.box.to_s.include?("microos")
    vm.provision 'k3s-reload', type: 'reload', run: 'once'
   end
 end
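The installType helper introduced above collapses three install modes into a single env-var string: E2E_RELEASE_VERSION=skip reuses a k3s binary already on the box, an explicit version pins the installer, and the default falls back to the newest branch commit with a passing CI build (as written out by latest_commit.sh). As a rough illustration only (not part of this commit), the same selection logic in Go:

// Illustrative Go equivalent of the Ruby installType helper above;
// releaseVersion plays the role of E2E_RELEASE_VERSION.
func installType(releaseVersion string) string {
	switch {
	case releaseVersion == "skip":
		// binary is already installed on the box; skip the download
		return "INSTALL_K3S_SKIP_DOWNLOAD=true"
	case releaseVersion != "":
		return "INSTALL_K3S_VERSION=" + releaseVersion
	default:
		// latest_commit.sh has written candidate SHAs to /home/vagrant/k3s_commits
		return "INSTALL_K3S_COMMIT=$(head -n 1 /home/vagrant/k3s_commits)"
	}
}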
@@ -114,23 +114,17 @@ var _ = Describe("Verify Create", func() {
 			_, err := e2e.DeployWorkload("clusterip.yaml", kubeConfigFile, false)
 			Expect(err).NotTo(HaveOccurred(), "Cluster IP manifest not deployed")
 
-			Eventually(func(g Gomega) {
-				cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-clusterip --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
-				res, err := e2e.RunCommand(cmd)
-				Expect(err).NotTo(HaveOccurred())
-				g.Expect(res).Should((ContainSubstring("test-clusterip")))
-			}, "240s", "5s").Should(Succeed())
+			cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-clusterip --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
+			Eventually(func() (string, error) {
+				return e2e.RunCommand(cmd)
+			}, "240s", "5s").Should(ContainSubstring("test-clusterip"), "failed cmd: "+cmd)
 
 			clusterip, _ := e2e.FetchClusterIP(kubeConfigFile, "nginx-clusterip-svc")
-			cmd := "curl -L --insecure http://" + clusterip + "/name.html"
-			fmt.Println(cmd)
+			cmd = "curl -L --insecure http://" + clusterip + "/name.html"
 			for _, nodeName := range cpNodeNames {
-				Eventually(func(g Gomega) {
-					res, err := e2e.RunCmdOnNode(cmd, nodeName)
-					g.Expect(err).NotTo(HaveOccurred())
-					fmt.Println(res)
-					Expect(res).Should(ContainSubstring("test-clusterip"))
-				}, "120s", "10s").Should(Succeed())
+				Eventually(func() (string, error) {
+					return e2e.RunCmdOnNode(cmd, nodeName)
+				}, "120s", "10s").Should(ContainSubstring("test-clusterip"), "failed cmd: "+cmd)
 			}
 		})
@@ -144,21 +138,15 @@ var _ = Describe("Verify Create", func() {
 			nodeport, err := e2e.RunCommand(cmd)
 			Expect(err).NotTo(HaveOccurred())
 
-			Eventually(func(g Gomega) {
-				cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-nodeport --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
-				res, err := e2e.RunCommand(cmd)
-				Expect(err).NotTo(HaveOccurred())
-				g.Expect(res).Should(ContainSubstring("test-nodeport"), "nodeport pod was not created")
-			}, "240s", "5s").Should(Succeed())
+			cmd = "kubectl get pods -o=name -l k8s-app=nginx-app-nodeport --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
+			Eventually(func() (string, error) {
+				return e2e.RunCommand(cmd)
+			}, "240s", "5s").Should(ContainSubstring("test-nodeport"), "nodeport pod was not created")
 
 			cmd = "curl -L --insecure http://" + nodeExternalIP + ":" + nodeport + "/name.html"
-			fmt.Println(cmd)
-			Eventually(func(g Gomega) {
-				res, err := e2e.RunCommand(cmd)
-				Expect(err).NotTo(HaveOccurred())
-				fmt.Println(res)
-				g.Expect(res).Should(ContainSubstring("test-nodeport"))
-			}, "240s", "5s").Should(Succeed())
+			Eventually(func() (string, error) {
+				return e2e.RunCommand(cmd)
+			}, "240s", "5s").Should(ContainSubstring("test-nodeport"), "failed cmd: "+cmd)
 			}
 		})
@@ -173,21 +161,15 @@ var _ = Describe("Verify Create", func() {
 			port, err := e2e.RunCommand(cmd)
 			Expect(err).NotTo(HaveOccurred())
 
-			Eventually(func(g Gomega) {
-				cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-loadbalancer --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
-				res, err := e2e.RunCommand(cmd)
-				Expect(err).NotTo(HaveOccurred())
-				g.Expect(res).Should(ContainSubstring("test-loadbalancer"))
-			}, "240s", "5s").Should(Succeed())
+			cmd = "kubectl get pods -o=name -l k8s-app=nginx-app-loadbalancer --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
+			Eventually(func() (string, error) {
+				return e2e.RunCommand(cmd)
+			}, "240s", "5s").Should(ContainSubstring("test-loadbalancer"), "failed cmd: "+cmd)
 
-			Eventually(func(g Gomega) {
-				cmd = "curl -L --insecure http://" + ip + ":" + port + "/name.html"
-				fmt.Println(cmd)
-				res, err := e2e.RunCommand(cmd)
-				Expect(err).NotTo(HaveOccurred())
-				fmt.Println(res)
-				g.Expect(res).Should(ContainSubstring("test-loadbalancer"))
-			}, "240s", "5s").Should(Succeed())
+			cmd = "curl -L --insecure http://" + ip + ":" + port + "/name.html"
+			Eventually(func() (string, error) {
+				return e2e.RunCommand(cmd)
+			}, "240s", "5s").Should(ContainSubstring("test-loadbalancer"), "failed cmd: "+cmd)
 			}
 		})
@@ -198,14 +180,9 @@ var _ = Describe("Verify Create", func() {
 			for _, nodeName := range cpNodeNames {
 				ip, _ := e2e.FetchNodeExternalIP(nodeName)
 				cmd := "curl --header host:foo1.bar.com" + " http://" + ip + "/name.html"
-				fmt.Println(cmd)
-
-				Eventually(func(g Gomega) {
-					res, err := e2e.RunCommand(cmd)
-					g.Expect(err).NotTo(HaveOccurred())
-					fmt.Println(res)
-					g.Expect(res).Should(ContainSubstring("test-ingress"))
-				}, "240s", "5s").Should(Succeed())
+				Eventually(func() (string, error) {
+					return e2e.RunCommand(cmd)
+				}, "240s", "5s").Should(ContainSubstring("test-ingress"), "failed cmd: "+cmd)
 			}
 		})
@@ -213,37 +190,30 @@ var _ = Describe("Verify Create", func() {
 			_, err := e2e.DeployWorkload("daemonset.yaml", kubeConfigFile, false)
 			Expect(err).NotTo(HaveOccurred(), "Daemonset manifest not deployed")
 
-			nodes, _ := e2e.ParseNodes(kubeConfigFile, false)
-			pods, _ := e2e.ParsePods(kubeConfigFile, false)
-
 			Eventually(func(g Gomega) {
+				pods, _ := e2e.ParsePods(kubeConfigFile, false)
 				count := e2e.CountOfStringInSlice("test-daemonset", pods)
 				fmt.Println("POD COUNT")
 				fmt.Println(count)
-				fmt.Println("NODE COUNT")
-				fmt.Println(len(nodes))
-				g.Expect(len(nodes)).Should((Equal(count)), "Daemonset pod count does not match node count")
-			}, "420s", "10s").Should(Succeed())
+				fmt.Println("CP COUNT")
+				fmt.Println(len(cpNodeNames))
+				g.Expect(len(cpNodeNames)).Should((Equal(count)), "Daemonset pod count does not match cp node count")
+			}, "240s", "10s").Should(Succeed())
 		})
 
 		It("Verifies dns access", func() {
 			_, err := e2e.DeployWorkload("dnsutils.yaml", kubeConfigFile, false)
 			Expect(err).NotTo(HaveOccurred(), "dnsutils manifest not deployed")
 
-			Eventually(func(g Gomega) {
-				cmd := "kubectl get pods dnsutils --kubeconfig=" + kubeConfigFile
-				res, _ := e2e.RunCommand(cmd)
-				fmt.Println(res)
-				g.Expect(res).Should(ContainSubstring("dnsutils"))
-			}, "420s", "2s").Should(Succeed())
+			cmd := "kubectl get pods dnsutils --kubeconfig=" + kubeConfigFile
+			Eventually(func() (string, error) {
+				return e2e.RunCommand(cmd)
+			}, "420s", "2s").Should(ContainSubstring("dnsutils"), "failed cmd: "+cmd)
 
-			Eventually(func(g Gomega) {
-				cmd := "kubectl --kubeconfig=" + kubeConfigFile + " exec -i -t dnsutils -- nslookup kubernetes.default"
-				fmt.Println(cmd)
-				res, _ := e2e.RunCommand(cmd)
-				fmt.Println(res)
-				g.Expect(res).Should(ContainSubstring("kubernetes.default.svc.cluster.local"))
-			}, "420s", "2s").Should(Succeed())
+			cmd = "kubectl --kubeconfig=" + kubeConfigFile + " exec -i -t dnsutils -- nslookup kubernetes.default"
+			Eventually(func() (string, error) {
+				return e2e.RunCommand(cmd)
+			}, "420s", "2s").Should(ContainSubstring("kubernetes.default.svc.cluster.local"), "failed cmd: "+cmd)
 		})
 	})
 })
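All of the test rewrites above follow one pattern: the verbose func(g Gomega) closures become polling functions that return (string, error). Gomega's Eventually treats a non-nil error from the polled function as a failed attempt and keeps retrying, so the per-poll error assertions can be dropped, and attaching "failed cmd: "+cmd to the matcher puts the failing command in the failure output. A minimal self-contained sketch of the pattern (the command and matched substring are illustrative, not from this commit):

package e2e_test

import (
	"os/exec"
	"testing"

	. "github.com/onsi/gomega"
)

func TestEventuallyPolling(t *testing.T) {
	g := NewWithT(t)
	cmd := "kubectl get pods -A" // illustrative command
	g.Eventually(func() (string, error) {
		// A non-nil error fails this poll attempt and schedules a retry;
		// only the string is handed to the matcher.
		out, err := exec.Command("sh", "-c", cmd).CombinedOutput()
		return string(out), err
	}, "240s", "5s").Should(ContainSubstring("Running"), "failed cmd: "+cmd)
}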
@@ -55,7 +55,7 @@ def provision(vm, role, role_num, node_num)
       k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
     end
   end
-  if vm.box.include?("microos")
+  if vm.box.to_s.include?("microos")
     vm.provision 'k3s-reload', type: 'reload', run: 'once'
   end
 end
@@ -1,11 +1,14 @@
 def defaultOSConfigure(vm)
-  if vm.box.include?("generic/ubuntu")
-    vm.provision "Set DNS", type: "shell", inline: "systemd-resolve --set-dns=8.8.8.8 --interface=eth0"
-    vm.provision "Install jq", type: "shell", inline: "apt install -y jq"
-  elsif vm.box.include?("Leap")
-    vm.provision "Install jq", type: "shell", inline: "zypper install -y jq"
-  elsif vm.box.include?("microos")
-    vm.provision "Install jq", type: "shell", inline: "transactional-update pkg install -y jq"
-    vm.provision 'reload', run: 'once'
-  end
+  box = vm.box.to_s
+  if box.include?("generic/ubuntu")
+    vm.provision "Set DNS", type: "shell", inline: "systemd-resolve --set-dns=8.8.8.8 --interface=eth0"
+    vm.provision "Install jq", type: "shell", inline: "apt install -y jq"
+  elsif box.include?("Leap") || box.include?("Tumbleweed")
+    vm.provision "Install jq", type: "shell", inline: "zypper install -y jq"
+  elsif box.include?("alpine")
+    vm.provision "Install tools", type: "shell", inline: "apk add jq coreutils"
+  elsif box.include?("microos")
+    vm.provision "Install jq", type: "shell", inline: "transactional-update pkg install -y jq"
+    vm.provision 'reload', run: 'once'
+  end
 end
@@ -2,11 +2,12 @@ ENV['VAGRANT_NO_PARALLEL'] = 'no'
 NODE_ROLES = (ENV['E2E_NODE_ROLES'] ||
   ["server-0", "server-1", "server-2", "agent-0", "agent-1"])
 NODE_BOXES = (ENV['E2E_NODE_BOXES'] ||
-  ['opensuse/Leap-15.3.x86_64', 'opensuse/Leap-15.3.x86_64', 'opensuse/Leap-15.3.x86_64', 'opensuse/Leap-15.3.x86_64', 'opensuse/Leap-15.3.x86_64'])
+  ['generic/ubuntu2004', 'generic/ubuntu2004', 'generic/ubuntu2004', 'generic/ubuntu2004', 'generic/ubuntu2004'])
 GITHUB_BRANCH = (ENV['E2E_GITHUB_BRANCH'] || "master")
 RELEASE_VERSION = (ENV['E2E_RELEASE_VERSION'] || "")
 EXTERNAL_DB = (ENV['E2E_EXTERNAL_DB'] || "etcd")
 HARDENED = (ENV['E2E_HARDENED'] || "")
+RANCHER = (ENV['E2E_RANCHER'] || "")
 NODE_CPUS = (ENV['E2E_NODE_CPUS'] || 2).to_i
 NODE_MEMORY = (ENV['E2E_NODE_MEMORY'] || 1024).to_i
 # Virtualbox >= 6.1.28 require `/etc/vbox/network.conf` for expanded private networks
@@ -27,7 +28,9 @@ def provision(vm, role, role_num, node_num)
 
   defaultOSConfigure(vm)
 
-  if !RELEASE_VERSION.empty?
+  if RELEASE_VERSION == "skip"
+    install_type = "INSTALL_K3S_SKIP_DOWNLOAD=true"
+  elsif !RELEASE_VERSION.empty?
     install_type = "INSTALL_K3S_VERSION=#{RELEASE_VERSION}"
   else
     # Grabs the last 5 commit SHA's from the given branch, then purges any commits that do not have a passing CI build
@@ -51,6 +54,7 @@ def provision(vm, role, role_num, node_num)
       token: vagrant
       node-external-ip: #{NETWORK_PREFIX}.100
       flannel-iface: eth1
+      tls-san: #{NETWORK_PREFIX}.100.nip.io
       #{db_type}
       #{hardened_arg}
     YAML
@@ -89,12 +93,16 @@ def provision(vm, role, role_num, node_num)
       k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
     end
   end
-  if vm.box.include?("microos")
+  if vm.box.to_s.include?("microos")
     vm.provision 'k3s-reload', type: 'reload', run: 'once'
     if !EXTERNAL_DB.empty?
       vm.provision "shell", inline: "docker start #{EXTERNAL_DB}"
     end
   end
+  # This step does not run by default and is designed to be called by higher level tools
+  if !RANCHER.empty?
+    vm.provision "Install Rancher", type: "shell", run: "never", path: scripts_location + "/rancher.sh", args: node_ip
+  end
 end
 
 def getDBType(role, role_num, vm)
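Usage note: because the "Install Rancher" provisioner is registered with run: "never", a plain vagrant up or vagrant provision skips it. It executes only when requested explicitly, for example with vagrant provision --provision-with "Install Rancher", which is what the "designed to be called by higher level tools" comment refers to.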
@@ -139,13 +147,13 @@ def dockerInstall(vm)
   vm.provider "virtualbox" do |v|
     v.memory = NODE_MEMORY + 1024
   end
-  if vm.box.include?("ubuntu")
+  if vm.box.to_s.include?("ubuntu")
     vm.provision "shell", inline: "apt install -y docker.io"
   end
-  if vm.box.include?("Leap")
+  if vm.box.to_s.include?("Leap")
     vm.provision "shell", inline: "zypper install -y docker apparmor-parser"
   end
-  if vm.box.include?("microos")
+  if vm.box.to_s.include?("microos")
     vm.provision "shell", inline: "transactional-update pkg install -y docker apparmor-parser"
     vm.provision 'docker-reload', type: 'reload', run: 'once'
     vm.provision "shell", inline: "systemctl enable --now docker"