mirror of https://github.com/k3s-io/k3s
E2E: Use sudo for all RunCmdOnNode
Signed-off-by: Derek Nola <derek.nola@suse.com>
parent 00f3e2413f
commit 4a68fbd8e9
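This commit moves the sudo prefix out of every individual test command and into the RunCmdOnNode helper itself, so callers now pass plain commands such as "k3s secrets-encrypt status" or "systemctl start k3s". Below is a minimal sketch of the helper's new behavior, taken from the diff that follows; RunCommand here is a simplified stand-in for the e2e package's shell runner, which this diff does not show in full.

package e2e

import (
    "fmt"
    "os/exec"
)

// RunCommand is a simplified stand-in: it runs the given string through a local shell.
func RunCommand(cmd string) (string, error) {
    out, err := exec.Command("bash", "-c", cmd).CombinedOutput()
    return string(out), err
}

// RunCmdOnNode executes a command from within the given node, prepending sudo
// so that callers no longer need to include it themselves.
func RunCmdOnNode(cmd string, nodename string) (string, error) {
    runcmd := "vagrant ssh " + nodename + " -c \"sudo " + cmd + "\""
    out, err := RunCommand(runcmd)
    if err != nil {
        return out, fmt.Errorf("failed to run command: %s on node %s: %s, %v", cmd, nodename, out, err)
    }
    return out, nil
}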
@@ -78,9 +78,9 @@ var _ = Describe("Verify Custom CA Rotation", Ordered, func() {
 
         It("Generates New CA Certificates", func() {
             cmds := []string{
-                "sudo mkdir -p /opt/rancher/k3s/server",
-                "sudo cp -r /var/lib/rancher/k3s/server/tls /opt/rancher/k3s/server",
-                "sudo DATA_DIR=/opt/rancher/k3s /tmp/generate-custom-ca-certs.sh",
+                "mkdir -p /opt/rancher/k3s/server",
+                "cp -r /var/lib/rancher/k3s/server/tls /opt/rancher/k3s/server",
+                "DATA_DIR=/opt/rancher/k3s /tmp/generate-custom-ca-certs.sh",
             }
             for _, cmd := range cmds {
                 _, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0])
@@ -89,7 +89,7 @@ var _ = Describe("Verify Custom CA Rotation", Ordered, func() {
         })
 
         It("Rotates CA Certificates", func() {
-            cmd := "sudo k3s certificate rotate-ca --path=/opt/rancher/k3s/server"
+            cmd := "k3s certificate rotate-ca --path=/opt/rancher/k3s/server"
             _, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0])
             Expect(err).NotTo(HaveOccurred())
         })
@@ -85,7 +85,7 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() {
         })
 
         It("Verifies encryption start stage", func() {
-            cmd := "sudo k3s secrets-encrypt status"
+            cmd := "k3s secrets-encrypt status"
             for _, nodeName := range serverNodeNames {
                 res, err := e2e.RunCmdOnNode(cmd, nodeName)
                 Expect(err).NotTo(HaveOccurred())
@@ -96,11 +96,11 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() {
         })
 
         It("Prepares for Secrets-Encryption Rotation", func() {
-            cmd := "sudo k3s secrets-encrypt prepare"
+            cmd := "k3s secrets-encrypt prepare"
             res, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0])
             Expect(err).NotTo(HaveOccurred(), res)
             for i, nodeName := range serverNodeNames {
-                cmd := "sudo k3s secrets-encrypt status"
+                cmd := "k3s secrets-encrypt status"
                 res, err := e2e.RunCmdOnNode(cmd, nodeName)
                 Expect(err).NotTo(HaveOccurred(), res)
                 Expect(res).Should(ContainSubstring("Server Encryption Hashes: hash does not match"))
@@ -140,7 +140,7 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() {
         })
 
         It("Verifies encryption prepare stage", func() {
-            cmd := "sudo k3s secrets-encrypt status"
+            cmd := "k3s secrets-encrypt status"
             for _, nodeName := range serverNodeNames {
                 Eventually(func(g Gomega) {
                     res, err := e2e.RunCmdOnNode(cmd, nodeName)
@@ -153,12 +153,12 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() {
         })
 
         It("Rotates the Secrets-Encryption Keys", func() {
-            cmd := "sudo k3s secrets-encrypt rotate"
+            cmd := "k3s secrets-encrypt rotate"
             res, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0])
             Expect(err).NotTo(HaveOccurred(), res)
             for i, nodeName := range serverNodeNames {
                 Eventually(func(g Gomega) {
-                    cmd := "sudo k3s secrets-encrypt status"
+                    cmd := "k3s secrets-encrypt status"
                     res, err := e2e.RunCmdOnNode(cmd, nodeName)
                     g.Expect(err).NotTo(HaveOccurred(), res)
                     g.Expect(res).Should(ContainSubstring("Server Encryption Hashes: hash does not match"))
@@ -176,7 +176,7 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() {
         })
 
         It("Verifies encryption rotate stage", func() {
-            cmd := "sudo k3s secrets-encrypt status"
+            cmd := "k3s secrets-encrypt status"
             for _, nodeName := range serverNodeNames {
                 Eventually(func(g Gomega) {
                     res, err := e2e.RunCmdOnNode(cmd, nodeName)
@@ -189,11 +189,11 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() {
         })
 
         It("Reencrypts the Secrets-Encryption Keys", func() {
-            cmd := "sudo k3s secrets-encrypt reencrypt"
+            cmd := "k3s secrets-encrypt reencrypt"
             res, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0])
             Expect(err).NotTo(HaveOccurred(), res)
 
-            cmd = "sudo k3s secrets-encrypt status"
+            cmd = "k3s secrets-encrypt status"
             Eventually(func() (string, error) {
                 return e2e.RunCmdOnNode(cmd, serverNodeNames[0])
             }, "180s", "5s").Should(ContainSubstring("Current Rotation Stage: reencrypt_finished"))
@@ -211,7 +211,7 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() {
         })
 
         It("Verifies Encryption Reencrypt Stage", func() {
-            cmd := "sudo k3s secrets-encrypt status"
+            cmd := "k3s secrets-encrypt status"
             for _, nodeName := range serverNodeNames {
                 Eventually(func(g Gomega) {
                     res, err := e2e.RunCmdOnNode(cmd, nodeName)
@@ -226,15 +226,15 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() {
 
     Context("Disabling Secrets-Encryption", func() {
         It("Disables encryption", func() {
-            cmd := "sudo k3s secrets-encrypt disable"
+            cmd := "k3s secrets-encrypt disable"
             res, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0])
             Expect(err).NotTo(HaveOccurred(), res)
 
-            cmd = "sudo k3s secrets-encrypt reencrypt -f --skip"
+            cmd = "k3s secrets-encrypt reencrypt -f --skip"
             res, err = e2e.RunCmdOnNode(cmd, serverNodeNames[0])
             Expect(err).NotTo(HaveOccurred(), res)
 
-            cmd = "sudo k3s secrets-encrypt status"
+            cmd = "k3s secrets-encrypt status"
             Eventually(func() (string, error) {
                 return e2e.RunCmdOnNode(cmd, serverNodeNames[0])
             }, "180s", "5s").Should(ContainSubstring("Current Rotation Stage: reencrypt_finished"))
@@ -257,7 +257,7 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() {
         })
 
         It("Verifies encryption disabled on all nodes", func() {
-            cmd := "sudo k3s secrets-encrypt status"
+            cmd := "k3s secrets-encrypt status"
             for _, nodeName := range serverNodeNames {
                 Eventually(func(g Gomega) {
                     g.Expect(e2e.RunCmdOnNode(cmd, nodeName)).Should(ContainSubstring("Encryption Status: Disabled"))
@@ -269,15 +269,15 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() {
 
     Context("Enabling Secrets-Encryption", func() {
         It("Enables encryption", func() {
-            cmd := "sudo k3s secrets-encrypt enable"
+            cmd := "k3s secrets-encrypt enable"
             res, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0])
             Expect(err).NotTo(HaveOccurred(), res)
 
-            cmd = "sudo k3s secrets-encrypt reencrypt -f --skip"
+            cmd = "k3s secrets-encrypt reencrypt -f --skip"
             res, err = e2e.RunCmdOnNode(cmd, serverNodeNames[0])
             Expect(err).NotTo(HaveOccurred(), res)
 
-            cmd = "sudo k3s secrets-encrypt status"
+            cmd = "k3s secrets-encrypt status"
             Eventually(func() (string, error) {
                 return e2e.RunCmdOnNode(cmd, serverNodeNames[0])
             }, "180s", "5s").Should(ContainSubstring("Current Rotation Stage: reencrypt_finished"))
@@ -288,7 +288,7 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() {
         })
 
         It("Verifies encryption enabled on all nodes", func() {
-            cmd := "sudo k3s secrets-encrypt status"
+            cmd := "k3s secrets-encrypt status"
             for _, nodeName := range serverNodeNames {
                 Eventually(func(g Gomega) {
                     g.Expect(e2e.RunCmdOnNode(cmd, nodeName)).Should(ContainSubstring("Encryption Status: Enabled"))
@@ -102,10 +102,10 @@ var _ = Describe("Verify snapshots and cluster restores work", Ordered, func() {
 
         It("Verifies Snapshot is created", func() {
             Eventually(func(g Gomega) {
-                cmd := "sudo k3s etcd-snapshot save"
+                cmd := "k3s etcd-snapshot save"
                 _, err := e2e.RunCmdOnNode(cmd, "server-0")
                 g.Expect(err).NotTo(HaveOccurred())
-                cmd = "sudo ls /var/lib/rancher/k3s/server/db/snapshots/"
+                cmd = "ls /var/lib/rancher/k3s/server/db/snapshots/"
                 snapshotname, err = e2e.RunCmdOnNode(cmd, "server-0")
                 g.Expect(err).NotTo(HaveOccurred())
                 fmt.Println("Snapshot Name", snapshotname)
@@ -128,7 +128,7 @@ var _ = Describe("Verify snapshots and cluster restores work", Ordered, func() {
     Context("Cluster is reset normally", func() {
         It("Resets the cluster", func() {
             for _, nodeName := range serverNodeNames {
-                cmd := "sudo systemctl stop k3s"
+                cmd := "systemctl stop k3s"
                 Expect(e2e.RunCmdOnNode(cmd, nodeName)).Error().NotTo(HaveOccurred())
                 if nodeName != serverNodeNames[0] {
                     cmd = "k3s-killall.sh"
@@ -136,12 +136,12 @@ var _ = Describe("Verify snapshots and cluster restores work", Ordered, func() {
                 }
             }
 
-            cmd := "sudo k3s server --cluster-reset"
+            cmd := "k3s server --cluster-reset"
             res, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0])
             Expect(err).NotTo(HaveOccurred())
             Expect(res).Should(ContainSubstring("Managed etcd cluster membership has been reset, restart without --cluster-reset flag now"))
 
-            cmd = "sudo systemctl start k3s"
+            cmd = "systemctl start k3s"
             Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).Error().NotTo(HaveOccurred())
         })
 
@@ -165,12 +165,12 @@ var _ = Describe("Verify snapshots and cluster restores work", Ordered, func() {
             // We must remove the db directory on the other servers before restarting k3s
             // otherwise the nodes may join the old cluster
             for _, nodeName := range serverNodeNames[1:] {
-                cmd := "sudo rm -rf /var/lib/rancher/k3s/server/db"
+                cmd := "rm -rf /var/lib/rancher/k3s/server/db"
                 Expect(e2e.RunCmdOnNode(cmd, nodeName)).Error().NotTo(HaveOccurred())
             }
 
             for _, nodeName := range serverNodeNames[1:] {
-                cmd := "sudo systemctl start k3s"
+                cmd := "systemctl start k3s"
                 Expect(e2e.RunCmdOnNode(cmd, nodeName)).Error().NotTo(HaveOccurred())
                 time.Sleep(20 * time.Second) //Stagger the restarts for etcd leaners
             }
@@ -214,7 +214,7 @@ var _ = Describe("Verify snapshots and cluster restores work", Ordered, func() {
         It("Restores the snapshot", func() {
             //Stop k3s on all nodes
             for _, nodeName := range serverNodeNames {
-                cmd := "sudo systemctl stop k3s"
+                cmd := "systemctl stop k3s"
                 Expect(e2e.RunCmdOnNode(cmd, nodeName)).Error().NotTo(HaveOccurred())
                 if nodeName != serverNodeNames[0] {
                     cmd = "k3s-killall.sh"
@@ -222,12 +222,12 @@ var _ = Describe("Verify snapshots and cluster restores work", Ordered, func() {
                 }
             }
             //Restores from snapshot on server-0
-            cmd := "sudo k3s server --cluster-init --cluster-reset --cluster-reset-restore-path=/var/lib/rancher/k3s/server/db/snapshots/" + snapshotname
+            cmd := "k3s server --cluster-init --cluster-reset --cluster-reset-restore-path=/var/lib/rancher/k3s/server/db/snapshots/" + snapshotname
             res, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0])
             Expect(err).NotTo(HaveOccurred())
             Expect(res).Should(ContainSubstring("Managed etcd cluster membership has been reset, restart without --cluster-reset flag now"))
 
-            cmd = "sudo systemctl start k3s"
+            cmd = "systemctl start k3s"
             Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).Error().NotTo(HaveOccurred())
 
         })
@@ -252,12 +252,12 @@ var _ = Describe("Verify snapshots and cluster restores work", Ordered, func() {
             // We must remove the db directory on the other servers before restarting k3s
             // otherwise the nodes may join the old cluster
             for _, nodeName := range serverNodeNames[1:] {
-                cmd := "sudo rm -rf /var/lib/rancher/k3s/server/db"
+                cmd := "rm -rf /var/lib/rancher/k3s/server/db"
                 Expect(e2e.RunCmdOnNode(cmd, nodeName)).Error().NotTo(HaveOccurred())
             }
 
             for _, nodeName := range serverNodeNames[1:] {
-                cmd := "sudo systemctl start k3s"
+                cmd := "systemctl start k3s"
                 Expect(e2e.RunCmdOnNode(cmd, nodeName)).Error().NotTo(HaveOccurred())
             }
         })
@@ -45,13 +45,13 @@ func StartK3sCluster(nodes []string, serverYAML string, agentYAML string) error
         var resetCmd string
         var startCmd string
         if strings.Contains(node, "server") {
-            resetCmd = "sudo head -n 3 /etc/rancher/k3s/config.yaml > /tmp/config.yaml && sudo mv /tmp/config.yaml /etc/rancher/k3s/config.yaml"
-            yamlCmd = fmt.Sprintf("sudo echo '%s' >> /etc/rancher/k3s/config.yaml", serverYAML)
-            startCmd = "sudo systemctl start k3s"
+            resetCmd = "head -n 3 /etc/rancher/k3s/config.yaml > /tmp/config.yaml && sudo mv /tmp/config.yaml /etc/rancher/k3s/config.yaml"
+            yamlCmd = fmt.Sprintf("echo '%s' >> /etc/rancher/k3s/config.yaml", serverYAML)
+            startCmd = "systemctl start k3s"
         } else {
-            resetCmd = "sudo head -n 4 /etc/rancher/k3s/config.yaml > /tmp/config.yaml && sudo mv /tmp/config.yaml /etc/rancher/k3s/config.yaml"
-            yamlCmd = fmt.Sprintf("sudo echo '%s' >> /etc/rancher/k3s/config.yaml", agentYAML)
-            startCmd = "sudo systemctl start k3s-agent"
+            resetCmd = "head -n 4 /etc/rancher/k3s/config.yaml > /tmp/config.yaml && sudo mv /tmp/config.yaml /etc/rancher/k3s/config.yaml"
+            yamlCmd = fmt.Sprintf("echo '%s' >> /etc/rancher/k3s/config.yaml", agentYAML)
+            startCmd = "systemctl start k3s-agent"
         }
         if _, err := e2e.RunCmdOnNode(resetCmd, node); err != nil {
             return err
@@ -68,11 +68,11 @@ func StartK3sCluster(nodes []string, serverYAML string, agentYAML string) error
 
 func KillK3sCluster(nodes []string) error {
     for _, node := range nodes {
-        if _, err := e2e.RunCmdOnNode("sudo k3s-killall.sh", node); err != nil {
+        if _, err := e2e.RunCmdOnNode("k3s-killall.sh", node); err != nil {
             return err
         }
         if strings.Contains(node, "server") {
-            if _, err := e2e.RunCmdOnNode("sudo rm -rf /var/lib/rancher/k3s/server/db", node); err != nil {
+            if _, err := e2e.RunCmdOnNode("rm -rf /var/lib/rancher/k3s/server/db", node); err != nil {
                 return err
             }
         }
@@ -146,7 +146,7 @@ func scpK3sBinary(nodeNames []string) error {
         if _, err := RunCommand(cmd); err != nil {
             return fmt.Errorf("failed to scp k3s binary to %s: %v", node, err)
         }
-        if _, err := RunCmdOnNode("sudo mv /tmp/k3s /usr/local/bin/", node); err != nil {
+        if _, err := RunCmdOnNode("mv /tmp/k3s /usr/local/bin/", node); err != nil {
             return err
         }
     }
@@ -419,7 +419,7 @@ func ParsePods(kubeConfig string, print bool) ([]Pod, error) {
 // RestartCluster restarts the k3s service on each node given
 func RestartCluster(nodeNames []string) error {
     for _, nodeName := range nodeNames {
-        cmd := "sudo systemctl restart k3s* --all"
+        cmd := "systemctl restart k3s* --all"
         if _, err := RunCmdOnNode(cmd, nodeName); err != nil {
             return err
         }
@@ -430,7 +430,7 @@ func RestartCluster(nodeNames []string) error {
 // StartCluster starts the k3s service on each node given
 func StartCluster(nodeNames []string) error {
     for _, nodeName := range nodeNames {
-        cmd := "sudo systemctl start k3s"
+        cmd := "systemctl start k3s"
         if strings.Contains(nodeName, "agent") {
             cmd += "-agent"
         }
@@ -444,7 +444,7 @@ func StartCluster(nodeNames []string) error {
 // StopCluster starts the k3s service on each node given
 func StopCluster(nodeNames []string) error {
     for _, nodeName := range nodeNames {
-        cmd := "sudo systemctl stop k3s*"
+        cmd := "systemctl stop k3s*"
         if _, err := RunCmdOnNode(cmd, nodeName); err != nil {
             return err
         }
@@ -454,7 +454,7 @@ func StopCluster(nodeNames []string) error {
 
 // RunCmdOnNode executes a command from within the given node
 func RunCmdOnNode(cmd string, nodename string) (string, error) {
-    runcmd := "vagrant ssh " + nodename + " -c \"" + cmd + "\""
+    runcmd := "vagrant ssh " + nodename + " -c \"sudo " + cmd + "\""
     out, err := RunCommand(runcmd)
     if err != nil {
         return out, fmt.Errorf("failed to run command: %s on node %s: %s, %v", cmd, nodename, out, err)
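With sudo applied inside RunCmdOnNode, the caller side reduces to passing the bare command string, which is what the test hunks above and below now do. For illustration only, a hedged caller-side sketch; the import path is a placeholder, and the command and node name are borrowed from the snapshot test above:

package main

import (
    "fmt"

    // Placeholder import path; the real helper lives in the k3s e2e test utilities.
    e2e "example.com/k3s/tests/e2e"
)

func main() {
    // Previously: e2e.RunCmdOnNode("sudo k3s etcd-snapshot save", "server-0")
    // Now RunCmdOnNode prepends sudo, so the bare command is passed instead.
    out, err := e2e.RunCmdOnNode("k3s etcd-snapshot save", "server-0")
    if err != nil {
        fmt.Println("snapshot failed:", err)
        return
    }
    fmt.Println(out)
}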
@@ -323,7 +323,7 @@ var _ = Describe("Verify Create", Ordered, func() {
             Expect(errStop).NotTo(HaveOccurred(), "Cluster could not be stoped successfully")
 
             for _, nodeName := range serverNodeNames {
-                cmd := "sudo k3s certificate rotate"
+                cmd := "k3s certificate rotate"
                 if _, err := e2e.RunCmdOnNode(cmd, nodeName); err != nil {
                     Expect(err).NotTo(HaveOccurred(), "Certificate could not be rotated successfully")
                 }
@@ -333,7 +333,7 @@ var _ = Describe("Verify Create", Ordered, func() {
         It("Start normally", func() {
             // Since we stopped all the server, we have to start 2 at once to get it back up
             // If we only start one at a time, the first will hang waiting for the second to be up
-            _, err := e2e.RunCmdOnNode("sudo systemctl --no-block start k3s", serverNodeNames[0])
+            _, err := e2e.RunCmdOnNode("systemctl --no-block start k3s", serverNodeNames[0])
             Expect(err).NotTo(HaveOccurred())
             err = e2e.StartCluster(serverNodeNames[1:])
             Expect(err).NotTo(HaveOccurred(), "Cluster could not be started successfully")
@@ -360,7 +360,7 @@ var _ = Describe("Verify Create", Ordered, func() {
             }, "620s", "5s").Should(Succeed())
         })
         It("Validates certificates", func() {
-            const grepCert = "sudo ls -lt /var/lib/rancher/k3s/server/ | grep tls"
+            const grepCert = "ls -lt /var/lib/rancher/k3s/server/ | grep tls"
             var expectResult = []string{
                 "client-ca.crt", "client-ca.key", "client-ca.nochain.crt",
                 "client-supervisor.crt", "client-supervisor.key",
@@ -381,7 +381,7 @@ var _ = Describe("Verify Create", Ordered, func() {
             Expect(errGrep).NotTo(HaveOccurred(), "Certificate could not be created successfully")
             re := regexp.MustCompile("tls-[0-9]+")
             tls := re.FindAllString(grCert, -1)[0]
-            final := fmt.Sprintf("sudo diff -sr /var/lib/rancher/k3s/server/tls/ /var/lib/rancher/k3s/server/%s/"+
+            final := fmt.Sprintf("diff -sr /var/lib/rancher/k3s/server/tls/ /var/lib/rancher/k3s/server/%s/"+
                 "| grep -i identical | cut -f4 -d ' ' | xargs basename -a \n", tls)
             finalResult, finalErr = e2e.RunCmdOnNode(final, nodeName)
             Expect(finalErr).NotTo(HaveOccurred(), "Final Certification does not created successfully")
@@ -322,7 +322,7 @@ var _ = Describe("Test:", func() {
             MIPs := strings.Split(createcluster.MasterIPs, ",")
 
             for _, ip := range MIPs {
-                cmd := "sudo sed -i \"s/|/| INSTALL_K3S_VERSION=" + *upgradeVersion + "/g\" /tmp/master_cmd"
+                cmd := "sed -i \"s/|/| INSTALL_K3S_VERSION=" + *upgradeVersion + "/g\" /tmp/master_cmd"
                 Eventually(func(g Gomega) {
                     _, err := tf.RunCmdOnNode(cmd, ip, createcluster.AwsUser, createcluster.AccessKey)
                     g.Expect(err).NotTo(HaveOccurred())
@@ -338,7 +338,7 @@ var _ = Describe("Test:", func() {
 
             WIPs := strings.Split(createcluster.WorkerIPs, ",")
             for _, ip := range WIPs {
-                cmd := "sudo sed -i \"s/|/| INSTALL_K3S_VERSION=" + *upgradeVersion + "/g\" /tmp/agent_cmd"
+                cmd := "sed -i \"s/|/| INSTALL_K3S_VERSION=" + *upgradeVersion + "/g\" /tmp/agent_cmd"
                 Eventually(func(g Gomega) {
                     _, err := tf.RunCmdOnNode(cmd, ip, createcluster.AwsUser, createcluster.AccessKey)
                     g.Expect(err).NotTo(HaveOccurred())