mirror of https://github.com/k3s-io/k3s
[Release-1.24] Bulk Backport of Testing Changes
* Expand startup integration test
* add new data-dir subtest
* Added node flag subtest
* Fix to E2E tests
* Convert existing test to new server logging

Signed-off-by: Derek Nola <derek.nola@suse.com>

* Convert nightly install to v1.24 channel

Signed-off-by: Derek Nola <derek.nola@suse.com>

pull/6113/head
parent b8f05e4904
commit cf684c74a3
@@ -60,7 +60,7 @@ jobs:
   - name: Run Integration Tests
     run: |
       chmod +x ./dist/artifacts/k3s
-      sudo -E env "PATH=$PATH" go test ./tests/integration/... -run Integration
+      sudo -E env "PATH=$PATH" go test -v ./tests/integration/... -run Integration
   - name: On Failure, Launch Debug Session
     if: ${{ failure() }}
     uses: mxschmitt/action-tmate@v3

@@ -11,12 +11,8 @@ jobs:
   strategy:
     fail-fast: false
     matrix:
-      channel: [stable]
+      channel: [v1.24]
       vm: [centos-7, rocky-8, fedora-coreos, opensuse-leap, opensuse-microos, ubuntu-focal]
-      include:
-        - {channel: latest, vm: rocky-8}
-        - {channel: latest, vm: ubuntu-focal}
-        - {channel: latest, vm: opensuse-leap}
     max-parallel: 2
   defaults:
     run:

@@ -42,7 +42,7 @@ def dockerInstall(vm)
     v.memory = NODE_MEMORY + 1024
   end
   if vm.box.to_s.include?("ubuntu")
-    vm.provision "shell", inline: "apt install -y docker.io"
+    vm.provision "shell", inline: "apt update; apt install -y docker.io"
   end
   if vm.box.to_s.include?("Leap")
     vm.provision "shell", inline: "zypper install -y docker apparmor-parser"

@@ -111,7 +111,7 @@ def getDBType(role, role_num, vm)
   elsif EXTERNAL_DB == "postgres"
     if role.include?("server") && role_num == 0
       dockerInstall(vm)
-      vm.provision "Start postgres", inline: "docker run -d -p 5432:5432 --name #{EXTERNAL_DB} -e POSTGRES_PASSWORD=e2e postgres:14-alpine"
+      vm.provision "Start postgres", type: "shell", inline: "docker run -d -p 5432:5432 --name postgres -e POSTGRES_PASSWORD=e2e postgres:14-alpine"
      vm.provision "shell", inline: "echo \"Wait for postgres to startup\"; sleep 10"
      return "datastore-endpoint: 'postgres://postgres:e2e@#{NETWORK_PREFIX}.100:5432/k3s?sslmode=disable'"
    elsif role.include?("server") && role_num != 0

@@ -26,7 +26,7 @@ var _ = BeforeSuite(func() {
   }
 })
 
-var _ = Describe("certificate rotation", func() {
+var _ = Describe("certificate rotation", Ordered, func() {
   BeforeEach(func() {
     if testutil.IsExistingServer() && !testutil.ServerArgsPresent(serverArgs) {
       Skip("Test needs k3s server with: " + strings.Join(serverArgs, " "))

@@ -79,8 +79,11 @@ var _ = Describe("certificate rotation", func() {
 
 var _ = AfterSuite(func() {
   if !testutil.IsExistingServer() {
+    if CurrentSpecReport().Failed() {
+      testutil.K3sDumpLog(server)
+    }
     Expect(testutil.K3sKillServer(server)).To(Succeed())
-    Expect(testutil.K3sCleanup(testLock, "")).To(Succeed())
+    Expect(testutil.K3sCleanup(-1, "")).To(Succeed())
     Expect(testutil.K3sKillServer(server2)).To(Succeed())
     Expect(testutil.K3sCleanup(testLock, tmpdDataDir)).To(Succeed())
   }

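Note: the pattern introduced here (and repeated in the suites below) is Ginkgo v2's Ordered container plus a log dump from AfterSuite when something failed. A minimal self-contained sketch of that shape, with illustrative names rather than the real k3s helpers:

package ordered_example_test

import (
  "fmt"
  "testing"

  . "github.com/onsi/ginkgo/v2"
  . "github.com/onsi/gomega"
)

func TestOrderedExample(t *testing.T) {
  RegisterFailHandler(Fail)
  RunSpecs(t, "Ordered Example Suite")
}

// Ordered keeps the specs running in the order they are written,
// which matters when one spec starts a server and later specs use it.
var _ = Describe("ordered example", Ordered, func() {
  It("starts a fake server", func() { Expect(true).To(BeTrue()) })
  It("uses the fake server", func() { Expect(1 + 1).To(Equal(2)) })
})

var _ = AfterSuite(func() {
  // The k3s suites check CurrentSpecReport().Failed() here and dump the
  // server log (testutil.K3sDumpLog) before killing the server.
  if CurrentSpecReport().Failed() {
    fmt.Println("a real suite would dump server logs here")
  }
})
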
@@ -27,7 +27,7 @@ var _ = BeforeSuite(func() {
   }
 })
 
-var _ = Describe("custom etcd args", func() {
+var _ = Describe("custom etcd args", Ordered, func() {
   BeforeEach(func() {
     if testutil.IsExistingServer() && !testutil.ServerArgsPresent(customEtcdArgsServerArgs) {
       Skip("Test needs k3s server with: " + strings.Join(customEtcdArgsServerArgs, " "))

@@ -54,6 +54,9 @@ var _ = Describe("custom etcd args", func() {
 
 var _ = AfterSuite(func() {
   if !testutil.IsExistingServer() {
+    if CurrentSpecReport().Failed() {
+      testutil.K3sDumpLog(customEtcdArgsServer)
+    }
     Expect(testutil.K3sKillServer(customEtcdArgsServer)).To(Succeed())
     Expect(testutil.K3sCleanup(testLock, "")).To(Succeed())
   }

@@ -29,7 +29,7 @@ var _ = BeforeSuite(func() {
   }
 })
 
-var _ = Describe("dual stack", func() {
+var _ = Describe("dual stack", Ordered, func() {
   BeforeEach(func() {
     if testutil.IsExistingServer() && !testutil.ServerArgsPresent(dualStackServerArgs) {
       Skip("Test needs k3s server with: " + strings.Join(dualStackServerArgs, " "))

@@ -55,6 +55,9 @@ var _ = Describe("dual stack", func() {
 
 var _ = AfterSuite(func() {
   if !testutil.IsExistingServer() && os.Getenv("CI") != "true" {
+    if CurrentSpecReport().Failed() {
+      testutil.K3sDumpLog(dualStackServer)
+    }
     Expect(testutil.K3sKillServer(dualStackServer)).To(Succeed())
     Expect(testutil.K3sCleanup(testLock, "")).To(Succeed())
   }

@@ -24,7 +24,7 @@ var _ = BeforeSuite(func() {
   }
 })
 
-var _ = Describe("etcd snapshot restore", func() {
+var _ = Describe("etcd snapshot restore", Ordered, func() {
   BeforeEach(func() {
     if testutil.IsExistingServer() && !testutil.ServerArgsPresent(restoreServerArgs) {
       Skip("Test needs k3s server with: " + strings.Join(restoreServerArgs, " "))

@@ -108,6 +108,9 @@ var _ = Describe("etcd snapshot restore", func() {
 
 var _ = AfterSuite(func() {
   if !testutil.IsExistingServer() {
+    if CurrentSpecReport().Failed() {
+      testutil.K3sDumpLog(server1)
+    }
     Expect(testutil.K3sKillServer(server1)).To(Succeed())
     Expect(testutil.K3sKillServer(server2)).To(Succeed())
     Expect(testutil.K3sCleanup(testLock, tmpdDataDir)).To(Succeed())

@@ -25,7 +25,7 @@ var _ = BeforeSuite(func() {
   }
 })
 
-var _ = Describe("etcd snapshots", func() {
+var _ = Describe("etcd snapshots", Ordered, func() {
   BeforeEach(func() {
     if testutil.IsExistingServer() && !testutil.ServerArgsPresent(serverArgs) {
       Skip("Test needs k3s server with: " + strings.Join(serverArgs, " "))

@@ -115,6 +115,9 @@ var _ = Describe("etcd snapshots", func() {
 
 var _ = AfterSuite(func() {
   if !testutil.IsExistingServer() {
+    if CurrentSpecReport().Failed() {
+      testutil.K3sDumpLog(server)
+    }
     Expect(testutil.K3sKillServer(server)).To(Succeed())
     Expect(testutil.K3sCleanup(testLock, "")).To(Succeed())
   }

@@ -6,6 +6,7 @@ import (
   "context"
   "encoding/json"
   "fmt"
+  "io/ioutil"
   "os"
   "os/exec"
   "os/user"

@@ -16,6 +17,7 @@ import (
   "github.com/pkg/errors"
   "github.com/sirupsen/logrus"
   "github.com/vishvananda/netlink"
+  "golang.org/x/sys/unix"
   corev1 "k8s.io/api/core/v1"
   metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
   "k8s.io/client-go/kubernetes"

@@ -28,8 +30,8 @@ var existingServer = "False"
 const lockFile = "/tmp/k3s-test.lock"
 
 type K3sServer struct {
-  cmd *exec.Cmd
-  scanner *bufio.Scanner
+  cmd *exec.Cmd
+  log *os.File
 }
 
 func findK3sExecutable() string {

@@ -160,12 +162,12 @@ func CheckDeployments(deployments []string) error {
   return nil
 }
 
-func ParsePods() ([]corev1.Pod, error) {
+func ParsePods(opts metav1.ListOptions) ([]corev1.Pod, error) {
   clientSet, err := k8sClient()
   if err != nil {
     return nil, err
   }
-  pods, err := clientSet.CoreV1().Pods("").List(context.Background(), metav1.ListOptions{})
+  pods, err := clientSet.CoreV1().Pods("").List(context.Background(), opts)
   if err != nil {
     return nil, err
   }

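Note: callers of the updated ParsePods now supply the pod filter themselves. A small usage sketch, assuming a running k3s server with the default /etc/rancher/k3s/k3s.yaml kubeconfig; the label selector value is only illustrative:

package main

import (
  "fmt"

  testutil "github.com/k3s-io/k3s/tests/integration"
  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
  // metav1.ListOptions{} keeps the old "list every pod" behaviour;
  // a LabelSelector narrows the listing (the selector below is an example).
  pods, err := testutil.ParsePods(metav1.ListOptions{LabelSelector: "k8s-app=kube-dns"})
  if err != nil {
    fmt.Println("listing pods failed:", err)
    return
  }
  fmt.Println(len(pods), "pods matched the selector")
}
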
@@ -217,14 +219,20 @@ func K3sStartServer(inputArgs ...string) (*K3sServer, error) {
   cmd := exec.Command(k3sBin, k3sCmd...)
   // Give the server a new group id so we can kill it and its children later
   cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
-  cmdOut, _ := cmd.StderrPipe()
-  cmd.Stderr = os.Stderr
-  err := cmd.Start()
-  return &K3sServer{cmd, bufio.NewScanner(cmdOut)}, err
+  // Pipe output to a file for debugging later
+  f, err := os.Create("./k3log.txt")
+  if err != nil {
+    return nil, err
+  }
+  cmd.Stderr = f
+  err = cmd.Start()
+  return &K3sServer{cmd, f}, err
 }
 
 // K3sKillServer terminates the running K3s server and its children
 func K3sKillServer(server *K3sServer) error {
+  server.log.Close()
+  os.Remove(server.log.Name())
   pgid, err := syscall.Getpgid(server.cmd.Process.Pid)
   if err != nil {
     if errors.Is(err, syscall.ESRCH) {

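Note: the Setpgid/Getpgid lines above are the process-group trick that lets K3sKillServer take down k3s and all of its children at once. A generic, standalone sketch of the same idea (not the k3s helper itself), assuming a Unix-like system:

package main

import (
  "fmt"
  "os/exec"
  "syscall"
)

func main() {
  // Start a child in its own process group so the whole tree can be signalled.
  cmd := exec.Command("sh", "-c", "sleep 60 & sleep 60 & wait")
  cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
  if err := cmd.Start(); err != nil {
    panic(err)
  }

  pgid, err := syscall.Getpgid(cmd.Process.Pid)
  if err != nil {
    panic(err)
  }
  // A negative PID in kill(2) targets every process in the group.
  if err := syscall.Kill(-pgid, syscall.SIGKILL); err != nil {
    panic(err)
  }
  cmd.Process.Wait()
  fmt.Println("killed process group", pgid)
}
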
@@ -242,6 +250,10 @@ func K3sKillServer(server *K3sServer) error {
   if _, err = server.cmd.Process.Wait(); err != nil {
     return errors.Wrap(err, "failed to wait for k3s process exit")
   }
+  //Unmount all the associated filesystems
+  unmountFolder("/run/k3s")
+  unmountFolder("/run/netns/cni-")
+
   return nil
 }
 

@@ -278,6 +290,21 @@ func K3sCleanup(k3sTestLock int, dataDir string) error {
   return nil
 }
 
+func K3sDumpLog(server *K3sServer) error {
+  server.log.Close()
+  log, err := os.Open(server.log.Name())
+  if err != nil {
+    return err
+  }
+  defer log.Close()
+  b, err := ioutil.ReadAll(log)
+  if err != nil {
+    return err
+  }
+  fmt.Printf("Server Log Dump:\n\n%s\n\n", b)
+  return nil
+}
+
 // RunCommand Runs command on the host
 func RunCommand(cmd string) (string, error) {
   c := exec.Command("bash", "-c", cmd)

@@ -290,6 +317,32 @@ func RunCommand(cmd string) (string, error) {
   return out.String(), nil
 }
 
+func unmountFolder(folder string) error {
+  mounts := []string{}
+  f, err := os.ReadFile("/proc/self/mounts")
+  if err != nil {
+    return err
+  }
+  for _, line := range strings.Split(string(f), "\n") {
+    columns := strings.Split(line, " ")
+    if len(columns) > 1 {
+      mounts = append(mounts, columns[1])
+    }
+  }
+
+  for _, mount := range mounts {
+    if strings.Contains(mount, folder) {
+      if err := unix.Unmount(mount, 0); err != nil {
+        return err
+      }
+      if err = os.Remove(mount); err != nil {
+        return err
+      }
+    }
+  }
+  return nil
+}
+
 func k8sClient() (*kubernetes.Clientset, error) {
   config, err := clientcmd.BuildConfigFromFlags("", "/etc/rancher/k3s/k3s.yaml")
   if err != nil {

@@ -84,6 +84,9 @@ var _ = Describe("local storage", func() {
 
 var _ = AfterSuite(func() {
   if !testutil.IsExistingServer() {
+    if CurrentSpecReport().Failed() {
+      testutil.K3sDumpLog(localStorageServer)
+    }
     Expect(testutil.K3sKillServer(localStorageServer)).To(Succeed())
     Expect(testutil.K3sCleanup(testLock, "")).To(Succeed())
   }

@@ -26,7 +26,7 @@ var _ = BeforeSuite(func() {
   }
 })
 
-var _ = Describe("secrets encryption rotation", func() {
+var _ = Describe("secrets encryption rotation", Ordered, func() {
   BeforeEach(func() {
     if testutil.IsExistingServer() {
       Skip("Test does not support running on existing k3s servers")

@@ -142,6 +142,9 @@ var _ = Describe("secrets encryption rotation", func() {
 
 var _ = AfterSuite(func() {
   if !testutil.IsExistingServer() {
+    if CurrentSpecReport().Failed() {
+      testutil.K3sDumpLog(secretsEncryptionServer)
+    }
     Expect(testutil.K3sKillServer(secretsEncryptionServer)).To(Succeed())
     Expect(testutil.K3sCleanup(testLock, secretsEncryptionDataDir)).To(Succeed())
   }

@@ -1,11 +1,15 @@
 package integration
 
 import (
   "os"
+  "path/filepath"
   "testing"
 
   testutil "github.com/k3s-io/k3s/tests/integration"
   . "github.com/onsi/ginkgo/v2"
   . "github.com/onsi/gomega"
+
+  . "github.com/onsi/gomega/gstruct"
+  v1 "k8s.io/api/core/v1"
 )
 

@@ -22,7 +26,7 @@ var _ = BeforeSuite(func() {
   Expect(err).ToNot(HaveOccurred())
 })
 
-var _ = Describe("startup tests", func() {
+var _ = Describe("startup tests", Ordered, func() {
 
   When("a default server is created", func() {
     It("is created with no arguments", func() {

@@ -33,10 +37,11 @@ var _ = Describe("startup tests", func() {
     It("has the default pods deployed", func() {
       Eventually(func() error {
         return testutil.K3sDefaultDeployments()
-      }, "90s", "5s").Should(Succeed())
+      }, "120s", "5s").Should(Succeed())
     })
     It("dies cleanly", func() {
       Expect(testutil.K3sKillServer(startupServer)).To(Succeed())
+      Expect(testutil.K3sCleanup(-1, "")).To(Succeed())
     })
   })
   When("a etcd backed server is created", func() {

@@ -49,10 +54,11 @@ var _ = Describe("startup tests", func() {
     It("has the default pods deployed", func() {
       Eventually(func() error {
         return testutil.K3sDefaultDeployments()
-      }, "90s", "5s").Should(Succeed())
+      }, "120s", "5s").Should(Succeed())
     })
     It("dies cleanly", func() {
      Expect(testutil.K3sKillServer(startupServer)).To(Succeed())
+      Expect(testutil.K3sCleanup(-1, "")).To(Succeed())
     })
   })
   When("a server without traefik is created", func() {

@@ -66,6 +72,9 @@ var _ = Describe("startup tests", func() {
       Eventually(func() error {
         return testutil.CheckDeployments([]string{"coredns", "local-path-provisioner", "metrics-server"})
       }, "90s", "10s").Should(Succeed())
+      nodes, err := testutil.ParseNodes()
+      Expect(err).NotTo(HaveOccurred())
+      Expect(nodes).To(HaveLen(1))
     })
     It("dies cleanly", func() {
       Expect(testutil.K3sKillServer(startupServer)).To(Succeed())

@@ -88,7 +97,7 @@ var _ = Describe("startup tests", func() {
     It("has the node deployed with correct IPs", func() {
       Eventually(func() error {
         return testutil.K3sDefaultDeployments()
-      }, "90s", "10s").Should(Succeed())
+      }, "120s", "10s").Should(Succeed())
 
       nodes, err := testutil.ParseNodes()
       Expect(err).NotTo(HaveOccurred())

@@ -105,14 +114,86 @@ var _ = Describe("startup tests", func() {
     })
     It("dies cleanly", func() {
       Expect(testutil.K3sKillServer(startupServer)).To(Succeed())
+      Expect(testutil.K3sCleanup(-1, "")).To(Succeed())
       Expect(testutil.RunCommand("ip link del dummy2")).To(Equal(""))
       Expect(testutil.RunCommand("ip link del dummy3")).To(Equal(""))
     })
   })
+  When("a server with different data-dir is created", func() {
+    var tempDir string
+    It("creates a temp directory", func() {
+      var err error
+      tempDir, err = os.MkdirTemp("", "k3s-data-dir")
+      Expect(err).ToNot(HaveOccurred())
+    })
+    It("is created with data-dir flag", func() {
+      var err error
+      startupServerArgs = []string{"--data-dir", tempDir}
+      startupServer, err = testutil.K3sStartServer(startupServerArgs...)
+      Expect(err).ToNot(HaveOccurred())
+    })
+    It("has the default pods deployed", func() {
+      Eventually(func() error {
+        return testutil.K3sDefaultDeployments()
+      }, "120s", "5s").Should(Succeed())
+      nodes, err := testutil.ParseNodes()
+      Expect(err).NotTo(HaveOccurred())
+      Expect(nodes).To(HaveLen(1))
+    })
+    It("has the correct files in the temp data-dir", func() {
+      _, err := os.Stat(filepath.Join(tempDir, "server", "tls", "server-ca.key"))
+      Expect(err).ToNot(HaveOccurred())
+      _, err = os.Stat(filepath.Join(tempDir, "server", "token"))
+      Expect(err).ToNot(HaveOccurred())
+      _, err = os.Stat(filepath.Join(tempDir, "agent", "client-kubelet.crt"))
+      Expect(err).ToNot(HaveOccurred())
+    })
+    It("dies cleanly", func() {
+      Expect(testutil.K3sKillServer(startupServer)).To(Succeed())
+      Expect(testutil.K3sCleanup(-1, tempDir)).To(Succeed())
+    })
+  })
+  When("a server with different node options is created", func() {
+    It("is created with node-name with-node-id, node-label and node-taint flags", func() {
+      var err error
+      startupServerArgs = []string{"--node-name", "customnoder", "--with-node-id", "--node-label", "foo=bar", "--node-taint", "alice=bob:PreferNoSchedule"}
+      startupServer, err = testutil.K3sStartServer(startupServerArgs...)
+      Expect(err).ToNot(HaveOccurred())
+    })
+    It("has the default pods deployed", func() {
+      Eventually(func() error {
+        return testutil.K3sDefaultDeployments()
+      }, "120s", "5s").Should(Succeed())
+      nodes, err := testutil.ParseNodes()
+      Expect(err).NotTo(HaveOccurred())
+      Expect(nodes).To(HaveLen(1))
+    })
+    var nodes []v1.Node
+    It("has a custom node name with id appended", func() {
+      var err error
+      nodes, err = testutil.ParseNodes()
+      Expect(err).NotTo(HaveOccurred())
+      Expect(nodes).To(HaveLen(1))
+      Expect(nodes[0].Name).To(MatchRegexp(`-[0-9a-f]*`))
+      Expect(nodes[0].Name).To(ContainSubstring("customnoder"))
+    })
+    It("has proper node labels and taints", func() {
+      Expect(nodes[0].ObjectMeta.Labels).To(MatchKeys(IgnoreExtras, Keys{
+        "foo": Equal("bar"),
+      }))
+      Expect(nodes[0].Spec.Taints).To(ContainElement(v1.Taint{Key: "alice", Value: "bob", Effect: v1.TaintEffectPreferNoSchedule}))
+    })
+    It("dies cleanly", func() {
+      Expect(testutil.K3sKillServer(startupServer)).To(Succeed())
+    })
+  })
 })
 
 var _ = AfterSuite(func() {
   if !testutil.IsExistingServer() {
+    if CurrentSpecReport().Failed() {
+      testutil.K3sDumpLog(startupServer)
+    }
     Expect(testutil.K3sCleanup(testLock, "")).To(Succeed())
   }
 })