[release-1.22] tests/vagrant (#4499)

From c77efe64e1:
- workflow: cgroup2 ➡️ cgroup
- tests/cgroup2/ ➡️ tests/vagrant/cgroup/unified/fedora-34/

Signed-off-by: Jacob Blain Christen <jacob@rancher.com>
pull/4541/head
Jacob Blain Christen 2021-11-17 16:30:23 -07:00 committed by GitHub
parent 6c34ce8fef
commit b13459b5cf
12 changed files with 362 additions and 179 deletions

.github/workflows/cgroup.yaml (new file)

@@ -0,0 +1,91 @@
name: Control Group
on:
push:
paths-ignore:
- "**.md"
- "channel.yaml"
- "install.sh"
- "tests/**"
- "!tests/vagrant/cgroup/**"
- ".github/**"
- "!.github/workflows/cgroup.yaml"
pull_request:
paths-ignore:
- "**.md"
- "channel.yaml"
- "install.sh"
- "tests/**"
- "!tests/vagrant/cgroup/**"
- ".github/**"
- "!.github/workflows/cgroup.yaml"
workflow_dispatch: {}
jobs:
prep:
name: "Prepare"
runs-on: ubuntu-20.04
timeout-minutes: 40
steps:
- name: "Checkout"
uses: actions/checkout@v2
with: { fetch-depth: 1 }
- name: "Build"
run: DOCKER_BUILDKIT=1 SKIP_VALIDATE=1 make
- name: "Upload"
uses: actions/upload-artifact@v2
with: { name: k3s, path: dist/artifacts/k3s }
test:
name: "Smoke Test"
needs: prep
# nested virtualization is only available on macOS hosts
runs-on: macos-10.15
timeout-minutes: 40
strategy:
fail-fast: false
matrix:
vm: [fedora-34]
mode: [unified]
max-parallel: 1
defaults:
run:
working-directory: tests/vagrant/cgroup/${{ matrix.mode }}/${{ matrix.vm }}
steps:
- name: "Checkout"
uses: actions/checkout@v2
with: { fetch-depth: 1 }
- name: "Download Binary"
uses: actions/download-artifact@v2
with: { name: k3s, path: dist/artifacts/ }
- name: "Vagrant Cache"
uses: actions/cache@v2
with:
path: |
~/.vagrant.d/boxes
~/.vagrant.d/gems
key: cgroup-${{ hashFiles(format('tests/vagrant/cgroup/{0}/{1}/Vagrantfile', matrix.mode, matrix.vm)) }}
id: vagrant-cache
continue-on-error: true
- name: "Vagrant Plugin(s)"
run: vagrant plugin install vagrant-k3s
- name: "Vagrant Up ⏩ Install K3s"
run: vagrant up
- name: "K3s Start" # start k3s rootfull
run: vagrant ssh -- sudo systemctl start k3s-server
- name: "K3s Ready" # wait for k3s to be ready
run: vagrant provision --provision-with=k3s-ready
- name: "K3s Status" # kubectl get node,all -A -o wide
run: vagrant provision --provision-with=k3s-status
- name: "Sonobuoy (--mode=quick)"
env: {TEST_RESULTS_PATH: rootfull}
run: vagrant provision --provision-with=k3s-sonobuoy
- name: "K3s Stop" # stop k3s rootfull
run: vagrant ssh -- sudo systemctl stop k3s-server
- name: "Vagrant Reload"
run: vagrant reload
- name: "[Rootless] Starting K3s"
run: vagrant ssh -- systemctl --user start k3s-rootless
- name: "[Rootless] K3s Ready"
env: {TEST_KUBECONFIG: /home/vagrant/.kube/k3s.yaml}
run: vagrant provision --provision-with=k3s-ready
- name: "[Rootless] Sonobuoy (--mode=quick)"
env: {TEST_KUBECONFIG: /home/vagrant/.kube/k3s.yaml, TEST_RESULTS_PATH: rootless}
run: vagrant provision --provision-with=k3s-sonobuoy

.github/workflows/cgroup2.yaml (deleted)

@@ -1,76 +0,0 @@
name: cgroup2
on:
push:
paths-ignore:
- "**.md"
pull_request:
paths-ignore:
- "**.md"
workflow_dispatch: {}
jobs:
build:
name: "Build"
runs-on: ubuntu-20.04
timeout-minutes: 40
steps:
- name: "Checkout"
uses: actions/checkout@v2
with:
fetch-depth: 1
- name: "Make"
run: DOCKER_BUILDKIT=1 SKIP_VALIDATE=1 make
- name: "Upload k3s binary"
uses: actions/upload-artifact@v2
with:
name: k3s
path: dist/artifacts/k3s
test:
name: "Test"
needs: build
# nested virtualization is only available on macOS hosts
runs-on: macos-10.15
timeout-minutes: 40
steps:
- name: "Checkout"
uses: actions/checkout@v2
with:
fetch-depth: 1
- name: "Download k3s binary"
uses: actions/download-artifact@v2
with:
name: k3s
path: ./tests/cgroup2
- name: "Boot Fedora VM"
run: |
cp -r k3s.service k3s-rootless.service ./tests/util ./tests/cgroup2
cd ./tests/cgroup2
vagrant up
vagrant ssh-config >> ~/.ssh/config
- name: "Starting k3s"
run: |
ssh default -- sudo systemctl start k3s
# Sonobuoy requires CoreDNS to be ready
- name: "Waiting for CoreDNS to be ready"
run: |
ssh default -- sudo KUBECONFIG=/etc/rancher/k3s/k3s.yaml /vagrant/util/wait-for-coredns.sh
# Vagrant is slow, so we set --mode=quick here
- name: "Run Sonobuoy (--mode=quick)"
run: |
ssh default -- sudo KUBECONFIG=/etc/rancher/k3s/k3s.yaml sonobuoy run --mode=quick --wait
- name: "Stopping k3s"
run: |
ssh default -- sudo systemctl stop k3s
# FIXME: rootful k3s processes are still running even after `systemctl stop k3s`, so we reboot the VM here.
# This reboot is also useful for ensuring `systemctl daemon-reload`: https://github.com/rootless-containers/rootlesscontaine.rs/issues/32
cd ./tests/cgroup2
vagrant halt
vagrant up
- name: "[Rootless] Starting k3s-rootless"
run: |
ssh default -- systemctl --user start k3s-rootless
- name: "[Rootless] Waiting for CoreDNS to be ready"
run: |
ssh default -- KUBECONFIG=/home/vagrant/.kube/k3s.yaml /vagrant/util/wait-for-coredns.sh
- name: "[Rootless] Run Sonobuoy (--mode=quick)"
run: |
ssh default -- KUBECONFIG=/home/vagrant/.kube/k3s.yaml sonobuoy run --mode=quick --wait

.github/workflows/integration.yaml

@@ -1,11 +1,21 @@
name: Integration Test Coverage
on:
push:
paths-ignore:
paths-ignore:
- "**.md"
- "channel.yaml"
- "install.sh"
- "tests/vagrant/**"
- ".github/**"
- "!.github/workflows/integration.yaml"
pull_request:
paths-ignore:
- "**.md"
- "channel.yaml"
- "install.sh"
- "tests/vagrant/**"
- ".github/**"
- "!.github/workflows/integration.yaml"
workflow_dispatch: {}
jobs:
build:
@@ -48,6 +58,8 @@ jobs:
chmod +x ./dist/artifacts/k3s
go test -coverpkg=./... -coverprofile=coverage.out ./pkg/... -run Integration
go tool cover -func coverage.out
# these tests do not relate to coverage and must be run separately
go test ./tests/integration/... -run Integration
- name: On Failure, Launch Debug Session
if: ${{ failure() }}
uses: mxschmitt/action-tmate@v3

.github/workflows/unitcoverage.yaml

@@ -1,11 +1,21 @@
name: Unit Test Coverage
on:
on:
push:
paths-ignore:
paths-ignore:
- "**.md"
- "channel.yaml"
- "install.sh"
- "tests/vagrant/**"
- ".github/**"
- "!.github/workflows/unitcoverage.yaml"
pull_request:
paths-ignore:
- "**.md"
- "channel.yaml"
- "install.sh"
- "tests/vagrant/**"
- ".github/**"
- "!.github/workflows/unitcoverage.yaml"
workflow_dispatch: {}
jobs:
test:

tests/TESTING.md

@@ -4,7 +4,7 @@ Go testing in K3s comes in 3 forms: Unit, Integration, and End-to-End (E2E). This
document will explain *when* each test should be written and *how* each test should be
generated, formatted, and run.
Note: all shell commands given are relateive to the root k3s repo directory.
Note: all shell commands given are relative to the root k3s repo directory.
___
## Unit Tests
@@ -60,25 +60,25 @@ To facilitate K3s CLI testing, see `tests/util/cmd.go` helper functions.
Integration tests can be placed in two areas:
1. Next to the go package they intend to test.
2. In `tests/integration/` for package agnostic testing.
2. In `tests/integration/<TESTNAME>` for package agnostic testing.
Package specific integration tests should use the `<PACKAGE_UNDER_TEST>_test` package.
Package agnostic integration tests should use the `integration` package.
All integration test files should be named: `<TEST_NAME>_int_test.go`
All integration test functions should be named: `Test_Integration<Test_Name>`.
See the [etcd snapshot test](https://github.com/k3s-io/k3s/blob/master/pkg/etcd/etcd_int_test.go) as a package specific example.
See the [local storage test](https://github.com/k3s-io/k3s/blob/master/tests/integration/localstorage_int_test.go) as a package agnostic example.
See the [local storage test](https://github.com/k3s-io/k3s/blob/master/tests/integration/localstorage/localstorage_int_test.go) as a package agnostic example.
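As a minimal sketch of these conventions (the "example" feature name, its directory `tests/integration/example/example_int_test.go`, and the Ginkgo/Gomega import paths are illustrative assumptions; the `testutil` helpers are the ones referenced above), a package agnostic test might look like:
```go
package integration

import (
	"testing"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"

	testutil "github.com/rancher/k3s/tests/util"
)

// Hypothetical fixture: one server per suite, guarded by the shared test lock.
var exampleServer *testutil.K3sServer
var exampleServerArgs = []string{"--cluster-init"}

var _ = BeforeSuite(func() {
	if !testutil.IsExistingServer() {
		var err error
		exampleServer, err = testutil.K3sStartServer(exampleServerArgs...)
		Expect(err).ToNot(HaveOccurred())
	}
})

var _ = Describe("example feature", func() {
	When("the server is up", func() {
		It("reports a ready node", func() {
			Eventually(func() (string, error) {
				return testutil.K3sCmd("kubectl", "get", "nodes")
			}, "90s", "1s").Should(ContainSubstring("Ready"))
		})
	})
})

var _ = AfterSuite(func() {
	if !testutil.IsExistingServer() {
		Expect(testutil.K3sKillServer(exampleServer)).To(Succeed())
	}
})

func Test_IntegrationExample(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Example Integration Suite")
}
```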
### Running
Integration tests can be run with no k3s cluster present; each test will spin up and kill the appropriate k3s server it needs.
```bash
go test ./pkg/... ./tests/... -run Integration
go test ./pkg/... ./tests/integration/... -run Integration
```
Integration tests can be run on an existing single-node cluster via a compile-time flag; tests will skip if the server is not configured correctly.
```bash
go test -ldflags "-X 'github.com/rancher/k3s/tests/util.existingServer=True'" ./pkg/... ./tests/... -run Integration
go test -ldflags "-X 'github.com/rancher/k3s/tests/util.existingServer=True'" ./pkg/... ./tests/integration/... -run Integration
```
Integration tests can also be run via a [Sonobuoy](https://sonobuoy.io/docs/v0.53.2/) plugin on an existing single-node cluster.
@@ -95,6 +95,70 @@ sudo KUBECONFIG=/etc/rancher/k3s/k3s.yaml sonobuoy results <TAR_FILE_FROM_RETRIEVE>
___
## Smoke Tests
Smoke tests are defined under the [tests/vagrant](../tests/vagrant) path at the root of this repository.
The sub-directories therein contain fixtures for running simple clusters to assert correct behavior for "happy path"
scenarios. These fixtures are mostly self-contained Vagrantfiles describing single-node installations that are
easily spun up with Vagrant for the `libvirt` and `virtualbox` providers:
- [Control Groups](../tests/vagrant/cgroup) :arrow_right: on any code change
- [mode=unified](../tests/vagrant/cgroup/unified) (cgroups v2)
- [Fedora 34](../tests/vagrant/cgroup/unified/fedora-34) (rootfull + rootless)
When adding new smoke test(s) please copy the prevalent style for the `Vagrantfile`.
Ideally, the boxes used for additional assertions will support the default `virtualbox` provider which
enables them to be used by our Github Actions Workflow(s). See:
- [cgroup.yaml](../.github/workflows/cgroup.yaml).
### Framework
If you are new to Vagrant, Hashicorp has written some pretty decent introductory tutorials and docs, see:
- https://learn.hashicorp.com/collections/vagrant/getting-started
- https://www.vagrantup.com/docs/installation
#### Plugins and Providers
The `libvirt` and `vmware_desktop` providers cannot be used without first [installing the relevant plugins](https://www.vagrantup.com/docs/cli/plugin#plugin-install)
which are [`vagrant-libvirt`](https://github.com/vagrant-libvirt/vagrant-libvirt) and
[`vagrant-vmware-desktop`](https://www.vagrantup.com/docs/providers/vmware/installation), respectively.
Much like the default [`virtualbox` provider](https://www.vagrantup.com/docs/providers/virtualbox) these will do
nothing useful without also installing the relevant server runtimes and/or client programs.
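For example (a sketch; the plugin names are the ones mentioned above, and the `vagrant-k3s` line mirrors what the cgroup workflow installs):
```shell
vagrant plugin install vagrant-libvirt
vagrant plugin install vagrant-vmware-desktop
# the Vagrantfiles in this repo also declare the vagrant-k3s provisioner plugin
vagrant plugin install vagrant-k3s
```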
#### Environment Variables
These can be set on the CLI or exported before invoking Vagrant:
- `TEST_VM_CPUS` (default :arrow_right: 2)<br/>
The number of vCPU for the guest to use.
- `TEST_VM_MEMORY` (default :arrow_right: 2048)<br/>
The number of megabytes of memory for the guest to use.
- `TEST_VM_BOOT_TIMEOUT` (default :arrow_right: 600)<br/>
The time in seconds that Vagrant will wait for the machine to boot and be accessible.
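For example (values are illustrative):
```shell
# one-off override on the command line
TEST_VM_CPUS=4 TEST_VM_MEMORY=4096 TEST_VM_BOOT_TIMEOUT=900 vagrant up

# or export for the whole session
export TEST_VM_CPUS=4
vagrant up
```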
### Running
The **Install Script** tests can be run by changing to the fixture directory and invoking `vagrant up`, e.g.:
```shell
cd tests/vagrant/install/centos-8
vagrant up
# the following provisioners are optional. they do not run by default but are invoked
# explicitly by github actions workflow to avoid certain timeout issues on slow runners
vagrant provision --provision-with=k3s-wait-for-node
vagrant provision --provision-with=k3s-wait-for-coredns
vagrant provision --provision-with=k3s-wait-for-local-storage
vagrant provision --provision-with=k3s-wait-for-metrics-server
vagrant provision --provision-with=k3s-wait-for-traefik
vagrant provision --provision-with=k3s-status
vagrant provision --provision-with=k3s-procps
```
The **Control Groups** and **Snapshotter** tests require that the k3s binary is built at `dist/artifacts/k3s`.
They are invoked similarly, i.e. `vagrant up`, but with different sets of named shell provisioners.
Take a look at the individual Vagrantfiles and/or the Github Actions workflows that harness them to get
an idea of how they can be invoked.
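As a rough sketch based on the Control Groups fixture added here and the steps in [cgroup.yaml](../.github/workflows/cgroup.yaml) (the Snapshotter fixture follows the same pattern with its own provisioners):
```shell
# build the binary so it lands in dist/artifacts/k3s
DOCKER_BUILDKIT=1 SKIP_VALIDATE=1 make

cd tests/vagrant/cgroup/unified/fedora-34
vagrant up                                       # runs the k3s-prepare and k3s-install provisioners
vagrant ssh -- sudo systemctl start k3s-server   # start rootfull k3s
vagrant provision --provision-with=k3s-ready     # wait for the node and CoreDNS
vagrant provision --provision-with=k3s-status    # kubectl get node,all -A -o wide
vagrant provision --provision-with=k3s-sonobuoy  # sonobuoy run --mode=quick
```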
___
## End-to-End (E2E) Tests
End-to-end tests utilize [Ginkgo](https://onsi.github.io/ginkgo/) and [Gomega](https://onsi.github.io/gomega/) like the integration tests, but rely on separate testing utilities and are self-contained within the `test/e2e` directory. E2E tests cover complete K3s single and multi-cluster configuration and administration: bringup, update, teardown etc.
@@ -111,7 +175,7 @@ See the [upgrade cluster test](https://github.com/k3s-io/k3s/blob/master/tests/e
Generally, E2E tests are run as a nightly Jenkins job for QA. They can still be run locally but additional setup may be required.
```bash
go test ./tests/... -run E2E
go test ./tests/e2e... -run E2E
```
## Contributing New Or Updated Tests

tests/cgroup2/.gitignore (deleted)

@@ -1,5 +0,0 @@
k3s
k3s.service
k3s-rootless.service
util/
.vagrant/

tests/cgroup2/Vagrantfile (deleted)

@@ -1,61 +0,0 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
# Vagrant box for testing k3s with cgroup v2.
# Invoked via k3s/.github/workflows/cgroup2.yaml .
#
# The following files need to be present in this directory:
# - k3s
# - k3s.service
# - k3s-rootless.service
# - util/
Vagrant.configure("2") do |config|
config.vm.box = "fedora/34-cloud-base"
memory = 2048
cpus = 2
config.vm.provider :virtualbox do |v|
v.memory = memory
v.cpus = cpus
end
config.vm.provider :libvirt do |v|
v.memory = memory
v.cpus = cpus
end
config.vm.provision "install-k3s", type: "shell", run: "once" do |sh|
sh.inline = <<~SHELL
set -eux -o pipefail
# Install k3s binary
install -m 755 /vagrant/k3s /usr/local/bin
ln -sf /usr/local/bin/k3s /usr/local/bin/kubectl
# Install k3s systemd service (not launched here)
cp -f /vagrant/k3s.service /etc/systemd/system/k3s.service
touch /etc/systemd/system/k3s.service.env
systemctl daemon-reload
# Install sonobuoy binary
curl -fsSL https://github.com/vmware-tanzu/sonobuoy/releases/download/v0.20.0/sonobuoy_0.20.0_linux_amd64.tar.gz | tar xzvC /usr/local/bin sonobuoy
# [Rootless] Configure sysctl
echo "net.ipv4.ip_forward=1" > /etc/sysctl.d/rootless.conf
sysctl --system
# [Rootless] Enable cgroup v2 delegation
mkdir -p /etc/systemd/system/user@.service.d
cat <<-EOF > /etc/systemd/system/user@.service.d/delegate.conf
[Service]
Delegate=yes
EOF
systemctl daemon-reload
# [Rootless] Enable systemd lingering
loginctl enable-linger vagrant
# [Rootless] Install k3s-rootless systemd service (not launched here)
mkdir -p /home/vagrant/.config/systemd/user
cp -f /vagrant/k3s-rootless.service /home/vagrant/.config/systemd/user/k3s-rootless.service
chown -R vagrant:vagrant /home/vagrant/.config
SHELL
end
end

tests/integration/dualstack/dualstack_int_test.go

@@ -1,6 +1,7 @@
package integration
import (
"os"
"strings"
"testing"
@@ -10,11 +11,17 @@ import (
testutil "github.com/rancher/k3s/tests/util"
)
var dualStackServerArgs = []string{"--cluster-init", "--cluster-cidr 10.42.0.0/16,2001:cafe:42:0::/56", "--service-cidr 10.43.0.0/16,2001:cafe:42:1::/112"}
var dualStackServer *testutil.K3sServer
var dualStackServerArgs = []string{
"--cluster-init",
"--cluster-cidr 10.42.0.0/16,2001:cafe:42:0::/56",
"--service-cidr 10.43.0.0/16,2001:cafe:42:1::/112",
"--disable-network-policy",
}
var _ = BeforeSuite(func() {
if !testutil.IsExistingServer() {
if !testutil.IsExistingServer() && os.Getenv("CI") != "true" {
var err error
server, err = testutil.K3sStartServer(dualStackServerArgs...)
dualStackServer, err = testutil.K3sStartServer(dualStackServerArgs...)
Expect(err).ToNot(HaveOccurred())
}
})
@@ -23,27 +30,29 @@ var _ = Describe("dual stack", func() {
BeforeEach(func() {
if testutil.IsExistingServer() && !testutil.ServerArgsPresent(dualStackServerArgs) {
Skip("Test needs k3s server with: " + strings.Join(dualStackServerArgs, " "))
} else if os.Getenv("CI") == "true" {
Skip("Github environment does not support IPv6")
}
})
When("a ipv4 and ipv6 cidr is present", func() {
It("starts up with no problems", func() {
Eventually(func() (string, error) {
return testutil.K3sCmd("kubectl", "get", "pods", "-A")
}, "90s", "1s").Should(MatchRegexp("kube-system.+traefik.+1\\/1.+Running"))
}, "180s", "5s").Should(MatchRegexp("kube-system.+traefik.+1\\/1.+Running"))
})
It("creates pods with two IPs", func() {
podname, err := testutil.K3sCmd("kubectl", "get", "pods", "-nkube-system", "-ojsonpath={.items[?(@.metadata.labels.app\\.kubernetes\\.io/name==\"traefik\")].metadata.name}")
Expect(err).NotTo(HaveOccurred())
result, err := testutil.K3sCmd("kubectl", "exec", podname, "-nkube-system", "--", "ip", "a")
Expect(result).To(ContainSubstring("2001:cafe:42:"))
podname, err := testutil.K3sCmd("kubectl", "get", "pods", "-n", "kube-system", "-o", "jsonpath={.items[?(@.metadata.labels.app\\.kubernetes\\.io/name==\"traefik\")].metadata.name}")
Expect(err).NotTo(HaveOccurred())
Eventually(func() (string, error) {
return testutil.K3sCmd("kubectl", "exec", podname, "-n", "kube-system", "--", "ip", "a")
}, "5s", "1s").Should(ContainSubstring("2001:cafe:42:"))
})
})
})
var _ = AfterSuite(func() {
if !testutil.IsExistingServer() {
Expect(testutil.K3sKillServer(server)).To(Succeed())
if !testutil.IsExistingServer() && os.Getenv("CI") != "true" {
Expect(testutil.K3sKillServer(dualStackServer)).To(Succeed())
}
})

tests/integration/localstorage/localstorage_int_test.go

@@ -13,20 +13,21 @@
testutil "github.com/rancher/k3s/tests/util"
)
var server *testutil.K3sServer
var serverArgs = []string{"--cluster-init"}
var localStorageServer *testutil.K3sServer
var localStorageServerArgs = []string{"--cluster-init"}
var testDataDir = "../../testdata/"
var _ = BeforeSuite(func() {
if !testutil.IsExistingServer() {
var err error
server, err = testutil.K3sStartServer(serverArgs...)
localStorageServer, err = testutil.K3sStartServer(localStorageServerArgs...)
Expect(err).ToNot(HaveOccurred())
}
})
var _ = Describe("local storage", func() {
BeforeEach(func() {
if testutil.IsExistingServer() && !testutil.ServerArgsPresent(serverArgs) {
Skip("Test needs k3s server with: " + strings.Join(serverArgs, " "))
if testutil.IsExistingServer() && !testutil.ServerArgsPresent(localStorageServerArgs) {
Skip("Test needs k3s server with: " + strings.Join(localStorageServerArgs, " "))
}
})
When("a new local storage is created", func() {
@@ -36,12 +37,12 @@ var _ = Describe("local storage", func() {
}, "90s", "1s").Should(MatchRegexp("kube-system.+coredns.+1\\/1.+Running"))
})
It("creates a new pvc", func() {
result, err := testutil.K3sCmd("kubectl", "create", "-f", "../testdata/localstorage_pvc.yaml")
result, err := testutil.K3sCmd("kubectl", "create", "-f", testDataDir+"localstorage_pvc.yaml")
Expect(result).To(ContainSubstring("persistentvolumeclaim/local-path-pvc created"))
Expect(err).NotTo(HaveOccurred())
})
It("creates a new pod", func() {
Expect(testutil.K3sCmd("kubectl", "create", "-f", "../testdata/localstorage_pod.yaml")).
Expect(testutil.K3sCmd("kubectl", "create", "-f", testDataDir+"localstorage_pod.yaml")).
To(ContainSubstring("pod/volume-test created"))
})
It("shows storage up in kubectl", func() {
@@ -81,7 +82,7 @@ var _ = Describe("local storage", func() {
var _ = AfterSuite(func() {
if !testutil.IsExistingServer() {
Expect(testutil.K3sKillServer(server)).To(Succeed())
Expect(testutil.K3sKillServer(localStorageServer)).To(Succeed())
}
})

tests/util/cmd.go

@@ -15,6 +15,14 @@
// Compile-time variable
var existingServer = "False"
const lockFile = "/var/lock/k3s-test.lock"
type K3sServer struct {
cmd *exec.Cmd
scanner *bufio.Scanner
lock int
}
func findK3sExecutable() string {
// if running on an existing cluster, it may be installed via k3s.service
// or run manually from dist/artifacts/k3s
@@ -25,7 +33,8 @@ func findK3sExecutable() string {
}
}
k3sBin := "dist/artifacts/k3s"
for {
i := 0
for ; i < 20; i++ {
_, err := os.Stat(k3sBin)
if err != nil {
k3sBin = "../" + k3sBin
@@ -33,6 +42,9 @@ func findK3sExecutable() string {
}
break
}
if i == 20 {
logrus.Fatal("Unable to find k3s executable")
}
return k3sBin
}
@@ -112,22 +124,21 @@ func FindStringInCmdAsync(scanner *bufio.Scanner, target string) bool {
return false
}
type K3sServer struct {
cmd *exec.Cmd
scanner *bufio.Scanner
lock int
}
// K3sStartServer acquires an exclusive lock on a temporary file, then launches a k3s cluster
// with the provided arguments. Subsequent/parallel calls to this function will block until
// the original lock is cleared using K3sKillServer
func K3sStartServer(cmdArgs ...string) (*K3sServer, error) {
func K3sStartServer(inputArgs ...string) (*K3sServer, error) {
logrus.Info("waiting to get server lock")
k3sLock, err := flock.Acquire("/var/lock/k3s-test.lock")
k3sLock, err := flock.Acquire(lockFile)
if err != nil {
return nil, err
}
var cmdArgs []string
for _, arg := range inputArgs {
cmdArgs = append(cmdArgs, strings.Fields(arg)...)
}
k3sBin := findK3sExecutable()
var cmd *exec.Cmd
if IsRoot() {
@@ -157,5 +168,11 @@ func K3sKillServer(server *K3sServer) error {
return err
}
}
return flock.Release(server.lock)
if err := flock.Release(server.lock); err != nil {
return err
}
if !flock.CheckLock(lockFile) {
return os.RemoveAll(lockFile)
}
return nil
}
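For illustration, a small hypothetical caller showing the effect of the new argument splitting (K3sStartServer now runs each input through strings.Fields, so a flag and its value may be passed as one string):
```go
package integration

import (
	testutil "github.com/rancher/k3s/tests/util"
)

// startDualStackServer is a hypothetical helper; the single string below is
// split into "--cluster-cidr" and its value before the k3s binary is exec'd.
func startDualStackServer() (*testutil.K3sServer, error) {
	return testutil.K3sStartServer(
		"--cluster-init",
		"--cluster-cidr 10.42.0.0/16,2001:cafe:42:0::/56",
	)
}
```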

tests/vagrant/.gitignore (new file)

@@ -0,0 +1 @@
.vagrant/

tests/vagrant/cgroup/unified/fedora-34/Vagrantfile (new file)

@@ -0,0 +1,120 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
#
# Vagrant box for testing k3s with cgroup v2.
ENV['TEST_UNITFILE_ROOTFULL'] ||= '../../../../../k3s.service'
ENV['TEST_UNITFILE_ROOTLESS'] ||= '../../../../../k3s-rootless.service'
Vagrant.configure("2") do |config|
config.vagrant.plugins = {
'vagrant-k3s' => {:version => '~> 0.1.3'},
}
config.vm.box = "fedora/34-cloud-base"
config.vm.boot_timeout = ENV['TEST_VM_BOOT_TIMEOUT'] || 600 # seconds
config.vm.synced_folder '../../../../../dist/artifacts', '/vagrant', type: 'rsync', disabled: false,
rsync__exclude: ENV['RSYNC_EXCLUDE'] || '*.tar.*'
config.vm.define 'cgroup-unified', primary: true do |test|
test.vm.hostname = 'smoke'
test.vm.provision :file, run: 'always', source: ENV['TEST_UNITFILE_ROOTFULL'], destination: 'k3s-rootfull.service'
test.vm.provision :file, run: 'always', source: ENV['TEST_UNITFILE_ROOTLESS'], destination: 'k3s-rootless.service'
test.vm.provision 'k3s-prepare', type: "shell", run: "once", privileged: true do |sh|
sh.inline = <<~SHELL
#!/usr/bin/env bash
set -eux -o pipefail
# Install k3s binary
install -vm 755 /vagrant/k3s /usr/local/bin
# Install k3s SELinux policy
dnf install -y https://github.com/k3s-io/k3s-selinux/releases/download/v0.5.testing.2/k3s-selinux-0.5-2.el8.noarch.rpm
# Install k3s systemd service (not launched here)
install -vm 644 -T /home/vagrant/k3s-rootfull.service /etc/systemd/system/k3s-server.service
touch /etc/systemd/system/k3s-server.service.env
systemctl daemon-reload
# Install sonobuoy binary
curl -fsSL https://github.com/vmware-tanzu/sonobuoy/releases/download/v0.20.0/sonobuoy_0.20.0_linux_amd64.tar.gz | tar xzvC /usr/local/bin sonobuoy
# [Rootless] Configure sysctl
echo "net.ipv4.ip_forward=1" > /etc/sysctl.d/rootless.conf
sysctl --system
# [Rootless] Enable cgroup v2 delegation
mkdir -p /etc/systemd/system/user@.service.d
cat <<-EOF > /etc/systemd/system/user@.service.d/delegate.conf
[Service]
Delegate=yes
EOF
systemctl daemon-reload
# [Rootless] Enable systemd lingering
loginctl enable-linger vagrant
# [Rootless] Install k3s-rootless systemd service (not launched here)
mkdir -p /home/vagrant/.config/systemd/user
cp -f /home/vagrant/k3s-rootless.service /home/vagrant/.config/systemd/user/k3s-rootless.service
chown -R vagrant:vagrant /home/vagrant/.config
SHELL
end
test.vm.provision 'k3s-install', type: 'k3s', run: 'once' do |k3s|
k3s.args = %w[server]
k3s.env = %w[INSTALL_K3S_NAME=server INSTALL_K3S_SKIP_DOWNLOAD=true K3S_TOKEN=vagrant INSTALL_K3S_SKIP_ENABLE=true]
k3s.config = {
'disable' => %w[local-storage metrics-server servicelb traefik],
'disable-helm-controller' => true,
'disable-network-policy' => true,
'write-kubeconfig-mode' => '0644',
}
k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
end
test.vm.provision "k3s-ready", type: "shell", run: ENV['CI'] == 'true' ? 'never' : 'once' do |sh|
sh.env = {
:PATH => "/usr/local/bin:/usr/local/sbin:/sbin:/bin:/usr/sbin:/usr/bin:/root/bin",
:KUBECONFIG => ENV['TEST_KUBECONFIG'] || '/etc/rancher/k3s/k3s.yaml',
}
sh.inline = <<~SHELL
#!/usr/bin/env bash
set -eu -o pipefail
echo 'Waiting for node to be ready ...'
time timeout 500 bash -c 'while ! (kubectl wait --for condition=ready node/$(hostname) 2>/dev/null); do sleep 5; done'
time timeout 500 bash -c 'while ! (kubectl --namespace kube-system rollout status --timeout 10s deploy/coredns 2>/dev/null); do sleep 5; done'
SHELL
end
test.vm.provision "k3s-status", type: "shell", run: ENV['CI'] == 'true' ? 'never' : 'once' do |sh|
sh.env = {
:PATH => "/usr/local/bin:/usr/local/sbin:/sbin:/bin:/usr/sbin:/usr/bin:/root/bin",
:KUBECONFIG => ENV['TEST_KUBECONFIG'] || '/etc/rancher/k3s/k3s.yaml',
}
sh.inline = <<~SHELL
#!/usr/bin/env bash
set -eux -o pipefail
kubectl get node,all -A -o wide
SHELL
end
test.vm.provision "k3s-sonobuoy", type: "shell", run: ENV['CI'] == 'true' ? 'never' : 'once' do |sh|
sh.env = {
:PATH => "/usr/local/bin:/usr/local/sbin:/sbin:/bin:/usr/sbin:/usr/bin:/root/bin",
:KUBECONFIG => ENV['TEST_KUBECONFIG'] || '/etc/rancher/k3s/k3s.yaml',
:RESULTS_PATH => ENV['TEST_RESULTS_PATH'] || '.',
}
sh.inline = <<~SHELL
#!/usr/bin/env bash
set -eux -o pipefail
sonobuoy run --mode=quick --wait
sonobuoy retrieve ${RESULTS_PATH}
sonobuoy results $(ls -rt ${RESULTS_PATH}/*.tar.gz | tail -1) | grep Status | grep passed
SHELL
end
end
config.vm.provision 'selinux-status', type: 'shell', run: 'once', inline: 'sestatus'
%w[libvirt virtualbox].each do |p|
config.vm.provider p do |v|
v.cpus = ENV['TEST_VM_CPUS'] || 2
v.memory = ENV['TEST_VM_MEMORY'] || 2048
end
end
end