mirror of https://github.com/k3s-io/k3s
[Release-1.22] Backport E2E Testing Improvements (#5124)
* Fix cluster validation and add upgrade cluster test (#5020)
  Signed-off-by: Shylaja Devadiga <shylaja@rancher.com>
  Co-authored-by: Derek Nola <derek.nola@suse.com>
  Signed-off-by: Derek Nola <derek.nola@suse.com>

* Migrate Ginkgo testing framework to V2, consolidate integration tests (#5097)
  * Upgrade and convert ginkgo from v1 to v2
  * Move all integration tests into integration folder
  * Update TESTING.md
  Signed-off-by: Derek Nola <derek.nola@suse.com>

* E2E Test Improvements (#5102)
  * Fix infinite while loop on failure, reduce upgradecluster
  * DRY code
  Signed-off-by: Derek Nola <derek.nola@suse.com>

pull/5198/head
parent e3d5310b5f
commit d07edd72f6
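The mechanical core of this backport is the Ginkgo v1 to v2 migration: v2 removes the in-process custom-reporter API, so every suite below switches from `RunSpecsWithDefaultAndCustomReporters` to a plain `RunSpecs`, with JUnit XML now produced by the Ginkgo CLI instead. A minimal before/after sketch (the suite name and package here are illustrative, not from this diff):

```go
package example_test

import (
    "testing"

    . "github.com/onsi/ginkgo/v2"
    . "github.com/onsi/gomega"
)

// Ginkgo v1 style, removed throughout this PR:
//
//    RunSpecsWithDefaultAndCustomReporters(t, "Example Suite", []Reporter{
//        reporters.NewJUnitReporter("/tmp/results/junit-example.xml"),
//    })
//
// Ginkgo v2 style: reporters are gone from the API; JUnit output is
// generated externally with `ginkgo --junit-report=result.xml ./...`.
func Test_Example(t *testing.T) {
    RegisterFailHandler(Fail)
    RunSpecs(t, "Example Suite")
}
```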
go.mod
@@ -99,9 +99,8 @@ require (
	github.com/mattn/go-sqlite3 v1.14.8
	github.com/minio/minio-go/v7 v7.0.7
	github.com/natefinch/lumberjack v2.0.0+incompatible
-	github.com/onsi/ginkgo v1.16.4
-	github.com/onsi/gomega v1.11.0
	// LOOK TO scripts/download FOR THE VERSION OF runc THAT WE ARE BUILDING/SHIPPING
+	github.com/onsi/ginkgo/v2 v2.1.1
+	github.com/onsi/gomega v1.17.0
	github.com/opencontainers/runc v1.0.3
	github.com/opencontainers/selinux v1.8.2
	github.com/otiai10/copy v1.6.0
go.sum
@@ -491,6 +491,7 @@ github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLe
github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
@@ -863,6 +864,8 @@ github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9k
github.com/onsi/ginkgo v1.16.1/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E=
github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
+github.com/onsi/ginkgo/v2 v2.1.1 h1:LCnPB85AvFNr91s0B2aDzEiiIg6MUwLYbryC1NSlWi8=
+github.com/onsi/ginkgo/v2 v2.1.1/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.3.0/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
@@ -870,8 +873,9 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J
github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
-github.com/onsi/gomega v1.11.0 h1:+CqWgvj0OZycCaqclBD1pxKHAU+tOkHmQIWvDHq2aug=
github.com/onsi/gomega v1.11.0/go.mod h1:azGKhqFUon9Vuj0YmTfLSmx0FUwqXYSTl5re8lQLTUg=
+github.com/onsi/gomega v1.17.0 h1:9Luw4uT5HTjHTN8+aNcSThgH1vdXnmdJ8xIfZ4wyTRE=
+github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
@@ -3,8 +3,7 @@ package flock_test
import (
    "testing"

-   . "github.com/onsi/ginkgo"
-   "github.com/onsi/ginkgo/reporters"
+   . "github.com/onsi/ginkgo/v2"
    . "github.com/onsi/gomega"
    "github.com/rancher/k3s/pkg/flock"
)
@@ -31,7 +30,5 @@ var _ = Describe("file locks", func() {

func TestFlock(t *testing.T) {
    RegisterFailHandler(Fail)
-   RunSpecsWithDefaultAndCustomReporters(t, "Flock Suite", []Reporter{
-       reporters.NewJUnitReporter("/tmp/results/junit-flock.xml"),
-   })
+   RunSpecs(t, "Flock Suite")
}
@@ -35,7 +35,7 @@ To facilitate unit test creation, see `tests/util/runtime.go` helper functions.
All unit tests should be placed within the package of the file they test.
All unit test files should be named: `<FILE_UNDER_TEST>_test.go`.
All unit test functions should be named: `Test_Unit<FUNCTION_TO_TEST>` or `Test_Unit<RECEIVER>_<METHOD_TO_TEST>`.
-See the [etcd unit test](https://github.com/k3s-io/k3s/blob/master/pkg/etcd/etcd_test.go) as an example.
+See the [etcd unit test](../pkg/etcd/etcd_test.go) as an example.

### Running
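To make the naming convention above concrete, a minimal sketch of a unit test is shown below; the package and the function under test are illustrative stand-ins, not code from this commit:

```go
package example

import "testing"

// joinAddress is a stand-in for a function under test (illustrative only).
func joinAddress(host, port string) string { return host + ":" + port }

// Test_UnitJoinAddress follows the Test_Unit<FUNCTION_TO_TEST> convention
// and lives in the same package as the function it tests.
func Test_UnitJoinAddress(t *testing.T) {
    if got := joinAddress("127.0.0.1", "6443"); got != "127.0.0.1:6443" {
        t.Fatalf("unexpected address: %s", got)
    }
}
```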
@@ -62,29 +62,27 @@ To facilitate K3s CLI testing, see `tests/util/cmd.go` helper functions.

### Format

-Integration tests can be placed in two areas:
-
-1. Next to the go package they intend to test.
-2. In `tests/integration/<TEST_NAME>` for package agnostic testing.
-
-Package specific integration tests should use the `<PACKAGE_UNDER_TEST>_test` package.
-Package agnostic integration tests should use the `integration` package.
+All integration tests should be placed under `tests/integration/<TEST_NAME>`.
All integration test files should be named: `<TEST_NAME>_int_test.go`.
All integration test functions should be named: `Test_Integration<TEST_NAME>`.
-See the [etcd snapshot test](https://github.com/k3s-io/k3s/blob/master/pkg/etcd/etcd_int_test.go) as a package specific example.
-See the [local storage test](https://github.com/k3s-io/k3s/blob/master/tests/integration/localstorage/localstorage_int_test.go) as a package agnostic example.
+See the [local storage test](../tests/integration/localstorage/localstorage_int_test.go) as an example.

### Running

Integration tests can be run with no k3s cluster present, each test will spin up and kill the appropriate k3s server it needs.
Note: Integration tests must be run as root, prefix the commands below with `sudo -E env "PATH=$PATH"` if a sudo user.
```bash
-go test ./pkg/... ./tests/integration/... -run Integration
+go test ./tests/integration/... -run Integration
```

+Additionally, to generate JUnit reporting for the tests, the Ginkgo CLI is used
+```
+ginkgo --junit-report=result.xml ./tests/integration/...
+```
+
Integration tests can be run on an existing single-node cluster via compile time flag, tests will skip if the server is not configured correctly.
```bash
-go test -ldflags "-X 'github.com/rancher/k3s/tests/util.existingServer=True'" ./pkg/... ./tests/integration/... -run Integration
+go test -ldflags "-X 'github.com/rancher/k3s/tests/util.existingServer=True'" ./tests/integration/... -run Integration
```

Integration tests can also be run via a [Sonobuoy](https://sonobuoy.io/docs/v0.53.2/) plugin on an existing single-node cluster.
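A skeleton matching the consolidated integration-test conventions above might look like the following; the suite name and assertion are illustrative placeholders (see the real local storage test for a complete example):

```go
package integration

import (
    "testing"

    . "github.com/onsi/ginkgo/v2"
    . "github.com/onsi/gomega"
)

var _ = Describe("example integration", func() {
    It("passes a placeholder assertion", func() {
        // Real suites start a k3s server via the tests/util helpers
        // and assert against the running cluster.
        Expect(true).To(BeTrue())
    })
})

// Test_IntegrationExample follows the Test_Integration<TEST_NAME> convention,
// so `go test ./tests/integration/... -run Integration` picks it up.
func Test_IntegrationExample(t *testing.T) {
    RegisterFailHandler(Fail)
    RunSpecs(t, "Example Integration Suite")
}
```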
@@ -191,10 +189,25 @@ See the [validate cluster test](../tests/e2e/validatecluster/validatecluster_tes

Generally, E2E tests are run as a nightly Jenkins job for QA. They can still be run locally, but additional setup may be required. By default, all E2E tests are designed with `libvirt` as the underlying VM provider. Instructions for installing libvirt and its associated vagrant plugin, `vagrant-libvirt`, can be found [here.](https://github.com/vagrant-libvirt/vagrant-libvirt#installation) `VirtualBox` is also supported as a backup VM provider.

-Once setup is complete, E2E tests can be run with:
+Once setup is complete, all E2E tests can be run with:
```bash
-go test ./tests/e2e/... -run E2E
+go test -timeout=15m ./tests/e2e/... -run E2E
```
+Tests can be run individually with:
+```bash
+go test -timeout=15m ./tests/e2e/validatecluster/... -run E2E
+# or
+go test -timeout=15m ./tests/e2e/... -run E2EClusterValidation
+```
+
+Additionally, to generate JUnit reporting for the tests, the Ginkgo CLI is used. Installation instructions can be found [here.](https://onsi.github.io/ginkgo/#getting-started)
+
+To run all the E2E tests and generate JUnit testing reports:
+```
+ginkgo --junit-report=result.xml ./tests/e2e/...
+```
+
+Note: The `go test` default timeout is 10 minutes, thus the `-timeout` flag should be used. The `ginkgo` default timeout is 1 hour, so no timeout flag is needed.

## Contributing New Or Updated Tests
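Because E2E assertions target a cluster that converges slowly, the suites in this diff lean on Gomega's `Eventually` with a timeout and polling interval. A minimal sketch of the idiom, using the `ParseNodes` helper and durations taken from the tests below (the wrapper package is illustrative):

```go
package example

import (
    . "github.com/onsi/ginkgo/v2"
    . "github.com/onsi/gomega"

    "github.com/rancher/k3s/tests/e2e"
)

var kubeConfigFile string // populated via e2e.GenKubeConfigFile in real suites

var _ = Describe("example polling", func() {
    It("waits for all nodes to become Ready", func() {
        // Retry for up to 7 minutes, polling every 5 seconds; the Gomega
        // instance g scopes failures to a single polling attempt instead
        // of aborting the whole spec.
        Eventually(func(g Gomega) {
            nodes, err := e2e.ParseNodes(kubeConfigFile, false)
            g.Expect(err).NotTo(HaveOccurred())
            for _, node := range nodes {
                g.Expect(node.Status).Should(Equal("Ready"))
            }
        }, "420s", "5s").Should(Succeed())
    })
})
```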
@@ -22,5 +22,3 @@ WORKDIR $GOPATH/src/github.com/rancher/k3s-io/k3s/tests/e2e

COPY . .

-RUN go get -u github.com/onsi/gomega
-RUN go get -u github.com/onsi/ginkgo
@@ -1,16 +1,19 @@
-apiVersion: extensions/v1beta1
+apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
-  name: ingress
+  name: test-ingress
spec:
-  rules:
-  - host: foo1.bar.com
-    http:
-      paths:
-      - path: /name.html
-        backend:
-          serviceName: nginx-ingress-svc
-          servicePort: 80
+  rules:
+  - host: foo1.bar.com
+    http:
+      paths:
+      - backend:
+          service:
+            name: nginx-ingress-svc
+            port:
+              number: 80
+        path: /
+        pathType: ImplementationSpecific
---
apiVersion: v1
kind: Service
@@ -4,7 +4,7 @@ go 1.15

require (
    github.com/kr/pretty v0.2.0 // indirect
-   github.com/onsi/ginkgo v1.16.4
+   github.com/onsi/ginkgo/v2 v2.1.1
    github.com/onsi/gomega v1.17.0
    gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect
)
@@ -1,3 +1,6 @@
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
@@ -19,7 +22,9 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
@@ -32,6 +37,8 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
+github.com/onsi/ginkgo/v2 v2.1.1 h1:LCnPB85AvFNr91s0B2aDzEiiIg6MUwLYbryC1NSlWi8=
+github.com/onsi/ginkgo/v2 v2.1.1/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.17.0 h1:9Luw4uT5HTjHTN8+aNcSThgH1vdXnmdJ8xIfZ4wyTRE=
@@ -60,6 +67,7 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1,7 +1,15 @@
#!/bin/bash
# Grabs the last 5 commit SHA's from the given branch, then purges any commits that do not have a passing CI build
+iterations=0
curl -s -H 'Accept: application/vnd.github.v3+json' "https://api.github.com/repos/k3s-io/k3s/commits?per_page=5&sha=$1" | jq -r '.[] | .sha' &> $2
curl -s --fail https://storage.googleapis.com/k3s-ci-builds/k3s-$(head -n 1 $2).sha256sum
while [ $? -ne 0 ]; do
-  sed -i 1d $2
+  ((iterations++))
+  if [ "$iterations" -ge 6 ]; then
+    echo "No valid commits found"
+    exit 1
+  fi
+  sed -i 1d "$2"
+  sleep 1
+  curl -s --fail https://storage.googleapis.com/k3s-ci-builds/k3s-$(head -n 1 $2).sha256sum
done
@@ -3,8 +3,10 @@ package e2e
import (
    "bytes"
    "fmt"
+   "io/ioutil"
    "os"
    "os/exec"
+   "path/filepath"
    "strconv"
    "strings"
)
@@ -27,53 +29,58 @@ type Pod struct {
    Node string
}

-//Runs command passed from within the node ServerIP
-func RunCmdOnNode(cmd string, nodename string) (string, error) {
-   runcmd := "vagrant ssh server-0 -c " + cmd
-   c := exec.Command("bash", "-c", runcmd)
-
-   var out bytes.Buffer
-   var errOut bytes.Buffer
-   c.Stdout = &out
-   c.Stderr = &errOut
-   if err := c.Run(); err != nil {
-       return errOut.String(), err
+func CountOfStringInSlice(str string, pods []Pod) int {
+   count := 0
+   for _, pod := range pods {
+       if strings.Contains(pod.Name, str) {
+           count++
+       }
    }
-   return out.String(), nil
+   return count
}

-// RunCommand Runs command on the cluster accessing the cluster through kubeconfig file
-func RunCommand(cmd string) (string, error) {
-   c := exec.Command("bash", "-c", cmd)
-   var out bytes.Buffer
-   c.Stdout = &out
-   if err := c.Run(); err != nil {
-       return "", err
-   }
-   return out.String(), nil
-}
-
-func CreateCluster(nodeos string, serverCount int, agentCount int) ([]string, []string, error) {
-   serverNodenames := make([]string, serverCount+1)
+func CreateCluster(nodeOS string, serverCount int, agentCount int, installType string) ([]string, []string, error) {
+   serverNodeNames := make([]string, serverCount)
    for i := 0; i < serverCount; i++ {
-       serverNodenames[i] = "server-" + strconv.Itoa(i)
+       serverNodeNames[i] = "server-" + strconv.Itoa(i)
    }

-   agentNodenames := make([]string, agentCount+1)
+   agentNodeNames := make([]string, agentCount)
    for i := 0; i < agentCount; i++ {
-       agentNodenames[i] = "agent-" + strconv.Itoa(i)
+       agentNodeNames[i] = "agent-" + strconv.Itoa(i)
    }
-   nodeRoles := strings.Join(serverNodenames, " ") + strings.Join(agentNodenames, " ")
+   nodeRoles := strings.Join(serverNodeNames, " ") + " " + strings.Join(agentNodeNames, " ")

    nodeRoles = strings.TrimSpace(nodeRoles)
-   nodeBoxes := strings.Repeat(nodeos+" ", serverCount+agentCount)
+   nodeBoxes := strings.Repeat(nodeOS+" ", serverCount+agentCount)
    nodeBoxes = strings.TrimSpace(nodeBoxes)
-   cmd := fmt.Sprintf("NODE_ROLES=\"%s\" NODE_BOXES=\"%s\" vagrant up &> vagrant.log", nodeRoles, nodeBoxes)
-   if out, err := RunCommand(cmd); err != nil {
-       fmt.Println("Error Creating Cluster", out)
+   cmd := fmt.Sprintf("NODE_ROLES=\"%s\" NODE_BOXES=\"%s\" %s vagrant up &> vagrant.log", nodeRoles, nodeBoxes, installType)
+   fmt.Println(cmd)
+   if _, err := RunCommand(cmd); err != nil {
+       fmt.Println("Error Creating Cluster", err)
        return nil, nil, err
    }
-
-   return serverNodenames, agentNodenames, nil
+   return serverNodeNames, agentNodeNames, nil
+}
+
+func DeployWorkload(workload, kubeconfig string, arch bool) (string, error) {
+   resource_dir := "../amd64_resource_files"
+   if arch {
+       resource_dir = "../arm64_resource_files"
+   }
+   files, err := ioutil.ReadDir(resource_dir)
+   if err != nil {
+       err = fmt.Errorf("%s : Unable to read resource manifest file for %s", err, workload)
+       return "", err
+   }
+   fmt.Println("\nDeploying", workload)
+   for _, f := range files {
+       filename := filepath.Join(resource_dir, f.Name())
+       if strings.TrimSpace(f.Name()) == workload {
+           cmd := "kubectl apply -f " + filename + " --kubeconfig=" + kubeconfig
+           return RunCommand(cmd)
+       }
+   }
+   return "", nil
}

func DestroyCluster() error {
@@ -83,8 +90,36 @@ func DestroyCluster() error {
    return os.Remove("vagrant.log")
}

+func FetchClusterIP(kubeconfig string, servicename string) (string, error) {
+   cmd := "kubectl get svc " + servicename + " -o jsonpath='{.spec.clusterIP}' --kubeconfig=" + kubeconfig
+   return RunCommand(cmd)
+}
+
+func FetchIngressIP(kubeconfig string) ([]string, error) {
+   cmd := "kubectl get ing ingress -o jsonpath='{.status.loadBalancer.ingress[*].ip}' --kubeconfig=" + kubeconfig
+   res, err := RunCommand(cmd)
+   if err != nil {
+       return nil, err
+   }
+   ingressIP := strings.Trim(res, " ")
+   ingressIPs := strings.Split(ingressIP, " ")
+   return ingressIPs, nil
+}
+
+func FetchNodeExternalIP(nodename string) (string, error) {
+   cmd := "vagrant ssh " + nodename + " -c \"ip -f inet addr show eth1| awk '/inet / {print $2}'|cut -d/ -f1\""
+   ipaddr, err := RunCommand(cmd)
+   if err != nil {
+       return "", err
+   }
+   ips := strings.Trim(ipaddr, "")
+   ip := strings.Split(ips, "inet")
+   nodeip := strings.TrimSpace(ip[1])
+   return nodeip, nil
+}
+
func GenKubeConfigFile(serverName string) (string, error) {
-   cmd := fmt.Sprintf("vagrant ssh %s -c \"cat /etc/rancher/k3s/k3s.yaml\"", serverName)
+   cmd := fmt.Sprintf("vagrant ssh %s -c \"sudo cat /etc/rancher/k3s/k3s.yaml\"", serverName)
    kubeConfig, err := RunCommand(cmd)
    if err != nil {
        return "", err
@@ -101,44 +136,13 @@ func GenKubeConfigFile(serverName string) (string, error) {
    return kubeConfigFile, nil
}

-func DeployWorkload(workload string, kubeconfig string) (string, error) {
-   cmd := "kubectl apply -f " + workload + " --kubeconfig=" + kubeconfig
-   return RunCommand(cmd)
-}
-
-func FetchClusterIP(kubeconfig string, servicename string) (string, error) {
-   cmd := "kubectl get svc " + servicename + " -o jsonpath='{.spec.clusterIP}' --kubeconfig=" + kubeconfig
-   return RunCommand(cmd)
-}
-
-func FetchNodeExternalIP(nodename string) (string, error) {
-   cmd := "vagrant ssh " + nodename + " -c \"ip -f inet addr show eth1| awk '/inet / {print $2}'|cut -d/ -f1\""
-   ipaddr, err := RunCommand(cmd)
-   if err != nil {
-       return "", err
-   }
-   ips := strings.Trim(ipaddr, "")
-   ip := strings.Split(ips, "inet")
-   nodeip := strings.TrimSpace(ip[1])
-   return nodeip, nil
-}
-
-func FetchIngressIP(kubeconfig string) ([]string, error) {
-   cmd := "kubectl get ing ingress -o jsonpath='{.status.loadBalancer.ingress[*].ip}' --kubeconfig=" + kubeconfig
-   res, err := RunCommand(cmd)
-   if err != nil {
-       return nil, err
-   }
-   ingressIP := strings.Trim(res, " ")
-   ingressIPs := strings.Split(ingressIP, " ")
-   return ingressIPs, nil
-}
-
-func ParseNodes(kubeConfig string, debug bool) ([]Node, error) {
+func ParseNodes(kubeConfig string, print bool) ([]Node, error) {
    nodes := make([]Node, 0, 10)
    nodeList := ""

    cmd := "kubectl get nodes --no-headers -o wide -A --kubeconfig=" + kubeConfig
    res, err := RunCommand(cmd)

    if err != nil {
        return nil, err
    }
@@ -157,13 +161,13 @@ func ParseNodes(kubeConfig string, debug bool) ([]Node, error) {
            nodes = append(nodes, node)
        }
    }
-   if debug {
+   if print {
        fmt.Println(nodeList)
    }
    return nodes, nil
}

-func ParsePods(kubeconfig string, debug bool) ([]Pod, error) {
+func ParsePods(kubeconfig string, print bool) ([]Pod, error) {
    pods := make([]Pod, 0, 10)
    podList := ""

@@ -186,8 +190,44 @@ func ParsePods(kubeconfig string, debug bool) ([]Pod, error) {
        }
        pods = append(pods, pod)
    }
-   if debug {
+   if print {
        fmt.Println(podList)
    }
    return pods, nil
}

+// RunCmdOnNode executes a command from within the given node
+func RunCmdOnNode(cmd string, nodename string) (string, error) {
+   runcmd := "vagrant ssh -c " + cmd + " " + nodename
+   return RunCommand(runcmd)
+}
+
+// RunCommand executes a command on the host
+func RunCommand(cmd string) (string, error) {
+   c := exec.Command("bash", "-c", cmd)
+   var out bytes.Buffer
+   c.Stdout = &out
+   if err := c.Run(); err != nil {
+       return "", err
+   }
+   return out.String(), nil
+}
+
+func UpgradeCluster(serverNodenames []string, agentNodenames []string) error {
+   for _, nodeName := range serverNodenames {
+       cmd := "RELEASE_CHANNEL=commit vagrant provision " + nodeName
+       fmt.Println(cmd)
+       if out, err := RunCommand(cmd); err != nil {
+           fmt.Println("Error Upgrading Cluster", out)
+           return err
+       }
+   }
+   for _, nodeName := range agentNodenames {
+       cmd := "RELEASE_CHANNEL=commit vagrant provision " + nodeName
+       if _, err := RunCommand(cmd); err != nil {
+           fmt.Println("Error Upgrading Cluster", err)
+           return err
+       }
+   }
+   return nil
+}
@@ -17,9 +17,11 @@ def provision(vm, roles, role_num, node_num)
  # An expanded netmask is required to allow VM<-->VM communication, virtualbox defaults to /32
  vm.network "private_network", ip: "#{NETWORK_PREFIX}.#{100+node_num}", netmask: "255.255.255.0"

-  osConfigure(vm)
+  vagrant_defaults = '../vagrantdefaults.rb'
+  load vagrant_defaults if File.exists?(vagrant_defaults)
+
+  defaultOSConfigure(vm)

  if !RELEASE_VERSION.empty?
    install_type = "INSTALL_K3S_VERSION=#{RELEASE_VERSION}"
  elsif RELEASE_CHANNEL == "commit"
@@ -35,14 +37,12 @@ def provision(vm, roles, role_num, node_num)

  if roles.include?("server") && role_num == 0
    vm.provision 'k3s-install', type: 'k3s', run: 'once' do |k3s|
      k3s.installer_url = 'https://get.k3s.io'
      k3s.args = %W[server --cluster-init --node-external-ip=#{NETWORK_PREFIX}.100 --flannel-iface=eth1]
      k3s.env = %W[K3S_KUBECONFIG_MODE=0644 K3S_TOKEN=vagrant #{install_type}]
      k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
    end
  elsif roles.include?("server") && role_num != 0
    vm.provision 'k3s-install', type: 'k3s', run: 'once' do |k3s|
      k3s.installer_url = 'https://get.k3s.io'
      k3s.args = %W[server --server https://#{NETWORK_PREFIX}.100:6443 --flannel-iface=eth1]
      k3s.env = %W[K3S_KUBECONFIG_MODE=0644 K3S_TOKEN=vagrant #{install_type}]
      k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
@@ -50,7 +50,6 @@ def provision(vm, roles, role_num, node_num)
    end
  if roles.include?("agent")
    vm.provision 'k3s-install', type: 'k3s', run: 'once' do |k3s|
      k3s.installer_url = 'https://get.k3s.io'
      k3s.args = %W[agent --server https://#{NETWORK_PREFIX}.100:6443 --flannel-iface=eth1]
      k3s.env = %W[K3S_KUBECONFIG_MODE=0644 K3S_TOKEN=vagrant #{install_type}]
      k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
@@ -61,22 +60,6 @@ def provision(vm, roles, role_num, node_num)
    end
  end

-def osConfigure(vm)
-
-  if vm.box.include?("ubuntu2004")
-    vm.provision "shell", inline: "systemd-resolve --set-dns=8.8.8.8 --interface=eth0", run: 'once'
-    vm.provision "shell", inline: "apt install -y jq", run: 'once'
-  end
-  if vm.box.include?("Leap")
-    vm.provision "shell", inline: "zypper install -y jq", run: 'once'
-  end
-  if vm.box.include?("microos")
-    vm.provision "shell", inline: "transactional-update pkg install -y jq", run: 'once'
-    vm.provision 'reload', run: 'once'
-  end
-
-end
-
Vagrant.configure("2") do |config|
  config.vagrant.plugins = ["vagrant-k3s", "vagrant-reload"]
  # Default provider is libvirt, virtualbox is only provided as a backup
@@ -0,0 +1,383 @@
package upgradecluster

import (
    "flag"
    "fmt"
    "os"
    "strings"
    "testing"

    . "github.com/onsi/ginkgo/v2"
    . "github.com/onsi/gomega"
    "github.com/rancher/k3s/tests/e2e"
)

// Valid nodeOS: generic/ubuntu2004, opensuse/Leap-15.3.x86_64, dweomer/microos.amd64
var nodeOS = flag.String("nodeOS", "generic/ubuntu2004", "VM operating system")
var serverCount = flag.Int("serverCount", 3, "number of server nodes")
var agentCount = flag.Int("agentCount", 2, "number of agent nodes")

//valid format: RELEASE_VERSION=v1.23.1+k3s2 or nil for latest commit from master
var installType = flag.String("installType", "", "version or nil to use latest commit")

func Test_E2EUpgradeValidation(t *testing.T) {
    RegisterFailHandler(Fail)
    flag.Parse()
    RunSpecs(t, "Create Cluster Test Suite")
}

var (
    kubeConfigFile  string
    serverNodenames []string
    agentNodenames  []string
)

var _ = Describe("Verify Upgrade", func() {
    Context("Cluster :", func() {
        It("Starts up with no issues", func() {
            var err error
            serverNodenames, agentNodenames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount, *installType)
            Expect(err).NotTo(HaveOccurred())
            fmt.Println("CLUSTER CONFIG")
            fmt.Println("OS:", *nodeOS)
            fmt.Println("Server Nodes:", serverNodenames)
            fmt.Println("Agent Nodes:", agentNodenames)
            kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodenames[0])
            Expect(err).NotTo(HaveOccurred())
        })

        It("Checks Node and Pod Status", func() {
            fmt.Printf("\nFetching node status\n")
            Eventually(func(g Gomega) {
                nodes, err := e2e.ParseNodes(kubeConfigFile, false)
                g.Expect(err).NotTo(HaveOccurred())
                for _, node := range nodes {
                    g.Expect(node.Status).Should(Equal("Ready"))
                }
            }, "420s", "5s").Should(Succeed())
            _, _ = e2e.ParseNodes(kubeConfigFile, true)

            fmt.Printf("\nFetching Pods status\n")
            Eventually(func(g Gomega) {
                pods, err := e2e.ParsePods(kubeConfigFile, false)
                g.Expect(err).NotTo(HaveOccurred())
                for _, pod := range pods {
                    if strings.Contains(pod.Name, "helm-install") {
                        g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
                    } else {
                        g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
                    }
                }
            }, "420s", "5s").Should(Succeed())
            _, _ = e2e.ParsePods(kubeConfigFile, true)
        })

        It("Verifies ClusterIP Service", func() {
            _, err := e2e.DeployWorkload("clusterip.yaml", kubeConfigFile, false)

            Expect(err).NotTo(HaveOccurred(), "Cluster IP manifest not deployed")

            cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-clusterip --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
            Eventually(func() (string, error) {
                return e2e.RunCommand(cmd)
            }, "240s", "5s").Should(ContainSubstring("test-clusterip"), "failed cmd: "+cmd)

            clusterip, _ := e2e.FetchClusterIP(kubeConfigFile, "nginx-clusterip-svc")
            cmd = "\"curl -L --insecure http://" + clusterip + "/name.html\""
            for _, nodeName := range serverNodenames {
                Eventually(func() (string, error) {
                    return e2e.RunCmdOnNode(cmd, nodeName)
                }, "120s", "10s").Should(ContainSubstring("test-clusterip"), "failed cmd: "+cmd)
            }
        })

        It("Verifies NodePort Service", func() {
            _, err := e2e.DeployWorkload("nodeport.yaml", kubeConfigFile, false)
            Expect(err).NotTo(HaveOccurred(), "NodePort manifest not deployed")

            for _, nodeName := range serverNodenames {
                node_external_ip, _ := e2e.FetchNodeExternalIP(nodeName)
                cmd := "kubectl get service nginx-nodeport-svc --kubeconfig=" + kubeConfigFile + " --output jsonpath=\"{.spec.ports[0].nodePort}\""
                nodeport, err := e2e.RunCommand(cmd)
                Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd)

                cmd = "kubectl get pods -o=name -l k8s-app=nginx-app-nodeport --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
                Eventually(func() (string, error) {
                    return e2e.RunCommand(cmd)
                }, "240s", "5s").Should(ContainSubstring("test-nodeport"), "nodeport pod was not created")

                cmd = "curl -L --insecure http://" + node_external_ip + ":" + nodeport + "/name.html"
                fmt.Println(cmd)
                Eventually(func() (string, error) {
                    return e2e.RunCommand(cmd)
                }, "240s", "5s").Should(ContainSubstring("test-nodeport"), "failed cmd: "+cmd)
            }
        })

        It("Verifies LoadBalancer Service", func() {
            _, err := e2e.DeployWorkload("loadbalancer.yaml", kubeConfigFile, false)
            Expect(err).NotTo(HaveOccurred(), "Loadbalancer manifest not deployed")
            for _, nodeName := range serverNodenames {
                ip, _ := e2e.FetchNodeExternalIP(nodeName)
                cmd := "kubectl get service nginx-loadbalancer-svc --kubeconfig=" + kubeConfigFile + " --output jsonpath=\"{.spec.ports[0].port}\""
                port, err := e2e.RunCommand(cmd)
                Expect(err).NotTo(HaveOccurred())

                cmd = "kubectl get pods -o=name -l k8s-app=nginx-app-loadbalancer --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
                Eventually(func() (string, error) {
                    return e2e.RunCommand(cmd)
                }, "240s", "5s").Should(ContainSubstring("test-loadbalancer"))

                cmd = "curl -L --insecure http://" + ip + ":" + port + "/name.html"
                Eventually(func() (string, error) {
                    return e2e.RunCommand(cmd)
                }, "240s", "5s").Should(ContainSubstring("test-loadbalancer"), "failed cmd: "+cmd)
            }
        })

        It("Verifies Ingress", func() {
            _, err := e2e.DeployWorkload("ingress.yaml", kubeConfigFile, false)
            Expect(err).NotTo(HaveOccurred(), "Ingress manifest not deployed")

            for _, nodeName := range serverNodenames {
                ip, _ := e2e.FetchNodeExternalIP(nodeName)
                cmd := "curl --header host:foo1.bar.com" + " http://" + ip + "/name.html"
                Eventually(func() (string, error) {
                    return e2e.RunCommand(cmd)
                }, "240s", "5s").Should(ContainSubstring("test-ingress"), "failed cmd: "+cmd)
            }
        })

        It("Verifies Daemonset", func() {
            _, err := e2e.DeployWorkload("daemonset.yaml", kubeConfigFile, false)
            Expect(err).NotTo(HaveOccurred(), "Daemonset manifest not deployed")

            nodes, _ := e2e.ParseNodes(kubeConfigFile, false) //nodes :=
            pods, _ := e2e.ParsePods(kubeConfigFile, false)

            Eventually(func(g Gomega) {
                count := e2e.CountOfStringInSlice("test-daemonset", pods)
                fmt.Println("POD COUNT")
                fmt.Println(count)
                fmt.Println("NODE COUNT")
                fmt.Println(len(nodes))
                g.Expect(len(nodes)).Should((Equal(count)), "Daemonset pod count does not match node count")
            }, "240s", "10s").Should(Succeed())
        })

        It("Verifies dns access", func() {
            _, err := e2e.DeployWorkload("dnsutils.yaml", kubeConfigFile, false)
            Expect(err).NotTo(HaveOccurred(), "dnsutils manifest not deployed")

            Eventually(func() (string, error) {
                cmd := "kubectl get pods dnsutils --kubeconfig=" + kubeConfigFile
                return e2e.RunCommand(cmd)
            }, "420s", "2s").Should(ContainSubstring("dnsutils"))

            cmd := "kubectl --kubeconfig=" + kubeConfigFile + " exec -i -t dnsutils -- nslookup kubernetes.default"
            Eventually(func() (string, error) {
                return e2e.RunCommand(cmd)
            }, "420s", "2s").Should(ContainSubstring("kubernetes.default.svc.cluster.local"))
        })

        It("Verifies Local Path Provisioner storage ", func() {
            _, err := e2e.DeployWorkload("local-path-provisioner.yaml", kubeConfigFile, false)
            Expect(err).NotTo(HaveOccurred(), "local-path-provisioner manifest not deployed")
            Eventually(func(g Gomega) {
                cmd := "kubectl get pvc local-path-pvc --kubeconfig=" + kubeConfigFile
                res, err := e2e.RunCommand(cmd)
                g.Expect(err).NotTo(HaveOccurred())
                fmt.Println(res)
                g.Expect(res).Should(ContainSubstring("local-path-pvc"))
                g.Expect(res).Should(ContainSubstring("Bound"))
            }, "240s", "2s").Should(Succeed())

            Eventually(func(g Gomega) {
                cmd := "kubectl get pod volume-test --kubeconfig=" + kubeConfigFile
                res, err := e2e.RunCommand(cmd)
                g.Expect(err).NotTo(HaveOccurred())
                fmt.Println(res)

                g.Expect(res).Should(ContainSubstring("volume-test"))
                g.Expect(res).Should(ContainSubstring("Running"))
            }, "420s", "2s").Should(Succeed())

            cmd := "kubectl --kubeconfig=" + kubeConfigFile + " exec volume-test -- sh -c 'echo local-path-test > /data/test'"
            _, err = e2e.RunCommand(cmd)
            Expect(err).NotTo(HaveOccurred())
            fmt.Println("Data stored in pvc: local-path-test")

            cmd = "kubectl delete pod volume-test --kubeconfig=" + kubeConfigFile
            res, err := e2e.RunCommand(cmd)
            Expect(err).NotTo(HaveOccurred())
            fmt.Println(res)

            _, err = e2e.DeployWorkload("local-path-provisioner.yaml", kubeConfigFile, false)
            Expect(err).NotTo(HaveOccurred(), "local-path-provisioner manifest not deployed")

            Eventually(func() (string, error) {
                cmd := "kubectl get pods -o=name -l app=local-path-provisioner --field-selector=status.phase=Running -n kube-system --kubeconfig=" + kubeConfigFile
                return e2e.RunCommand(cmd)
            }, "420s", "2s").Should(ContainSubstring("local-path-provisioner"))

            Eventually(func(g Gomega) {
                cmd := "kubectl get pod volume-test --kubeconfig=" + kubeConfigFile
                res, err := e2e.RunCommand(cmd)
                g.Expect(err).NotTo(HaveOccurred())
                fmt.Println(res)
                g.Expect(res).Should(ContainSubstring("volume-test"))
                g.Expect(res).Should(ContainSubstring("Running"))
            }, "420s", "2s").Should(Succeed())

            // Check data after re-creation
            Eventually(func() (string, error) {
                cmd = "kubectl exec volume-test cat /data/test --kubeconfig=" + kubeConfigFile
                return e2e.RunCommand(cmd)
            }, "180s", "2s").Should(ContainSubstring("local-path-test"))
        })

        It("Upgrades with no issues", func() {
            var err error
            err = e2e.UpgradeCluster(serverNodenames, agentNodenames)
            fmt.Println(err)
            Expect(err).NotTo(HaveOccurred())
            fmt.Println("CLUSTER UPGRADED")
            kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodenames[0])
            Expect(err).NotTo(HaveOccurred())
        })

        It("After upgrade Checks Node and Pod Status", func() {
            fmt.Printf("\nFetching node status\n")
            Eventually(func(g Gomega) {
                nodes, err := e2e.ParseNodes(kubeConfigFile, false)
                g.Expect(err).NotTo(HaveOccurred())
                for _, node := range nodes {
                    g.Expect(node.Status).Should(Equal("Ready"))
                }
            }, "420s", "5s").Should(Succeed())
            e2e.ParseNodes(kubeConfigFile, true)

            fmt.Printf("\nFetching Pods status\n")
            Eventually(func(g Gomega) {
                pods, err := e2e.ParsePods(kubeConfigFile, false)
                g.Expect(err).NotTo(HaveOccurred())
                for _, pod := range pods {
                    if strings.Contains(pod.Name, "helm-install") {
                        g.Expect(pod.Status).Should(Equal("Completed"))
                    } else {
                        g.Expect(pod.Status).Should(Equal("Running"))
                    }
                }
            }, "420s", "5s").Should(Succeed())
            e2e.ParsePods(kubeConfigFile, true)
        })

        It("After upgrade verifies ClusterIP Service", func() {
            Eventually(func() (string, error) {
                cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-clusterip --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
                return e2e.RunCommand(cmd)
            }, "420s", "5s").Should(ContainSubstring("test-clusterip"))

            clusterip, _ := e2e.FetchClusterIP(kubeConfigFile, "nginx-clusterip-svc")
            cmd := "\"curl -L --insecure http://" + clusterip + "/name.html\""
            fmt.Println(cmd)
            for _, nodeName := range serverNodenames {
                Eventually(func() (string, error) {
                    return e2e.RunCmdOnNode(cmd, nodeName)
                }, "120s", "10s").Should(ContainSubstring("test-clusterip"), "failed cmd: "+cmd)
            }
        })

        It("After upgrade verifies NodePort Service", func() {

            for _, nodeName := range serverNodenames {
                node_external_ip, _ := e2e.FetchNodeExternalIP(nodeName)
                cmd := "kubectl get service nginx-nodeport-svc --kubeconfig=" + kubeConfigFile + " --output jsonpath=\"{.spec.ports[0].nodePort}\""
                nodeport, err := e2e.RunCommand(cmd)
                Expect(err).NotTo(HaveOccurred())

                Eventually(func() (string, error) {
                    cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-nodeport --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
                    return e2e.RunCommand(cmd)
                }, "240s", "5s").Should(ContainSubstring("test-nodeport"), "nodeport pod was not created")

                cmd = "curl -L --insecure http://" + node_external_ip + ":" + nodeport + "/name.html"
                fmt.Println(cmd)
                Eventually(func() (string, error) {
                    return e2e.RunCommand(cmd)
                }, "240s", "5s").Should(ContainSubstring("test-nodeport"))
            }
        })

        It("After upgrade verifies LoadBalancer Service", func() {
            for _, nodeName := range serverNodenames {
                ip, _ := e2e.FetchNodeExternalIP(nodeName)
                cmd := "kubectl get service nginx-loadbalancer-svc --kubeconfig=" + kubeConfigFile + " --output jsonpath=\"{.spec.ports[0].port}\""
                port, err := e2e.RunCommand(cmd)
                Expect(err).NotTo(HaveOccurred())
                Eventually(func() (string, error) {
                    cmd := "curl -L --insecure http://" + ip + ":" + port + "/name.html"
                    return e2e.RunCommand(cmd)
                }, "240s", "5s").Should(ContainSubstring("test-loadbalancer"))

                Eventually(func() (string, error) {
                    cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-loadbalancer --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
                    return e2e.RunCommand(cmd)
                }, "240s", "5s").Should(ContainSubstring("test-loadbalancer"))
            }
        })

        It("After upgrade verifies Ingress", func() {
            for _, nodeName := range serverNodenames {
                ip, _ := e2e.FetchNodeExternalIP(nodeName)
                cmd := "curl --header host:foo1.bar.com" + " http://" + ip + "/name.html"
                fmt.Println(cmd)

                Eventually(func() (string, error) {
                    return e2e.RunCommand(cmd)
                }, "420s", "5s").Should(ContainSubstring("test-ingress"))
            }
        })

        It("After upgrade verifies Daemonset", func() {
            nodes, _ := e2e.ParseNodes(kubeConfigFile, false) //nodes :=
            pods, _ := e2e.ParsePods(kubeConfigFile, false)

            Eventually(func(g Gomega) {
                count := e2e.CountOfStringInSlice("test-daemonset", pods)
                fmt.Println("POD COUNT")
                fmt.Println(count)
                fmt.Println("NODE COUNT")
                fmt.Println(len(nodes))
                g.Expect(len(nodes)).Should(Equal(count), "Daemonset pod count does not match node count")
            }, "420s", "1s").Should(Succeed())
        })
        It("After upgrade verifies dns access", func() {
            Eventually(func() (string, error) {
                cmd := "kubectl --kubeconfig=" + kubeConfigFile + " exec -i -t dnsutils -- nslookup kubernetes.default"
                return e2e.RunCommand(cmd)
            }, "180s", "2s").Should((ContainSubstring("kubernetes.default.svc.cluster.local")))
        })

        It("After upgrade verify Local Path Provisioner storage ", func() {
            Eventually(func() (string, error) {
                cmd := "kubectl exec volume-test cat /data/test --kubeconfig=" + kubeConfigFile
                return e2e.RunCommand(cmd)
            }, "180s", "2s").Should(ContainSubstring("local-path-test"))
        })
    })
})

var failed = false
var _ = AfterEach(func() {
    failed = failed || CurrentGinkgoTestDescription().Failed
})

var _ = AfterSuite(func() {
    if failed {
        fmt.Println("FAILED!")
    } else {
        Expect(e2e.DestroyCluster()).To(Succeed())
        Expect(os.Remove(kubeConfigFile)).To(Succeed())
    }
})
@@ -0,0 +1,13 @@
def defaultOSConfigure(vm)
  if vm.box.include?("ubuntu2004")
    vm.provision "shell", inline: "systemd-resolve --set-dns=8.8.8.8 --interface=eth0"
    vm.provision "shell", inline: "apt install -y jq"
  end
  if vm.box.include?("Leap")
    vm.provision "shell", inline: "zypper install -y jq"
  end
  if vm.box.include?("microos")
    vm.provision "shell", inline: "transactional-update pkg install -y jq"
    vm.provision 'reload', run: 'once'
  end
end
@@ -17,7 +17,10 @@ def provision(vm, roles, role_num, node_num)
  # An expanded netmask is required to allow VM<-->VM communication, virtualbox defaults to /32
  vm.network "private_network", ip: "#{NETWORK_PREFIX}.#{100+node_num}", netmask: "255.255.255.0"

-  osConfigure(vm)
+  vagrant_defaults = '../vagrantdefaults.rb'
+  load vagrant_defaults if File.exists?(vagrant_defaults)
+
+  defaultOSConfigure(vm)

  if !RELEASE_VERSION.empty?
    install_type = "INSTALL_K3S_VERSION=#{RELEASE_VERSION}"
@@ -30,14 +33,12 @@ def provision(vm, roles, role_num, node_num)

  if roles.include?("server") && role_num == 0
    vm.provision 'k3s-install', type: 'k3s', run: 'once' do |k3s|
      k3s.installer_url = 'https://get.k3s.io'
      k3s.args = %W[server --cluster-init --node-external-ip=#{NETWORK_PREFIX}.100 --flannel-iface=eth1]
      k3s.env = %W[K3S_KUBECONFIG_MODE=0644 K3S_TOKEN=vagrant #{install_type}]
      k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
    end
  elsif roles.include?("server") && role_num != 0
    vm.provision 'k3s-install', type: 'k3s', run: 'once' do |k3s|
      k3s.installer_url = 'https://get.k3s.io'
      k3s.args = %W[server --server https://#{NETWORK_PREFIX}.100:6443 --flannel-iface=eth1]
      k3s.env = %W[K3S_KUBECONFIG_MODE=0644 K3S_TOKEN=vagrant #{install_type}]
      k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
@@ -45,7 +46,6 @@ def provision(vm, roles, role_num, node_num)
    end
  if roles.include?("agent")
    vm.provision 'k3s-install', type: 'k3s', run: 'once' do |k3s|
      k3s.installer_url = 'https://get.k3s.io'
      k3s.args = %W[agent --server https://#{NETWORK_PREFIX}.100:6443 --flannel-iface=eth1]
      k3s.env = %W[K3S_KUBECONFIG_MODE=0644 K3S_TOKEN=vagrant #{install_type}]
      k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
@@ -56,22 +56,6 @@ def provision(vm, roles, role_num, node_num)
    end
  end

-def osConfigure(vm)
-
-  if vm.box.include?("ubuntu2004")
-    vm.provision "shell", inline: "systemd-resolve --set-dns=8.8.8.8 --interface=eth0"
-    vm.provision "shell", inline: "apt install -y jq"
-  end
-  if vm.box.include?("Leap")
-    vm.provision "shell", inline: "zypper install -y jq"
-  end
-  if vm.box.include?("microos")
-    vm.provision "shell", inline: "transactional-update pkg install -y jq"
-    vm.provision 'reload', run: 'once'
-  end
-
-end
-
Vagrant.configure("2") do |config|
  config.vagrant.plugins = ["vagrant-k3s", "vagrant-reload"]
  # Default provider is libvirt, virtualbox is only provided as a backup
@@ -1,72 +1,271 @@
package validatecluster

import (
+	"flag"
	"fmt"
	"os"
	"strings"
	"testing"

-	. "github.com/onsi/ginkgo"
-	"github.com/onsi/ginkgo/reporters"
+	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
	"github.com/rancher/k3s/tests/e2e"
)

+// Valid nodeOS: generic/ubuntu2004, opensuse/Leap-15.3.x86_64, dweomer/microos.amd64
+var nodeOS = flag.String("nodeOS", "generic/ubuntu2004", "VM operating system")
+var serverCount = flag.Int("serverCount", 3, "number of server nodes")
+var agentCount = flag.Int("agentCount", 2, "number of agent nodes")
+
+//valid format: RELEASE_VERSION=v1.23.1+k3s2 or nil for latest commit from master
+var installType = flag.String("installType", "", "version or nil to use latest commit")
+
func Test_E2EClusterValidation(t *testing.T) {
	RegisterFailHandler(Fail)
-	RunSpecsWithDefaultAndCustomReporters(t, "Create Cluster Test Suite", []Reporter{
-		reporters.NewJUnitReporter("/tmp/results/junit-cc.xml"),
-	})
+	flag.Parse()
+	RunSpecs(t, "Create Cluster Test Suite")
}

-const (
-	// Valid nodeOS: generic/ubuntu2004, opensuse/Leap-15.3.x86_64, dweomer/microos.amd64
-	nodeOs      = "generic/ubuntu2004"
-	serverCount = 3
-	agentCount  = 2
-)
-
var (
	kubeConfigFile  string
-	serverNodeNames []string
-	agentNodeNames  []string
+	serverNodenames []string
+	agentNodenames  []string
)

-var _ = Describe("Verify Cluster Creation", func() {
-	Context("Create the Cluster", func() {
+var _ = Describe("Verify Create", func() {
+	Context("Cluster :", func() {
		It("Starts up with no issues", func() {
			var err error
-			serverNodeNames, agentNodeNames, err = e2e.CreateCluster(nodeOs, serverCount, agentCount)
+			serverNodenames, agentNodenames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount, *installType)
			Expect(err).NotTo(HaveOccurred())
			fmt.Println("CLUSTER CONFIG")
-			fmt.Println("OS:", nodeOs)
-			fmt.Println("Server Nodes:", serverNodeNames)
-			fmt.Println("Agent Nodes:", agentNodeNames)
-			kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0])
+			fmt.Println("OS:", *nodeOS)
+			fmt.Println("Server Nodes:", serverNodenames)
+			fmt.Println("Agent Nodes:", agentNodenames)
+			kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodenames[0])
			Expect(err).NotTo(HaveOccurred())
		})
-		It("Verify Node and Pod Status", func() {
+
+		It("Checks Node and Pod Status", func() {
			fmt.Printf("\nFetching node status\n")
			Eventually(func(g Gomega) {
-				nodes, err := e2e.ParseNodes(kubeConfigFile, true)
+				nodes, err := e2e.ParseNodes(kubeConfigFile, false)
				g.Expect(err).NotTo(HaveOccurred())
				for _, node := range nodes {
-					g.Expect(node.Status).Should(Equal("Ready"), func() string { return node.Name })
+					g.Expect(node.Status).Should(Equal("Ready"))
				}
			}, "420s", "5s").Should(Succeed())
+			_, _ = e2e.ParseNodes(kubeConfigFile, true)

			fmt.Printf("\nFetching Pods status\n")
			Eventually(func(g Gomega) {
-				pods, err := e2e.ParsePods(kubeConfigFile, true)
+				pods, err := e2e.ParsePods(kubeConfigFile, false)
				g.Expect(err).NotTo(HaveOccurred())
				for _, pod := range pods {
					if strings.Contains(pod.Name, "helm-install") {
-						g.Expect(pod.Status).Should(Equal("Completed"), func() string { return pod.Name })
+						g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
					} else {
-						g.Expect(pod.Status).Should(Equal("Running"), func() string { return pod.Name })
+						g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
					}
				}
			}, "420s", "5s").Should(Succeed())
+			_, _ = e2e.ParsePods(kubeConfigFile, true)
		})
+
+		It("Verifies ClusterIP Service", func() {
+			_, err := e2e.DeployWorkload("clusterip.yaml", kubeConfigFile, false)
+			Expect(err).NotTo(HaveOccurred(), "Cluster IP manifest not deployed")
+
+			Eventually(func(g Gomega) {
+				cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-clusterip --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
+				res, err := e2e.RunCommand(cmd)
+				Expect(err).NotTo(HaveOccurred())
+				g.Expect(res).Should((ContainSubstring("test-clusterip")))
+			}, "240s", "5s").Should(Succeed())
+
+			clusterip, _ := e2e.FetchClusterIP(kubeConfigFile, "nginx-clusterip-svc")
+			cmd := "\"curl -L --insecure http://" + clusterip + "/name.html\""
+			fmt.Println(cmd)
+			for _, nodeName := range serverNodenames {
+				Eventually(func(g Gomega) {
+					res, err := e2e.RunCmdOnNode(cmd, nodeName)
+					g.Expect(err).NotTo(HaveOccurred())
+					fmt.Println(res)
+					Expect(res).Should(ContainSubstring("test-clusterip"))
+				}, "120s", "10s").Should(Succeed())
+			}
+		})
+
+		It("Verifies NodePort Service", func() {
+			_, err := e2e.DeployWorkload("nodeport.yaml", kubeConfigFile, false)
+			Expect(err).NotTo(HaveOccurred(), "NodePort manifest not deployed")
+
+			for _, nodeName := range serverNodenames {
+				node_external_ip, _ := e2e.FetchNodeExternalIP(nodeName)
+				cmd := "kubectl get service nginx-nodeport-svc --kubeconfig=" + kubeConfigFile + " --output jsonpath=\"{.spec.ports[0].nodePort}\""
+				nodeport, err := e2e.RunCommand(cmd)
+				Expect(err).NotTo(HaveOccurred())
+
+				Eventually(func(g Gomega) {
+					cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-nodeport --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
+					res, err := e2e.RunCommand(cmd)
+					Expect(err).NotTo(HaveOccurred())
+					g.Expect(res).Should(ContainSubstring("test-nodeport"), "nodeport pod was not created")
+				}, "240s", "5s").Should(Succeed())
+
+				cmd = "curl -L --insecure http://" + node_external_ip + ":" + nodeport + "/name.html"
+				fmt.Println(cmd)
+				Eventually(func(g Gomega) {
+					res, err := e2e.RunCommand(cmd)
+					Expect(err).NotTo(HaveOccurred())
+					fmt.Println(res)
+					g.Expect(res).Should(ContainSubstring("test-nodeport"))
+				}, "240s", "5s").Should(Succeed())
+			}
+		})
+
+		It("Verifies LoadBalancer Service", func() {
+			_, err := e2e.DeployWorkload("loadbalancer.yaml", kubeConfigFile, false)
+			Expect(err).NotTo(HaveOccurred(), "Loadbalancer manifest not deployed")
+
+			for _, nodeName := range serverNodenames {
+				ip, _ := e2e.FetchNodeExternalIP(nodeName)
+
+				cmd := "kubectl get service nginx-loadbalancer-svc --kubeconfig=" + kubeConfigFile + " --output jsonpath=\"{.spec.ports[0].port}\""
+				port, err := e2e.RunCommand(cmd)
+				Expect(err).NotTo(HaveOccurred())
+
+				Eventually(func(g Gomega) {
+					cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-loadbalancer --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
+					res, err := e2e.RunCommand(cmd)
+					Expect(err).NotTo(HaveOccurred())
+					g.Expect(res).Should(ContainSubstring("test-loadbalancer"))
+				}, "240s", "5s").Should(Succeed())
+
+				Eventually(func(g Gomega) {
+					cmd = "curl -L --insecure http://" + ip + ":" + port + "/name.html"
+					fmt.Println(cmd)
+					res, err := e2e.RunCommand(cmd)
+					Expect(err).NotTo(HaveOccurred())
+					fmt.Println(res)
+					g.Expect(res).Should(ContainSubstring("test-loadbalancer"))
+				}, "240s", "5s").Should(Succeed())
+			}
+		})
+
+		It("Verifies Ingress", func() {
+			_, err := e2e.DeployWorkload("ingress.yaml", kubeConfigFile, false)
+			Expect(err).NotTo(HaveOccurred(), "Ingress manifest not deployed")
+
+			for _, nodeName := range serverNodenames {
+				ip, _ := e2e.FetchNodeExternalIP(nodeName)
+				cmd := "curl --header host:foo1.bar.com" + " http://" + ip + "/name.html"
+				fmt.Println(cmd)
+
+				Eventually(func(g Gomega) {
+					res, err := e2e.RunCommand(cmd)
+					g.Expect(err).NotTo(HaveOccurred())
+					fmt.Println(res)
+					g.Expect(res).Should(ContainSubstring("test-ingress"))
+				}, "240s", "5s").Should(Succeed())
+			}
+		})
+
+		It("Verifies Daemonset", func() {
+			_, err := e2e.DeployWorkload("daemonset.yaml", kubeConfigFile, false)
+			Expect(err).NotTo(HaveOccurred(), "Daemonset manifest not deployed")
+
+			nodes, _ := e2e.ParseNodes(kubeConfigFile, false)
+			pods, _ := e2e.ParsePods(kubeConfigFile, false)
+
+			Eventually(func(g Gomega) {
+				count := e2e.CountOfStringInSlice("test-daemonset", pods)
+				fmt.Println("POD COUNT")
+				fmt.Println(count)
+				fmt.Println("NODE COUNT")
+				fmt.Println(len(nodes))
+				g.Expect(len(nodes)).Should((Equal(count)), "Daemonset pod count does not match node count")
+			}, "420s", "10s").Should(Succeed())
+		})
+
+		It("Verifies dns access", func() {
+			_, err := e2e.DeployWorkload("dnsutils.yaml", kubeConfigFile, false)
+			Expect(err).NotTo(HaveOccurred(), "dnsutils manifest not deployed")
+
+			Eventually(func(g Gomega) {
+				cmd := "kubectl get pods dnsutils --kubeconfig=" + kubeConfigFile
+				res, _ := e2e.RunCommand(cmd)
+				fmt.Println(res)
+				g.Expect(res).Should(ContainSubstring("dnsutils"))
+			}, "420s", "2s").Should(Succeed())
+
+			Eventually(func(g Gomega) {
+				cmd := "kubectl --kubeconfig=" + kubeConfigFile + " exec -i -t dnsutils -- nslookup kubernetes.default"
+				fmt.Println(cmd)
+				res, _ := e2e.RunCommand(cmd)
+				fmt.Println(res)
+				g.Expect(res).Should(ContainSubstring("kubernetes.default.svc.cluster.local"))
+			}, "420s", "2s").Should(Succeed())
+		})
+
+		It("Verifies Local Path Provisioner storage ", func() {
+			_, err := e2e.DeployWorkload("local-path-provisioner.yaml", kubeConfigFile, false)
+			Expect(err).NotTo(HaveOccurred(), "local-path-provisioner manifest not deployed")
+
+			Eventually(func(g Gomega) {
+				cmd := "kubectl get pvc local-path-pvc --kubeconfig=" + kubeConfigFile
+				res, err := e2e.RunCommand(cmd)
+				g.Expect(err).NotTo(HaveOccurred())
+				fmt.Println(res)
+				g.Expect(res).Should(ContainSubstring("local-path-pvc"))
+				g.Expect(res).Should(ContainSubstring("Bound"))
+			}, "420s", "2s").Should(Succeed())
+
+			Eventually(func(g Gomega) {
+				cmd := "kubectl get pod volume-test --kubeconfig=" + kubeConfigFile
+				res, err := e2e.RunCommand(cmd)
+				Expect(err).NotTo(HaveOccurred())
+				fmt.Println(res)
+				g.Expect(res).Should(ContainSubstring("volume-test"))
+				g.Expect(res).Should(ContainSubstring("Running"))
+			}, "420s", "2s").Should(Succeed())
+
+			cmd := "kubectl --kubeconfig=" + kubeConfigFile + " exec volume-test -- sh -c 'echo local-path-test > /data/test'"
+			_, err = e2e.RunCommand(cmd)
+			Expect(err).NotTo(HaveOccurred())
+			fmt.Println("Data stored in pvc: local-path-test")
+
+			cmd = "kubectl delete pod volume-test --kubeconfig=" + kubeConfigFile
+			res, err := e2e.RunCommand(cmd)
+			Expect(err).NotTo(HaveOccurred())
+			fmt.Println(res)
+
+			_, err = e2e.DeployWorkload("local-path-provisioner.yaml", kubeConfigFile, false)
+			Expect(err).NotTo(HaveOccurred(), "local-path-provisioner manifest not deployed")
+
+			Eventually(func(g Gomega) {
+				cmd := "kubectl get pods -o=name -l app=local-path-provisioner --field-selector=status.phase=Running -n kube-system --kubeconfig=" + kubeConfigFile
+				res, _ := e2e.RunCommand(cmd)
+				fmt.Println(res)
+				g.Expect(res).Should(ContainSubstring("local-path-provisioner"))
+			}, "420s", "2s").Should(Succeed())
+
+			Eventually(func(g Gomega) {
+				cmd := "kubectl get pod volume-test --kubeconfig=" + kubeConfigFile
+				res, err := e2e.RunCommand(cmd)
+				g.Expect(err).NotTo(HaveOccurred())
+				fmt.Println(res)
+				g.Expect(res).Should(ContainSubstring("volume-test"))
+				g.Expect(res).Should(ContainSubstring("Running"))
+			}, "420s", "2s").Should(Succeed())
+
+			Eventually(func(g Gomega) {
+				cmd = "kubectl exec volume-test cat /data/test --kubeconfig=" + kubeConfigFile
+				res, err = e2e.RunCommand(cmd)
+				g.Expect(err).NotTo(HaveOccurred())
+				fmt.Println("Data after re-creation", res)
+				g.Expect(res).Should(ContainSubstring("local-path-test"))
+			}, "180s", "2s").Should(Succeed())
+		})
	})
})
@@ -5,8 +5,7 @@ import (
    "strings"
    "testing"

-   . "github.com/onsi/ginkgo"
-   "github.com/onsi/ginkgo/reporters"
+   . "github.com/onsi/ginkgo/v2"
    . "github.com/onsi/gomega"
    testutil "github.com/rancher/k3s/tests/util"
)

@@ -62,7 +61,5 @@ var _ = AfterSuite(func() {

func Test_IntegrationCustomEtcdArgs(t *testing.T) {
    RegisterFailHandler(Fail)
-   RunSpecsWithDefaultAndCustomReporters(t, "Custom etcd Arguments", []Reporter{
-       reporters.NewJUnitReporter("/tmp/results/junit-ls.xml"),
-   })
+   RunSpecs(t, "Custom etcd Arguments")
}
@@ -5,8 +5,7 @@ import (
    "strings"
    "testing"

-   . "github.com/onsi/ginkgo"
-   "github.com/onsi/ginkgo/reporters"
+   . "github.com/onsi/ginkgo/v2"
    . "github.com/onsi/gomega"
    testutil "github.com/rancher/k3s/tests/util"
)

@@ -63,7 +62,5 @@ var _ = AfterSuite(func() {

func Test_IntegrationDualStack(t *testing.T) {
    RegisterFailHandler(Fail)
-   RunSpecsWithDefaultAndCustomReporters(t, "Dual-Stack Suite", []Reporter{
-       reporters.NewJUnitReporter("/tmp/results/junit-ls.xml"),
-   })
+   RunSpecs(t, "Dual-Stack Suite")
}
@@ -4,8 +4,7 @@ import (
    "strings"
    "testing"

-   . "github.com/onsi/ginkgo"
-   "github.com/onsi/ginkgo/reporters"
+   . "github.com/onsi/ginkgo/v2"
    . "github.com/onsi/gomega"
    testutil "github.com/rancher/k3s/tests/util"
)

@@ -117,7 +116,5 @@ var _ = AfterSuite(func() {

func Test_IntegrationEtcdRestoreSnapshot(t *testing.T) {
    RegisterFailHandler(Fail)
-   RunSpecsWithDefaultAndCustomReporters(t, "Etcd Suite", []Reporter{
-       reporters.NewJUnitReporter("/tmp/results/junit-etcd-restore.xml"),
-   })
+   RunSpecs(t, "Etcd Restore Suite")
}
@@ -6,8 +6,7 @@ import (
    "testing"
    "time"

-   . "github.com/onsi/ginkgo"
-   "github.com/onsi/ginkgo/reporters"
+   . "github.com/onsi/ginkgo/v2"
    . "github.com/onsi/gomega"
    testutil "github.com/rancher/k3s/tests/util"
)

@@ -123,7 +122,5 @@ var _ = AfterSuite(func() {

func Test_IntegrationEtcdSnapshot(t *testing.T) {
    RegisterFailHandler(Fail)
-   RunSpecsWithDefaultAndCustomReporters(t, "Etcd Suite", []Reporter{
-       reporters.NewJUnitReporter("/tmp/results/junit-etcd.xml"),
-   })
+   RunSpecs(t, "Etcd Snapshot Suite")
}
@@ -7,8 +7,7 @@ import (
    "strings"
    "testing"

-   . "github.com/onsi/ginkgo"
-   "github.com/onsi/ginkgo/reporters"
+   . "github.com/onsi/ginkgo/v2"
    . "github.com/onsi/gomega"
    testutil "github.com/rancher/k3s/tests/util"
)

@@ -92,7 +91,5 @@ var _ = AfterSuite(func() {

func Test_IntegrationLocalStorage(t *testing.T) {
    RegisterFailHandler(Fail)
-   RunSpecsWithDefaultAndCustomReporters(t, "Local Storage Suite", []Reporter{
-       reporters.NewJUnitReporter("/tmp/results/junit-ls.xml"),
-   })
+   RunSpecs(t, "Local Storage Suite")
}
@@ -6,8 +6,7 @@ import (
    "testing"
    "time"

-   . "github.com/onsi/ginkgo"
-   "github.com/onsi/ginkgo/reporters"
+   . "github.com/onsi/ginkgo/v2"
    . "github.com/onsi/gomega"
    testutil "github.com/rancher/k3s/tests/util"
)

@@ -150,7 +149,5 @@ var _ = AfterSuite(func() {

func Test_IntegrationSecretsEncryption(t *testing.T) {
    RegisterFailHandler(Fail)
-   RunSpecsWithDefaultAndCustomReporters(t, "Secrets Encryption Suite", []Reporter{
-       reporters.NewJUnitReporter("/tmp/results/junit-se.xml"),
-   })
+   RunSpecs(t, "Secrets Encryption Suite")
}