[Release-1.24] Add E2E testing in Drone (#7376)

* Initial drone vagrant pipeline

Signed-off-by: Derek Nola <derek.nola@suse.com>

* Build e2e test image
* Add ci flag to secretsencryption
* Fix vagrant log on secretsencryption
* Add cron conformance pipeline
* Add string output for nodes
* Switch snapshot restore for upgrade cluster

Signed-off-by: Derek Nola <derek.nola@suse.com>

* Added IPv6 check and agent restart to e2e test utils

Signed-off-by: Roberto Bonafiglia <roberto.bonafiglia@suse.com>

* Drone: Cleanup E2E VMs on test panic (#7104)

* Cleanup leftover VMs in E2E pipeline

* Clean E2E VMs before testing (#7109)

* Cleanup VMs proper

Signed-off-by: Derek Nola <derek.nola@suse.com>

* Don't run most pipelines on nightly cron
* Improve RunCmdOnNode error
* Pin upgradecluster to v1.24

Signed-off-by: Derek Nola <derek.nola@suse.com>

---------

Signed-off-by: Derek Nola <derek.nola@suse.com>
Signed-off-by: Roberto Bonafiglia <roberto.bonafiglia@suse.com>
Co-authored-by: Roberto Bonafiglia <roberto.bonafiglia@suse.com>
Derek Nola 2023-05-01 14:14:28 -07:00 committed by GitHub
parent 3f79b28922
commit 8f27774e8b
11 changed files with 328 additions and 65 deletions
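The new -ci and -local test flags follow a single pattern across the suites in this diff: -local swaps e2e.CreateCluster for e2e.CreateLocalCluster so the locally built binary is deployed to the VMs, and -ci forces VM teardown in AfterSuite even when a spec failed, which is what lets the Drone e2e pipeline reclaim its libvirt resources. Below is a condensed sketch of that pattern; the suite skeleton and names are illustrative, while the e2e helpers are the ones changed further down.

package example_test

import (
	"flag"
	"fmt"
	"testing"

	"github.com/k3s-io/k3s/tests/e2e"
	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

var (
	nodeOS          = flag.String("nodeOS", "generic/ubuntu2004", "VM operating system")
	serverCount     = flag.Int("serverCount", 3, "number of server nodes")
	ci              = flag.Bool("ci", false, "running on CI")
	local           = flag.Bool("local", false, "deploy a locally built K3s binary")
	serverNodeNames []string
	failed          bool
)

func Test_E2EExample(t *testing.T) {
	flag.Parse()
	RegisterFailHandler(Fail)
	RunSpecs(t, "Example E2E Suite")
}

var _ = Describe("Example suite", Ordered, func() {
	It("Starts up with no issues", func() {
		var err error
		if *local {
			// Copies ../../../dist/artifacts/k3s to each VM and installs it airgapped.
			serverNodeNames, _, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, 0)
		} else {
			serverNodeNames, _, err = e2e.CreateCluster(*nodeOS, *serverCount, 0)
		}
		// On failure, attach the vagrant provisioning log to the assertion output.
		Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
	})
})

var _ = AfterEach(func() {
	failed = failed || CurrentSpecReport().Failed()
})

var _ = AfterSuite(func() {
	if failed && !*ci {
		fmt.Println("FAILED!") // outside CI, keep the VMs around for debugging
	} else {
		Expect(e2e.DestroyCluster()).To(Succeed()) // in the Drone pipeline, always reclaim the VMs
	}
})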

View File

@@ -6,6 +6,11 @@ platform:
os: linux
arch: amd64
trigger:
event:
exclude:
- cron
steps:
- name: build
image: rancher/dapper:v0.5.0
@@ -128,6 +133,48 @@ volumes:
host:
path: /var/run/docker.sock
---
kind: pipeline
name: conformance
platform:
os: linux
arch: amd64
trigger:
event:
- cron
cron:
- nightly
steps:
- name: build
image: rancher/dapper:v0.5.0
commands:
- dapper ci
- echo "${DRONE_TAG}-amd64" | sed -e 's/+/-/g' >.tags
volumes:
- name: docker
path: /var/run/docker.sock
- name: test
image: rancher/dapper:v0.5.0
environment:
ENABLE_REGISTRY: 'true'
commands:
- docker build --target test-k3s -t k3s:test-${DRONE_STAGE_ARCH}-${DRONE_COMMIT} -f Dockerfile.test .
- >
docker run -i -e REPO -e TAG -e DRONE_TAG -e DRONE_BUILD_EVENT -e IMAGE_NAME -e SONOBUOY_VERSION -e ENABLE_REGISTRY
-v /var/run/docker.sock:/var/run/docker.sock --privileged --network host -v /tmp:/tmp k3s:test-${DRONE_STAGE_ARCH}-${DRONE_COMMIT}
volumes:
- name: docker
path: /var/run/docker.sock
volumes:
- name: docker
host:
path: /var/run/docker.sock
---
kind: pipeline
name: arm64
@@ -136,6 +183,11 @@ platform:
os: linux
arch: arm64
trigger:
event:
exclude:
- cron
steps:
- name: build
image: rancher/dapper:v0.5.0
@@ -222,6 +274,11 @@ platform:
os: linux
arch: arm
trigger:
event:
exclude:
- cron
steps:
- name: build
image: rancher/dapper:v0.5.0
@@ -312,6 +369,11 @@ platform:
node:
arch: s390x
trigger:
event:
exclude:
- cron
clone:
disable: true
@@ -412,6 +474,11 @@ platform:
os: linux
arch: amd64
trigger:
event:
exclude:
- cron
steps:
- name: validate_go_mods
image: rancher/dapper:v0.5.0
@@ -459,7 +526,10 @@ trigger:
- refs/head/master
- refs/tags/*
event:
- tag
include:
- tag
exclude:
- cron
depends_on:
- amd64
@@ -499,3 +569,76 @@ trigger:
depends_on:
- manifest
---
kind: pipeline
name: e2e
type: docker
platform:
os: linux
arch: amd64
steps:
- name: build-e2e-image
image: rancher/dapper:v0.5.0
commands:
- DOCKER_BUILDKIT=1 docker build --target test-e2e -t test-e2e -f Dockerfile.test .
- SKIP_VALIDATE=true SKIP_AIRGAP=true dapper ci
- cp dist/artifacts/* /tmp/artifacts/
volumes:
- name: cache
path: /tmp/artifacts
- name: docker
path: /var/run/docker.sock
- name: test-e2e
image: test-e2e
pull: never
resources:
cpu: 6000
memory: 10Gi
environment:
E2E_REGISTRY: 'true'
commands:
- mkdir -p dist/artifacts
- cp /tmp/artifacts/* dist/artifacts/
- docker stop registry && docker rm registry
# Clean up any VMs still running, left over if a previous test panicked
- |
VMS=$(virsh list --name | grep '_server-\|_agent-' || true)
if [ -n "$VMS" ]; then
for vm in $VMS
do
virsh destroy $vm
virsh undefine $vm --remove-all-storage
done
fi
- docker run -d -p 5000:5000 -e REGISTRY_PROXY_REMOTEURL=https://registry-1.docker.io --name registry registry:2
- cd tests/e2e/validatecluster
- vagrant destroy -f
- go test -v -timeout=45m ./validatecluster_test.go -ci -local
- cd ../secretsencryption
- vagrant destroy -f
- go test -v -timeout=30m ./secretsencryption_test.go -ci -local
- cd ../upgradecluster
- E2E_RELEASE_CHANNEL="v1.24" go test -v -timeout=45m ./upgradecluster_test.go -ci -local
- docker stop registry && docker rm registry
volumes:
- name: libvirt
path: /var/run/libvirt/
- name: docker
path: /var/run/docker.sock
- name: cache
path: /tmp/artifacts
volumes:
- name: docker
host:
path: /var/run/docker.sock
- name: libvirt
host:
path: /var/run/libvirt/
- name: cache
temp: {}

View File

@@ -35,3 +35,21 @@ ENV TEST_CLEANUP true
ENTRYPOINT ["./scripts/entry.sh"]
CMD ["test"]
FROM vagrantlibvirt/vagrant-libvirt:0.10.7 AS test-e2e
RUN apt-get update && apt-get install -y docker.io
RUN vagrant plugin install vagrant-k3s vagrant-reload vagrant-scp
RUN vagrant box add generic/ubuntu2004 --provider libvirt --force
RUN curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"; \
chmod +x ./kubectl; \
mv ./kubectl /usr/local/bin/kubectl
ENV GO_VERSION 1.19.2
RUN curl -O -L "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz"; \
rm -rf /usr/local/go; \
tar -C /usr/local -xzf go${GO_VERSION}.linux-amd64.tar.gz;
ENV PATH="${PATH}:/usr/local/go/bin"

View File

@@ -56,7 +56,7 @@ var (
ETCDServerServiceName = version.Program + "-etcd-server-load-balancer"
)
// New contstructs a new LoadBalancer instance. The default server URL, and
// New constructs a new LoadBalancer instance. The default server URL, and
// currently active servers, are stored in a file within the dataDir.
func New(ctx context.Context, dataDir, serviceName, serverURL string, lbServerPort int, isIPv6 bool) (_lb *LoadBalancer, _err error) {
config := net.ListenConfig{Control: reusePort}

View File

@@ -43,26 +43,47 @@ echo "Did test-run-lazypull $?"
[ "$ARCH" != 'amd64' ] && \
early-exit "Skipping remaining tests, images not available for $ARCH."
E2E_OUTPUT=$artifacts test-run-sonobuoy serial
echo "Did test-run-sonobuoy serial $?"
# ---
if [ "$DRONE_BUILD_EVENT" = 'tag' ]; then
E2E_OUTPUT=$artifacts test-run-sonobuoy serial
echo "Did test-run-sonobuoy serial $?"
E2E_OUTPUT=$artifacts test-run-sonobuoy parallel
echo "Did test-run-sonobuoy parallel $?"
early-exit 'Skipping remaining tests on tag.'
fi
# ---
test-run-sonobuoy etcd serial
echo "Did test-run-sonobuoy-etcd serial $?"
test-run-sonobuoy mysql serial
echo "Did test-run-sonobuoy-mysqk serial $?"
test-run-sonobuoy postgres serial
echo "Did test-run-sonobuoy-postgres serial $?"
if [ "$DRONE_BUILD_EVENT" = 'cron' ]; then
E2E_OUTPUT=$artifacts test-run-sonobuoy serial
echo "Did test-run-sonobuoy serial $?"
test-run-sonobuoy etcd serial
echo "Did test-run-sonobuoy-etcd serial $?"
test-run-sonobuoy mysql serial
echo "Did test-run-sonobuoy-mysqk serial $?"
test-run-sonobuoy postgres serial
echo "Did test-run-sonobuoy-postgres serial $?"
# Wait until all serial tests have finished
# Wait until all serial tests have finished
delay=15
(
set +x
while [ $(count-running-tests) -ge 1 ]; do
sleep $delay
done
)
E2E_OUTPUT=$artifacts test-run-sonobuoy parallel
echo "Did test-run-sonobuoy parallel $?"
test-run-sonobuoy etcd parallel
echo "Did test-run-sonobuoy-etcd parallel $?"
test-run-sonobuoy mysql parallel
echo "Did test-run-sonobuoy-mysql parallel $?"
test-run-sonobuoy postgres parallel
echo "Did test-run-sonobuoy-postgres parallel $?"
fi
# Wait until all tests have finished
delay=15
(
set +x
@@ -70,16 +91,5 @@ while [ $(count-running-tests) -ge 1 ]; do
sleep $delay
done
)
E2E_OUTPUT=$artifacts test-run-sonobuoy parallel
echo "Did test-run-sonobuoy parallel $?"
test-run-sonobuoy etcd parallel
echo "Did test-run-sonobuoy-etcd parallel $?"
test-run-sonobuoy mysql parallel
echo "Did test-run-sonobuoy-mysql parallel $?"
test-run-sonobuoy postgres parallel
echo "Did test-run-sonobuoy-postgres parallel $?"
exit 0

View File

@@ -29,9 +29,6 @@ E2E_REGISTRY=true E2E_HARDENED="$hardened" /usr/local/go/bin/go test -v validate
echo 'RUNNING SECRETS ENCRYPTION TEST'
/usr/local/go/bin/go test -v secretsencryption/secretsencryption_test.go -nodeOS="$nodeOS" -serverCount=$((servercount)) -timeout=1h -json -ci | tee -a k3s_"$OS".log
echo 'RUN CLUSTER RESET TEST'
/usr/local/go/bin/go test -v clusterreset/clusterreset_test.go -nodeOS="$nodeOS" -serverCount=3 -agentCount=1 -timeout=30m -json -ci | tee -a createreport/k3s_"$OS".log
echo 'RUNNING SPLIT SERVER VALIDATION TEST'
E2E_HARDENED="$hardened" /usr/local/go/bin/go test -v splitserver/splitserver_test.go -nodeOS="$nodeOS" -timeout=30m -json -ci | tee -a k3s_"$OS".log

View File

@@ -16,6 +16,8 @@ import (
var nodeOS = flag.String("nodeOS", "generic/ubuntu2004", "VM operating system")
var serverCount = flag.Int("serverCount", 3, "number of server nodes")
var hardened = flag.Bool("hardened", false, "true or false")
var ci = flag.Bool("ci", false, "running on CI")
var local = flag.Bool("local", false, "deploy a locally built K3s binary")
// Environment Variables Info:
// E2E_RELEASE_VERSION=v1.23.1+k3s2 or nil for latest commit from master
@@ -38,7 +40,11 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() {
Context("Secrets Keys are rotated:", func() {
It("Starts up with no issues", func() {
var err error
serverNodeNames, _, err = e2e.CreateCluster(*nodeOS, *serverCount, 0)
if *local {
serverNodeNames, _, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, 0)
} else {
serverNodeNames, _, err = e2e.CreateCluster(*nodeOS, *serverCount, 0)
}
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
fmt.Println("CLUSTER CONFIG")
fmt.Println("OS:", *nodeOS)
@@ -107,7 +113,7 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() {
})
It("Restarts K3s servers", func() {
Expect(e2e.RestartCluster(serverNodeNames)).To(Succeed())
Expect(e2e.RestartCluster(serverNodeNames)).To(Succeed(), e2e.GetVagrantLog(nil))
})
It("Checks node and pod status", func() {
@@ -166,7 +172,7 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() {
})
It("Restarts K3s servers", func() {
Expect(e2e.RestartCluster(serverNodeNames)).To(Succeed())
Expect(e2e.RestartCluster(serverNodeNames)).To(Succeed(), e2e.GetVagrantLog(nil))
})
It("Verifies encryption rotate stage", func() {
@@ -201,7 +207,7 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() {
})
It("Restarts K3s Servers", func() {
Expect(e2e.RestartCluster(serverNodeNames)).To(Succeed())
Expect(e2e.RestartCluster(serverNodeNames)).To(Succeed(), e2e.GetVagrantLog(nil))
})
It("Verifies Encryption Reencrypt Stage", func() {
@@ -300,7 +306,7 @@ var _ = AfterEach(func() {
})
var _ = AfterSuite(func() {
if failed {
if failed && !*ci {
fmt.Println("FAILED!")
} else {
Expect(e2e.DestroyCluster()).To(Succeed())

View File

@@ -6,6 +6,7 @@ import (
"os"
"strings"
"testing"
"time"
"github.com/k3s-io/k3s/tests/e2e"
. "github.com/onsi/ginkgo/v2"
@@ -44,7 +45,7 @@ var (
var _ = ReportAfterEach(e2e.GenReport)
var _ = Describe("Verify snapshots and cluster restores work", Ordered, func() {
Context("Cluster :", func() {
Context("Cluster creates snapshots and workloads:", func() {
It("Starts up with no issues", func() {
var err error
if *local {
@@ -122,6 +123,8 @@ var _ = Describe("Verify snapshots and cluster restores work", Ordered, func() {
}, "240s", "5s").Should(Succeed())
})
})
Context("Cluster is reset normally", func() {
It("Resets the cluster", func() {
for _, nodeName := range serverNodeNames {
cmd := "sudo systemctl stop k3s"
@@ -168,6 +171,7 @@ var _ = Describe("Verify snapshots and cluster restores work", Ordered, func() {
for _, nodeName := range serverNodeNames[1:] {
cmd := "sudo systemctl start k3s"
Expect(e2e.RunCmdOnNode(cmd, nodeName)).Error().NotTo(HaveOccurred())
time.Sleep(20 * time.Second) // Stagger the restarts for etcd learners
}
})
@@ -176,7 +180,8 @@ var _ = Describe("Verify snapshots and cluster restores work", Ordered, func() {
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, node := range nodes {
g.Expect(node.Status).Should(Equal("Ready"))
nodeJournal, _ := e2e.GetJournalLogs(node.Name)
g.Expect(node.Status).Should(Equal("Ready"), nodeJournal)
}
}, "420s", "5s").Should(Succeed())
@@ -203,6 +208,8 @@ var _ = Describe("Verify snapshots and cluster restores work", Ordered, func() {
Expect(res).Should(ContainSubstring("test-nodeport"))
})
})
Context("Cluster restores from snapshot", func() {
It("Restores the snapshot", func() {
//Stop k3s on all nodes
for _, nodeName := range serverNodeNames {

View File

@@ -25,6 +25,10 @@ type Node struct {
ExternalIP string
}
func (n Node) String() string {
return fmt.Sprintf("Node (name: %s, status: %s, roles: %s)", n.Name, n.Status, n.Roles)
}
type Pod struct {
NameSpace string
Name string
@@ -136,6 +140,19 @@ func CreateCluster(nodeOS string, serverCount, agentCount int) ([]string, []stri
return serverNodeNames, agentNodeNames, nil
}
func scpK3sBinary(nodeNames []string) error {
for _, node := range nodeNames {
cmd := fmt.Sprintf(`vagrant scp ../../../dist/artifacts/k3s %s:/tmp/`, node)
if _, err := RunCommand(cmd); err != nil {
return fmt.Errorf("failed to scp k3s binary to %s: %v", node, err)
}
if _, err := RunCmdOnNode("sudo mv /tmp/k3s /usr/local/bin/", node); err != nil {
return err
}
}
return nil
}
// CreateLocalCluster creates a cluster using the locally built k3s binary. The vagrant-scp plugin must be installed for
// this function to work. The binary is deployed as an airgapped install of k3s on the VMs.
// This is intended only for local testing purposes when writing a new E2E test.
@@ -173,14 +190,8 @@ func CreateLocalCluster(nodeOS string, serverCount, agentCount int) ([]string, [
if err := errg.Wait(); err != nil {
return nil, nil, err
}
for _, node := range append(serverNodeNames, agentNodeNames...) {
cmd = fmt.Sprintf(`vagrant scp ../../../dist/artifacts/k3s %s:/tmp/`, node)
if _, err := RunCommand(cmd); err != nil {
return nil, nil, fmt.Errorf("failed to scp k3s binary to %s: %v", node, err)
}
if _, err := RunCmdOnNode("sudo mv /tmp/k3s /usr/local/bin/", node); err != nil {
return nil, nil, err
}
if err := scpK3sBinary(append(serverNodeNames, agentNodeNames...)); err != nil {
return nil, nil, err
}
// Install K3s on all nodes in parallel
@@ -203,6 +214,15 @@ func CreateLocalCluster(nodeOS string, serverCount, agentCount int) ([]string, [
return serverNodeNames, agentNodeNames, nil
}
// Deletes the content of a manifest file previously applied
func DeleteWorkload(workload, kubeconfig string) error {
cmd := "kubectl delete -f " + workload + " --kubeconfig=" + kubeconfig
if _, err := RunCommand(cmd); err != nil {
return err
}
return nil
}
func DeployWorkload(workload, kubeconfig string, hardened bool) (string, error) {
resourceDir := "../amd64_resource_files"
if hardened {
@@ -302,13 +322,18 @@ func GenReport(specReport ginkgo.SpecReport) {
fmt.Printf("%s", status)
}
func GetJournalLogs(node string) (string, error) {
cmd := "journalctl -u k3s* --no-pager"
return RunCmdOnNode(cmd, node)
}
// GetVagrantLog returns the logs of the vagrant commands that initialize the nodes and provision K3s on each node.
// It also attempts to fetch the systemctl logs of K3s on nodes where the k3s.service failed.
func GetVagrantLog(cErr error) string {
var nodeErr *NodeError
nodeJournal := ""
if errors.As(cErr, &nodeErr) {
nodeJournal, _ = RunCmdOnNode("sudo journalctl -u k3s* --no-pager", nodeErr.Node)
nodeJournal, _ = GetJournalLogs(nodeErr.Node)
nodeJournal = "\nNode Journal Logs:\n" + nodeJournal
}
@@ -331,7 +356,7 @@ func ParseNodes(kubeConfig string, print bool) ([]Node, error) {
res, err := RunCommand(cmd)
if err != nil {
return nil, err
return nil, fmt.Errorf("unable to get nodes: %s: %v", res, err)
}
nodeList = strings.TrimSpace(res)
split := strings.Split(nodeList, "\n")
@@ -391,7 +416,18 @@ func ParsePods(kubeConfig string, print bool) ([]Pod, error) {
// RestartCluster restarts the k3s service on each node given
func RestartCluster(nodeNames []string) error {
for _, nodeName := range nodeNames {
cmd := "sudo systemctl restart k3s"
cmd := "sudo systemctl restart k3s*"
if _, err := RunCmdOnNode(cmd, nodeName); err != nil {
return err
}
}
return nil
}
// RestartClusterAgent restarts the k3s-agent service on each node given
func RestartClusterAgent(nodeNames []string) error {
for _, nodeName := range nodeNames {
cmd := "sudo systemctl restart k3s-agent"
if _, err := RunCmdOnNode(cmd, nodeName); err != nil {
return err
}
@@ -404,7 +440,7 @@ func RunCmdOnNode(cmd string, nodename string) (string, error) {
runcmd := "vagrant ssh -c \"" + cmd + "\" " + nodename
out, err := RunCommand(runcmd)
if err != nil {
return out, fmt.Errorf("failed to run command %s on node %s: %v", cmd, nodename, err)
return out, fmt.Errorf("failed to run command: %s on node %s: %s, %v", cmd, nodename, out, err)
}
return out, nil
}
@@ -416,19 +452,18 @@ func RunCommand(cmd string) (string, error) {
return string(out), err
}
func UpgradeCluster(serverNodeNames []string, agentNodeNames []string) error {
for _, nodeName := range serverNodeNames {
cmd := "E2E_RELEASE_CHANNEL=commit vagrant provision " + nodeName
fmt.Println(cmd)
if out, err := RunCommand(cmd); err != nil {
fmt.Println("Error Upgrading Cluster", out)
func UpgradeCluster(nodeNames []string, local bool) error {
upgradeVersion := "E2E_RELEASE_CHANNEL=commit"
if local {
if err := scpK3sBinary(nodeNames); err != nil {
return err
}
upgradeVersion = "E2E_RELEASE_VERSION=skip"
}
for _, nodeName := range agentNodeNames {
cmd := "E2E_RELEASE_CHANNEL=commit vagrant provision " + nodeName
if _, err := RunCommand(cmd); err != nil {
fmt.Println("Error Upgrading Cluster", err)
for _, nodeName := range nodeNames {
cmd := upgradeVersion + " vagrant provision " + nodeName
if out, err := RunCommand(cmd); err != nil {
fmt.Println("Error Upgrading Cluster", out)
return err
}
}
@@ -462,7 +497,11 @@ func GetObjIPs(cmd string) ([]ObjIP, error) {
if len(fields) > 2 {
objIPs = append(objIPs, ObjIP{Name: fields[0], IPv4: fields[1], IPv6: fields[2]})
} else if len(fields) > 1 {
objIPs = append(objIPs, ObjIP{Name: fields[0], IPv4: fields[1]})
if strings.Contains(fields[1], ".") {
objIPs = append(objIPs, ObjIP{Name: fields[0], IPv4: fields[1]})
} else {
objIPs = append(objIPs, ObjIP{Name: fields[0], IPv6: fields[1]})
}
} else {
objIPs = append(objIPs, ObjIP{Name: fields[0]})
}
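For context on the IPv6 change above: when an object reports a single address, it is now classified as IPv4 or IPv6 by the presence of a ".", so single-stack IPv6 clusters no longer have their only address recorded in the IPv4 field. A minimal usage sketch follows; the getNodeIPs wrapper and its kubectl query are illustrative assumptions, and only e2e.GetObjIPs and e2e.ObjIP come from this change.

package main

import (
	"fmt"

	"github.com/k3s-io/k3s/tests/e2e"
)

// getNodeIPs is a hypothetical wrapper: it prints one "name address [address]" line per
// node and lets GetObjIPs split the fields into the IPv4/IPv6 fields as shown above.
func getNodeIPs(kubeConfigFile string) ([]e2e.ObjIP, error) {
	cmd := `kubectl get nodes -o jsonpath='{range .items[*]}{.metadata.name}{" "}{.status.addresses[?(@.type=="InternalIP")].address}{"\n"}{end}' --kubeconfig=` + kubeConfigFile
	return e2e.GetObjIPs(cmd)
}

func main() {
	ips, err := getNodeIPs("/tmp/example-kubeconfig")
	if err != nil {
		fmt.Println(err)
		return
	}
	for _, ip := range ips {
		// Single-stack IPv6 nodes populate only IPv6; dual-stack nodes populate both.
		fmt.Printf("%s IPv4=%q IPv6=%q\n", ip.Name, ip.IPv4, ip.IPv6)
	}
}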

View File

@@ -25,16 +25,16 @@ def provision(vm, role, role_num, node_num)
load vagrant_defaults
defaultOSConfigure(vm)
if !RELEASE_VERSION.empty?
if RELEASE_VERSION == "skip"
install_type = "INSTALL_K3S_SKIP_DOWNLOAD=true"
elsif !RELEASE_VERSION.empty?
install_type = "INSTALL_K3S_VERSION=#{RELEASE_VERSION}"
elsif RELEASE_CHANNEL == "commit"
vm.provision "shell", path: "../scripts/latest_commit.sh", args: ["master", "/tmp/k3s_commits"]
install_type = "INSTALL_K3S_COMMIT=$(head\ -n\ 1\ /tmp/k3s_commits)"
else
vm.provision "latest version", type: "shell",
inline: "curl -w '%{url_effective}' -L -s -S https://update.k3s.io/v1-release/channels/#{RELEASE_CHANNEL} -o /dev/null | sed -e 's|.*/||' &> /tmp/k3s_version"
install_type = "INSTALL_K3S_VERSION=$(cat\ /tmp/k3s_version)"
install_type = "INSTALL_K3S_CHANNEL=#{RELEASE_CHANNEL}"
end

View File

@@ -20,10 +20,11 @@ var serverCount = flag.Int("serverCount", 3, "number of server nodes")
var agentCount = flag.Int("agentCount", 2, "number of agent nodes")
var hardened = flag.Bool("hardened", false, "true or false")
var ci = flag.Bool("ci", false, "running on CI")
var local = flag.Bool("local", false, "Controls which version k3s upgrades to: local binary or latest commit on master")
// Environment Variables Info:
// E2E_REGISTRY: true/false (default: false)
// Controls which K3s version is installed first, upgrade is always to latest commit
// Controls which K3s version is installed first
// E2E_RELEASE_VERSION=v1.23.3+k3s1
// OR
// E2E_RELEASE_CHANNEL=(commit|latest|stable), commit pulls latest commit from master
@@ -249,9 +250,8 @@ var _ = Describe("Verify Upgrade", Ordered, func() {
It("Upgrades with no issues", func() {
var err error
err = e2e.UpgradeCluster(serverNodeNames, agentNodeNames)
fmt.Println(err)
Expect(err).NotTo(HaveOccurred())
Expect(e2e.UpgradeCluster(append(serverNodeNames, agentNodeNames...), *local)).To(Succeed())
Expect(e2e.RestartCluster(append(serverNodeNames, agentNodeNames...))).To(Succeed())
fmt.Println("CLUSTER UPGRADED")
kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0])
Expect(err).NotTo(HaveOccurred())
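The -local flag here ties together the UpgradeCluster and Vagrantfile changes earlier in the diff: with -local, the locally built binary is copied to every node and the provisioner is told to skip the download; without it, nodes are re-provisioned from the latest commit on master. A condensed sketch of the flow, assuming the surrounding test file's imports and using only helpers shown in this change (error handling trimmed):

// upgradeExample mirrors the upgrade step of the test above.
func upgradeExample(serverNodeNames, agentNodeNames []string, local bool) error {
	nodes := append(serverNodeNames, agentNodeNames...)

	// local=true : scp ../../../dist/artifacts/k3s to each VM, then run
	//              "E2E_RELEASE_VERSION=skip vagrant provision <node>", which the
	//              Vagrantfile maps to INSTALL_K3S_SKIP_DOWNLOAD=true.
	// local=false: run "E2E_RELEASE_CHANNEL=commit vagrant provision <node>", which
	//              installs the latest commit from master.
	if err := e2e.UpgradeCluster(nodes, local); err != nil {
		return err
	}

	// The upgraded units are restarted explicitly before node status is re-checked.
	return e2e.RestartCluster(nodes)
}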

View File

@@ -216,6 +216,49 @@ var _ = Describe("Verify Create", Ordered, func() {
}, "420s", "2s").Should(Succeed())
})
It("Verifies Restart", func() {
_, err := e2e.DeployWorkload("daemonset.yaml", kubeConfigFile, *hardened)
Expect(err).NotTo(HaveOccurred(), "Daemonset manifest not deployed")
defer e2e.DeleteWorkload("daemonset.yaml", kubeConfigFile)
nodes, _ := e2e.ParseNodes(kubeConfigFile, false)
Eventually(func(g Gomega) {
pods, _ := e2e.ParsePods(kubeConfigFile, false)
count := e2e.CountOfStringInSlice("test-daemonset", pods)
g.Expect(len(nodes)).Should((Equal(count)), "Daemonset pod count does not match node count")
podsRunning := 0
for _, pod := range pods {
if strings.Contains(pod.Name, "test-daemonset") && pod.Status == "Running" && pod.Ready == "1/1" {
podsRunning++
}
}
g.Expect(len(nodes)).Should((Equal(podsRunning)), "Daemonset running pods count does not match node count")
}, "620s", "5s").Should(Succeed())
errRestart := e2e.RestartCluster(serverNodeNames)
Expect(errRestart).NotTo(HaveOccurred(), "Server nodes did not restart correctly")
if len(agentNodeNames) > 0 {
errRestartAgent := e2e.RestartCluster(agentNodeNames)
Expect(errRestartAgent).NotTo(HaveOccurred(), "Agent nodes did not restart correctly")
}
Eventually(func(g Gomega) {
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, node := range nodes {
g.Expect(node.Status).Should(Equal("Ready"))
}
pods, _ := e2e.ParsePods(kubeConfigFile, false)
count := e2e.CountOfStringInSlice("test-daemonset", pods)
g.Expect(len(nodes)).Should((Equal(count)), "Daemonset pod count does not match node count")
podsRunningAr := 0
for _, pod := range pods {
if strings.Contains(pod.Name, "test-daemonset") && pod.Status == "Running" && pod.Ready == "1/1" {
podsRunningAr++
}
}
g.Expect(len(nodes)).Should((Equal(podsRunningAr)), "Daemonset pods are not running after the restart")
}, "620s", "5s").Should(Succeed())
})
It("Verifies Local Path Provisioner storage ", func() {
res, err := e2e.DeployWorkload("local-path-provisioner.yaml", kubeConfigFile, *hardened)
Expect(err).NotTo(HaveOccurred(), "local-path-provisioner manifest not deployed: "+res)