mirror of https://github.com/k3s-io/k3s
commit 9e461026a1

@@ -32,7 +32,7 @@ var (
 	agentNodeNames []string
 )
 
-var _ = Describe("Verify CRI-Dockerd", func() {
+var _ = Describe("Verify CRI-Dockerd", Ordered, func() {
 	Context("Cluster :", func() {
 		It("Starts up with no issues", func() {
 			var err error

@@ -70,7 +70,7 @@ var (
 	agentNodeNames []string
 )
 
-var _ = Describe("Verify DualStack Configuration", func() {
+var _ = Describe("Verify DualStack Configuration", Ordered, func() {
 
 	It("Starts up with no issues", func() {
 		var err error

@@ -201,7 +201,7 @@ var _ = Describe("Verify DualStack Configuration", func() {
 
 var failed bool
 var _ = AfterEach(func() {
-	failed = failed || CurrentGinkgoTestDescription().Failed
+	failed = failed || CurrentSpecReport().Failed()
 })
 
 var _ = AfterSuite(func() {

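These test hunks are part of the Ginkgo v2 migration that runs through the whole commit: every Describe container gains the Ordered decorator so its specs run sequentially in source order, and the v1 field CurrentGinkgoTestDescription().Failed becomes the v2 method CurrentSpecReport().Failed(). A minimal sketch of the resulting suite skeleton, assuming the standard ginkgo/v2 and gomega imports (the suite and spec names here are illustrative, not from this commit):

package e2e_test

import (
	"testing"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

func TestExample(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Example Suite")
}

var failed bool

// Ordered makes the container's specs run sequentially in source order,
// which these E2E suites depend on: the first spec creates the cluster
// and the later ones verify it.
var _ = Describe("Example", Ordered, func() {
	It("creates the cluster", func() {})
	It("verifies the cluster", func() {})
})

var _ = AfterEach(func() {
	// v2 replaces CurrentGinkgoTestDescription().Failed with
	// CurrentSpecReport().Failed().
	failed = failed || CurrentSpecReport().Failed()
})
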
@@ -7,13 +7,15 @@ curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stabl
 sudo mv kubectl /usr/local/bin/ && \
 chmod a+x /usr/local/bin/kubectl
 
-echo 'Installing jq'
-sudo apt-get -y install jq
+echo 'Installing jq and docker'
+sudo apt-get -y install jq docker.io
 
 echo 'Installing Go'
-curl -L https://dl.google.com/go/go1.16.10.linux-amd64.tar.gz | tar xz
-sudo mv go /usr/local
-/usr/local/go/bin/go version
+GO_VERSION=1.19.1
+wget --quiet https://dl.google.com/go/go$GO_VERSION.linux-amd64.tar.gz
+sudo rm -rf /usr/local/go && sudo tar -C /usr/local -xzf go$GO_VERSION.linux-amd64.tar.gz
+rm go$GO_VERSION.linux-amd64.tar.gz
+echo
 go version
 
 echo 'Installing Virtualbox'

@@ -29,11 +31,18 @@ echo 'Installing vagrant'
 sudo apt-get -y install -f unzip
 curl -O https://releases.hashicorp.com/vagrant/2.2.19/vagrant_2.2.19_linux_amd64.zip
 unzip vagrant_2.2.19_linux_amd64.zip
-sudo cp vagrant /usr/local/bin/
+sudo mv vagrant /usr/local/bin/
+rm vagrant_2.2.19_linux_amd64.zip
 vagrant --version
 sudo apt-get -y install libarchive-tools
-vagrant plugin install vagrant-k3s
-vagrant plugin install vagrant-reload
+vagrant plugin install vagrant-k3s vagrant-reload vagrant-scp
 
 echo 'Cloning repo'
 ls k3s 2>/dev/null || git clone https://github.com/k3s-io/k3s.git
+
+# Use curl -X GET <IP_ADDR>:5000/v2/_catalog to see cached images
+echo 'Setting up docker registry as a cache'
+docker run -d -p 5000:5000 \
+    -e REGISTRY_PROXY_REMOTEURL=https://registry-1.docker.io \
+    --restart always \
+    --name registry registry:2

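The registry:2 container with REGISTRY_PROXY_REMOTEURL pointing at registry-1.docker.io runs as a pull-through cache, so repeated E2E runs pull images from the runner instead of Docker Hub. As the comment above notes, the cache contents can be inspected through the registry's /v2/_catalog endpoint (part of the Docker Registry HTTP API V2). A rough Go equivalent of that curl check; the default address is an assumption for a local runner:

// Rough Go equivalent of: curl -X GET <IP_ADDR>:5000/v2/_catalog
package main

import (
	"fmt"
	"io"
	"net/http"
	"os"
)

func main() {
	addr := "127.0.0.1:5000"
	if len(os.Args) > 1 {
		addr = os.Args[1] + ":5000" // pass the runner's IP as the first argument
	}
	resp, err := http.Get("http://" + addr + "/v2/_catalog")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	// Prints e.g. {"repositories":["library/nginx","rancher/mirrored-pause"]}
	fmt.Println(string(body))
}
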
@@ -0,0 +1,11 @@
+#!/bin/bash
+
+# Script to to point k3s to the docker registry running on the host
+# This is used to avoid hitting dockerhub rate limits on E2E runners
+ip_addr=$1
+
+mkdir -p /etc/rancher/k3s/
+echo "mirrors:
+  docker.io:
+    endpoint:
+      - \"http://$ip_addr:5000\"" >> /etc/rancher/k3s/registries.yaml

@@ -7,14 +7,13 @@ k3s_channel=${k3s_channel:-"commit"}
 hardened=${8:-""}
 
 E2E_EXTERNAL_DB=$db && export E2E_EXTERNAL_DB
+E2E_REGISTRY=true && export E2E_REGISTRY
 
 eval openvpn --daemon --config external.ovpn &>/dev/null &
 sleep 10
 
 ssh -i "$1" -o "StrictHostKeyChecking no" $2@$3 'cd k3s && git pull --rebase origin master'
-ssh -i "$1" -o "StrictHostKeyChecking no" $2@$3 '/usr/local/go/bin/go get github.com/onsi/ginkgo/v2'
-ssh -i "$1" -o "StrictHostKeyChecking no" $2@$3 '/usr/local/go/bin/go get github.com/onsi/gomega'
-ssh -i "$1" -o "StrictHostKeyChecking no" $2@$3 '/usr/local/go/bin/go get github.com/k3s-io/k3s/tests/e2e'
+ssh -i "$1" -o "StrictHostKeyChecking no" $2@$3 'cd k3s && go mod tidy'
 
 echo 'RUNNING CLUSTER VALIDATION TEST'
 ssh -i "$1" -o "StrictHostKeyChecking no" $2@$3 'cd k3s/tests/e2e/validatecluster && vagrant destroy -f'

@@ -31,7 +31,7 @@ var (
 	serverNodeNames []string
 )
 
-var _ = Describe("Verify Secrets Encryption Rotation", func() {
+var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() {
 	Context("Cluster :", func() {
 		It("Starts up with no issues", func() {
 			var err error

@@ -70,7 +70,7 @@ var (
 	agentNodeNames []string
 )
 
-var _ = Describe("Verify Create", func() {
+var _ = Describe("Verify Create", Ordered, func() {
 	Context("Cluster :", func() {
 		It("Starts up with no issues", func() {
 			var err error

@@ -221,7 +221,7 @@ var _ = Describe("Verify Create", func() {
 
 var failed = false
 var _ = AfterEach(func() {
-	failed = failed || CurrentGinkgoTestDescription().Failed
+	failed = failed || CurrentSpecReport().Failed()
 })
 
 var _ = AfterSuite(func() {

@@ -1,6 +1,7 @@
 package e2e
 
 import (
+	"context"
 	"fmt"
 	"io/ioutil"
 	"os"

@@ -8,6 +9,9 @@ import (
 	"path/filepath"
 	"strconv"
 	"strings"
+	"time"
+
+	"golang.org/x/sync/errgroup"
 )
 
 type Node struct {

@@ -70,12 +74,30 @@ func CreateCluster(nodeOS string, serverCount, agentCount int) ([]string, []stri
 			testOptions += " " + env
 		}
 	}
-	cmd := fmt.Sprintf(`%s %s vagrant up &> vagrant.log`, nodeEnvs, testOptions)
+	// Bring up the first server node
+	cmd := fmt.Sprintf(`%s %s vagrant up %s &> vagrant.log`, nodeEnvs, testOptions, serverNodeNames[0])
+
 	fmt.Println(cmd)
 	if _, err := RunCommand(cmd); err != nil {
 		return nil, nil, fmt.Errorf("failed creating cluster: %s: %v", cmd, err)
 	}
+	// Bring up the rest of the nodes in parallel
+	errg, _ := errgroup.WithContext(context.Background())
+	for _, node := range append(serverNodeNames[1:], agentNodeNames...) {
+		cmd := fmt.Sprintf(`%s %s vagrant up %s &>> vagrant.log`, nodeEnvs, testOptions, node)
+		errg.Go(func() error {
+			if _, err := RunCommand(cmd); err != nil {
+				return fmt.Errorf("failed creating cluster: %s: %v", cmd, err)
+			}
+			return nil
+		})
+		// We must wait a bit between provisioning nodes to avoid too many learners attempting to join the cluster
+		time.Sleep(20 * time.Second)
+	}
+	if err := errg.Wait(); err != nil {
+		return nil, nil, err
+	}
 
 	return serverNodeNames, agentNodeNames, nil
 }
 

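The rewritten CreateCluster brings the remaining nodes up concurrently through golang.org/x/sync/errgroup: each vagrant up runs in its own goroutine, the loop staggers launches so etcd is not flooded with joining learners, and errg.Wait() surfaces the first failure. A self-contained sketch of the same launch pattern, with placeholder node names, a shortened delay, and exec.Command standing in for the suite's RunCommand helper:

package main

import (
	"context"
	"fmt"
	"os/exec"
	"time"

	"golang.org/x/sync/errgroup"
)

func main() {
	nodes := []string{"server-1", "server-2", "agent-0"} // placeholder names

	errg, _ := errgroup.WithContext(context.Background())
	for _, node := range nodes {
		node := node // capture the loop variable (needed before Go 1.22)
		errg.Go(func() error {
			// Stands in for RunCommand("vagrant up " + node).
			if err := exec.Command("echo", "vagrant", "up", node).Run(); err != nil {
				return fmt.Errorf("failed bringing up %s: %v", node, err)
			}
			return nil
		})
		// Stagger launches so nodes join the cluster a few at a time.
		time.Sleep(time.Second)
	}
	if err := errg.Wait(); err != nil {
		fmt.Println(err)
	}
}
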
@@ -87,6 +109,8 @@ func CreateLocalCluster(nodeOS string, serverCount, agentCount int) ([]string, [
 	serverNodeNames, agentNodeNames, nodeEnvs := genNodeEnvs(nodeOS, serverCount, agentCount)
 
 	var testOptions string
+	var cmd string
+
 	for _, env := range os.Environ() {
 		if strings.HasPrefix(env, "E2E_") {
 			testOptions += " " + env

@@ -94,14 +118,27 @@ func CreateLocalCluster(nodeOS string, serverCount, agentCount int) ([]string, [
 	}
 	testOptions += " E2E_RELEASE_VERSION=skip"
 
-	cmd := fmt.Sprintf(`%s vagrant up --no-provision &> vagrant.log`, nodeEnvs)
-	if _, err := RunCommand(cmd); err != nil {
-		return nil, nil, fmt.Errorf("failed creating nodes: %s: %v", cmd, err)
+	// Bring up the all of the nodes in parallel
+	errg, _ := errgroup.WithContext(context.Background())
+	for i, node := range append(serverNodeNames, agentNodeNames...) {
+		if i == 0 {
+			cmd = fmt.Sprintf(`%s %s vagrant up --no-provision %s &> vagrant.log`, nodeEnvs, testOptions, node)
+		} else {
+			cmd = fmt.Sprintf(`%s %s vagrant up --no-provision %s &>> vagrant.log`, nodeEnvs, testOptions, node)
+		}
+		errg.Go(func() error {
+			if _, err := RunCommand(cmd); err != nil {
+				return fmt.Errorf("failed creating cluster: %s: %v", cmd, err)
+			}
+			return nil
+		})
+		// libVirt/Virtualbox needs some time between provisioning nodes
+		time.Sleep(10 * time.Second)
 	}
-
-	nodeRoles := append(serverNodeNames, agentNodeNames...)
-
-	for _, node := range nodeRoles {
+	if err := errg.Wait(); err != nil {
+		return nil, nil, err
+	}
+	for _, node := range append(serverNodeNames, agentNodeNames...) {
 		cmd = fmt.Sprintf(`vagrant scp ../../../dist/artifacts/k3s %s:/tmp/`, node)
 		if _, err := RunCommand(cmd); err != nil {
 			return nil, nil, fmt.Errorf("failed to scp k3s binary to %s: %v", node, err)

@@ -111,9 +148,21 @@ func CreateLocalCluster(nodeOS string, serverCount, agentCount int) ([]string, [
 		}
 	}
 
-	cmd = fmt.Sprintf(`%s %s vagrant provision &>> vagrant.log`, nodeEnvs, testOptions)
-	if _, err := RunCommand(cmd); err != nil {
-		return nil, nil, fmt.Errorf("failed creating cluster: %s: %v", cmd, err)
+	// Install K3s on all nodes in parallel
+	errg, _ = errgroup.WithContext(context.Background())
+	for _, node := range append(serverNodeNames, agentNodeNames...) {
+		cmd = fmt.Sprintf(`%s %s vagrant provision %s &>> vagrant.log`, nodeEnvs, testOptions, node)
+		errg.Go(func() error {
+			if _, err := RunCommand(cmd); err != nil {
+				return fmt.Errorf("failed creating cluster: %s: %v", cmd, err)
+			}
+			return nil
+		})
+		// K3s needs some time between joining nodes to avoid learner issues
+		time.Sleep(20 * time.Second)
 	}
+	if err := errg.Wait(); err != nil {
+		return nil, nil, err
+	}
 
 	return serverNodeNames, agentNodeNames, nil

@@ -247,11 +296,11 @@ func ParseNodes(kubeConfig string, print bool) ([]Node, error) {
 	return nodes, nil
 }
 
-func ParsePods(kubeconfig string, print bool) ([]Pod, error) {
+func ParsePods(kubeConfig string, print bool) ([]Pod, error) {
 	pods := make([]Pod, 0, 10)
 	podList := ""
 
-	cmd := "kubectl get pods -o wide --no-headers -A --kubeconfig=" + kubeconfig
+	cmd := "kubectl get pods -o wide --no-headers -A --kubeconfig=" + kubeConfig
 	res, _ := RunCommand(cmd)
 	res = strings.TrimSpace(res)
 	podList = res

@@ -6,6 +6,7 @@ NODE_BOXES = (ENV['E2E_NODE_BOXES'] ||
 RELEASE_CHANNEL = (ENV['E2E_RELEASE_CHANNEL'] || "latest")
 RELEASE_VERSION = (ENV['E2E_RELEASE_VERSION'] || "")
 EXTERNAL_DB = (ENV['E2E_EXTERNAL_DB'] || "etcd")
+REGISTRY = (ENV['E2E_REGISTRY'] || "")
 NODE_CPUS = (ENV['E2E_NODE_CPUS'] || 2).to_i
 NODE_MEMORY = (ENV['E2E_NODE_MEMORY'] || 1024).to_i
 # Virtualbox >= 6.1.28 require `/etc/vbox/network.conf` for expanded private networks

@@ -41,7 +42,9 @@ def provision(vm, role, role_num, node_num)
   vm.provision "shell", inline: "ping -c 2 k3s.io"
   db_type = getDBType(role, role_num, vm)
 
+  if !REGISTRY.empty?
+    vm.provision "Set private registry", type: "shell", path: scripts_location + "/registry.sh", args: [ "#{NETWORK_PREFIX}.1" ]
+  end
 
   if role.include?("server") && role_num == 0
     vm.provision 'k3s-primary-server', type: 'k3s', run: 'once' do |k3s|

@@ -19,8 +19,11 @@ var nodeOS = flag.String("nodeOS", "generic/ubuntu2004", "VM operating system")
 var serverCount = flag.Int("serverCount", 3, "number of server nodes")
 var agentCount = flag.Int("agentCount", 2, "number of agent nodes")
 var hardened = flag.Bool("hardened", false, "true or false")
+var ci = flag.Bool("ci", false, "running on CI")
 
 // Environment Variables Info:
+// E2E_REGISTRY: true/false (default: false)
+// Controls which K3s version is installed first, upgrade is always to latest commit
 // E2E_RELEASE_VERSION=v1.23.3+k3s1
 // OR
 // E2E_RELEASE_CHANNEL=(commit|latest|stable), commit pulls latest commit from master

@@ -37,7 +40,7 @@ var (
 	agentNodeNames []string
 )
 
-var _ = Describe("Verify Upgrade", func() {
+var _ = Describe("Verify Upgrade", Ordered, func() {
 	Context("Cluster :", func() {
 		It("Starts up with no issues", func() {
 			var err error

@@ -375,11 +378,11 @@ var _ = Describe("Verify Upgrade", func() {
 
 var failed = false
 var _ = AfterEach(func() {
-	failed = failed || CurrentGinkgoTestDescription().Failed
+	failed = failed || CurrentSpecReport().Failed()
 })
 
 var _ = AfterSuite(func() {
-	if failed {
+	if failed && !*ci {
 		fmt.Println("FAILED!")
 	} else {
 		Expect(e2e.DestroyCluster()).To(Succeed())

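The new ci flag changes teardown semantics: previously any failure skipped vagrant destroy; now only failed local runs keep the VMs alive for hands-on debugging, while CI runners always clean up. The same gate, restated with editorial comments (the comments are not part of the commit):

var _ = AfterSuite(func() {
	if failed && !*ci {
		// Local run that failed: leave the VMs up so the cluster
		// can be inspected by hand.
		fmt.Println("FAILED!")
	} else {
		// CI run, or a passing suite: always reclaim the VMs.
		Expect(e2e.DestroyCluster()).To(Succeed())
	}
})
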
@@ -7,6 +7,7 @@ GITHUB_BRANCH = (ENV['E2E_GITHUB_BRANCH'] || "master")
 RELEASE_VERSION = (ENV['E2E_RELEASE_VERSION'] || "")
 EXTERNAL_DB = (ENV['E2E_EXTERNAL_DB'] || "etcd")
 HARDENED = (ENV['E2E_HARDENED'] || "")
+REGISTRY = (ENV['E2E_REGISTRY'] || "")
 RANCHER = (ENV['E2E_RANCHER'] || "")
 NODE_CPUS = (ENV['E2E_NODE_CPUS'] || 2).to_i
 NODE_MEMORY = (ENV['E2E_NODE_MEMORY'] || 1024).to_i

@@ -37,6 +38,10 @@ def provision(vm, role, role_num, node_num)
     vm.provision "Set kernel parameters", type: "shell", path: scripts_location + "/harden.sh"
     hardened_arg = "protect-kernel-defaults: true\nkube-apiserver-arg: \"enable-admission-plugins=NodeRestriction,PodSecurityPolicy,ServiceAccount\""
   end
+  if !REGISTRY.empty?
+    vm.provision "Set private registry", type: "shell", path: scripts_location + "/registry.sh", args: [ "#{NETWORK_PREFIX}.1" ]
+  end
+
 
   if role.include?("server") && role_num == 0
     vm.provision 'k3s-primary-server', type: 'k3s', run: 'once' do |k3s|

@@ -19,10 +19,13 @@ var nodeOS = flag.String("nodeOS", "generic/ubuntu2004", "VM operating system")
 var serverCount = flag.Int("serverCount", 3, "number of server nodes")
 var agentCount = flag.Int("agentCount", 2, "number of agent nodes")
 var hardened = flag.Bool("hardened", false, "true or false")
+var ci = flag.Bool("ci", false, "running on CI")
+var local = flag.Bool("local", false, "deploy a locally built K3s binary")
 
 // Environment Variables Info:
 // E2E_EXTERNAL_DB: mysql, postgres, etcd (default: etcd)
 // E2E_RELEASE_VERSION=v1.23.1+k3s2 (default: latest commit from master)
+// E2E_REGISTRY: true/false (default: false)
 
 func Test_E2EClusterValidation(t *testing.T) {
 	RegisterFailHandler(Fail)

@@ -36,11 +39,15 @@ var (
 	agentNodeNames []string
 )
 
-var _ = Describe("Verify Create", func() {
+var _ = Describe("Verify Create", Ordered, func() {
 	Context("Cluster :", func() {
 		It("Starts up with no issues", func() {
 			var err error
-			serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
+			if *local {
+				serverNodeNames, agentNodeNames, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount)
+			} else {
+				serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
+			}
 			Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog())
 			fmt.Println("CLUSTER CONFIG")
 			fmt.Println("OS:", *nodeOS)

@@ -77,14 +84,14 @@ var _ = Describe("Verify Create", func() {
 		})
 
 		It("Verifies ClusterIP Service", func() {
-			_, err := e2e.DeployWorkload("clusterip.yaml", kubeConfigFile, *hardened)
-			Expect(err).NotTo(HaveOccurred(), "Cluster IP manifest not deployed")
+			res, err := e2e.DeployWorkload("clusterip.yaml", kubeConfigFile, *hardened)
+			Expect(err).NotTo(HaveOccurred(), "Cluster IP manifest not deployed: "+res)
 
 			Eventually(func(g Gomega) {
 				cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-clusterip --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
 				res, err := e2e.RunCommand(cmd)
 				Expect(err).NotTo(HaveOccurred())
-				g.Expect(res).Should((ContainSubstring("test-clusterip")))
+				g.Expect(res).Should((ContainSubstring("test-clusterip")), "failed cmd: "+cmd+" result: "+res)
 			}, "240s", "5s").Should(Succeed())
 
 			clusterip, _ := e2e.FetchClusterIP(kubeConfigFile, "nginx-clusterip-svc", false)

@@ -207,8 +214,8 @@ var _ = Describe("Verify Create", func() {
 		})
 
 		It("Verifies Local Path Provisioner storage ", func() {
-			_, err := e2e.DeployWorkload("local-path-provisioner.yaml", kubeConfigFile, *hardened)
-			Expect(err).NotTo(HaveOccurred(), "local-path-provisioner manifest not deployed")
+			res, err := e2e.DeployWorkload("local-path-provisioner.yaml", kubeConfigFile, *hardened)
+			Expect(err).NotTo(HaveOccurred(), "local-path-provisioner manifest not deployed: "+res)
 
 			Eventually(func(g Gomega) {
 				cmd := "kubectl get pvc local-path-pvc --kubeconfig=" + kubeConfigFile

@@ -231,7 +238,7 @@ var _ = Describe("Verify Create", func() {
 			Expect(err).NotTo(HaveOccurred())
 
 			cmd = "kubectl delete pod volume-test --kubeconfig=" + kubeConfigFile
-			res, err := e2e.RunCommand(cmd)
+			res, err = e2e.RunCommand(cmd)
 			Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd+" result: "+res)
 
 			_, err = e2e.DeployWorkload("local-path-provisioner.yaml", kubeConfigFile, *hardened)

@@ -265,11 +272,11 @@ var _ = Describe("Verify Create", func() {
 
 var failed = false
 var _ = AfterEach(func() {
-	failed = failed || CurrentGinkgoTestDescription().Failed
+	failed = failed || CurrentSpecReport().Failed()
 })
 
 var _ = AfterSuite(func() {
-	if failed {
+	if failed && !*ci {
 		fmt.Println("FAILED!")
 	} else {
 		Expect(e2e.DestroyCluster()).To(Succeed())