mirror of https://github.com/k3s-io/k3s
[Release-1.25] Add private registry e2e test (#7722)
* add private registry e2e test (#7653) add private registry e2e test Signed-off-by: Ian Cardoso <osodracnai@gmail.com> Co-authored-by: Derek Nola <derek.nola@suse.com> * E2E: Startup test cleanup + RunCommand Enhancement (#7388) * Add beforesuite to startup * Reduce timeouts for startup * Fix cleanup + set kubeconfig Signed-off-by: Derek Nola <derek.nola@suse.com> --------- Signed-off-by: Ian Cardoso <osodracnai@gmail.com> Signed-off-by: Derek Nola <derek.nola@suse.com> Co-authored-by: Derek Nola <derek.nola@suse.com>pull/7752/head
parent
f089761921
commit
077fd8b9b6
|
@ -622,6 +622,7 @@ steps:
|
|||
- vagrant destroy -f
|
||||
- go test -v -timeout=30m ./secretsencryption_test.go -ci -local
|
||||
- cd ../upgradecluster
|
||||
- vagrant destroy -f
|
||||
- E2E_RELEASE_CHANNEL="v1.25" go test -v -timeout=45m ./upgradecluster_test.go -ci -local
|
||||
- docker stop registry && docker rm registry
|
||||
|
||||
|
|
|
@ -49,6 +49,14 @@ Install the necessary vagrant plugins with the following command:
|
|||
```bash
|
||||
vagrant plugin install vagrant-libvirt vagrant-scp vagrant-k3s vagrant-reload
|
||||
```
|
||||
### Kubectl
|
||||
|
||||
For linux
|
||||
```bash
|
||||
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
|
||||
sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
|
||||
```
|
||||
If it does not work, or you are on a different system, check the [official tutorial](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/)
|
||||
|
||||
## Running
|
||||
|
||||
|
|
|
@ -143,7 +143,7 @@ var _ = Describe("Verify MultiClusterCIDR Configuration", Ordered, func() {
|
|||
Expect(err).NotTo(HaveOccurred())
|
||||
for _, pod := range pods {
|
||||
if pod.Node == "agent-0" {
|
||||
Expect(pod.NodeIP).Should(Or(ContainSubstring("10.10.10"), ContainSubstring("10.248.")), pod.Name)
|
||||
Expect(pod.IP).Should(Or(ContainSubstring("10.10.10"), ContainSubstring("10.248.")), pod.Name)
|
||||
}
|
||||
}
|
||||
})
|
||||
|
@ -260,7 +260,7 @@ var _ = Describe("Verify MultiClusterCIDR Configuration", Ordered, func() {
|
|||
Expect(err).NotTo(HaveOccurred())
|
||||
for _, pod := range pods {
|
||||
if pod.Node == "agent-0" {
|
||||
Expect(pod.NodeIP).Should(Or(ContainSubstring("fd11:decf:c0ff"), ContainSubstring("2001:cafe:248")), pod.Name)
|
||||
Expect(pod.IP).Should(Or(ContainSubstring("fd11:decf:c0ff"), ContainSubstring("2001:cafe:248")), pod.Name)
|
||||
}
|
||||
}
|
||||
})
|
||||
|
|
|
@ -0,0 +1,131 @@
|
|||
ENV['VAGRANT_NO_PARALLEL'] = 'no'
NODE_ROLES = (ENV['E2E_NODE_ROLES'] ||
  ["server-0", "server-1", "server-2", "agent-0", "agent-1"])
NODE_BOXES = (ENV['E2E_NODE_BOXES'] ||
  ['generic/ubuntu2004', 'generic/ubuntu2004', 'generic/ubuntu2004', 'generic/ubuntu2004', 'generic/ubuntu2004'])
GITHUB_BRANCH = (ENV['E2E_GITHUB_BRANCH'] || "master")
RELEASE_VERSION = (ENV['E2E_RELEASE_VERSION'] || "")
EXTERNAL_DB = (ENV['E2E_EXTERNAL_DB'] || "etcd")
NODE_CPUS = (ENV['E2E_NODE_CPUS'] || 2).to_i
NODE_MEMORY = (ENV['E2E_NODE_MEMORY'] || 2048).to_i
# Virtualbox >= 6.1.28 require `/etc/vbox/network.conf` for expanded private networks
NETWORK_PREFIX = "10.10.10"
install_type = ""

# Provisions a single VM as a k3s node for the private-registry e2e test.
#
# vm       - the Vagrant VM configuration object
# role     - node role string, e.g. "server-0" or "agent-1"
# role_num - numeric suffix of the role ("server-1" -> 1)
# node_num - index of the node across ALL nodes; used for box choice and IP
#
# The first server (server-0) additionally runs Docker and hosts the insecure
# private registry at #{NETWORK_PREFIX}.100:5000; every node gets a
# registries.yaml mirror entry pointing at it.
def provision(vm, role, role_num, node_num)
  vm.box = NODE_BOXES[node_num]
  vm.hostname = role
  # An expanded netmask is required to allow VM<-->VM communication, virtualbox defaults to /32
  node_ip = "#{NETWORK_PREFIX}.#{100+node_num}"
  vm.network "private_network", ip: node_ip, netmask: "255.255.255.0"

  # NOTE(review): scripts_location is assigned but not referenced in this file;
  # presumably kept for parity with sibling Vagrantfiles — confirm before removing.
  # Dir.exist?/File.exist? replace the exists? aliases removed in Ruby 3.2.
  scripts_location = Dir.exist?("./scripts") ? "./scripts" : "../scripts"
  vagrant_defaults = File.exist?("./vagrantdefaults.rb") ? "./vagrantdefaults.rb" : "../vagrantdefaults.rb"
  load vagrant_defaults

  defaultOSConfigure(vm)
  install_type = getInstallType(vm, RELEASE_VERSION, GITHUB_BRANCH)

  # Sanity-check outbound networking before attempting any installs.
  vm.provision "shell", inline: "ping -c 2 k3s.io"

  # The formatting on this is a little weird, but it allows inserting variables
  # and still using the heredoc formatting with escaped quotes
  writePrivateRegistry = <<~'SCRIPT'.chomp % {net: NETWORK_PREFIX}
    mkdir -p /etc/rancher/k3s/
    echo "mirrors:
      my-registry.local:
        endpoint:
          - \"http://%{net}.100:5000\"" > /etc/rancher/k3s/registries.yaml
  SCRIPT

  setInsecureRegistryPolicy = <<~'SCRIPT'.chomp % {net: NETWORK_PREFIX}
    mkdir -p /etc/docker/
    echo "{ \"insecure-registries\" : [\"%{net}.100:5000\"] }" > /etc/docker/daemon.json
  SCRIPT

  if role.include?("server") && role_num == 0
    vm.provision "insecure-registry", type: "shell", inline: setInsecureRegistryPolicy
    vm.provision "private-registry", type: "shell", inline: writePrivateRegistry
    dockerInstall(vm)

    vm.provision 'k3s-primary-server', type: 'k3s', run: 'once' do |k3s|
      k3s.args = "server "
      k3s.config = <<~YAML
        token: vagrant
        node-external-ip: #{NETWORK_PREFIX}.100
        flannel-iface: eth1
        cluster-init: true
      YAML
      k3s.env = %W[K3S_KUBECONFIG_MODE=0644 #{install_type}]
      k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
    end

  elsif role.include?("server") && role_num != 0
    vm.provision "shell", inline: writePrivateRegistry

    vm.provision 'k3s-secondary-server', type: 'k3s', run: 'once' do |k3s|
      k3s.args = "server"
      k3s.config = <<~YAML
        server: "https://#{NETWORK_PREFIX}.100:6443"
        token: vagrant
        node-external-ip: #{node_ip}
        flannel-iface: eth1
      YAML
      k3s.env = %W[K3S_KUBECONFIG_MODE=0644 K3S_TOKEN=vagrant #{install_type}]
      k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
    end
  end

  if role.include?("agent")
    vm.provision "shell", inline: writePrivateRegistry

    vm.provision 'k3s-agent', type: 'k3s', run: 'once' do |k3s|
      k3s.args = "agent"
      k3s.config = <<~YAML
        server: "https://#{NETWORK_PREFIX}.100:6443"
        token: vagrant
        node-external-ip: #{node_ip}
        flannel-iface: eth1
      YAML
      k3s.env = %W[K3S_KUBECONFIG_MODE=0644 #{install_type}]
      k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
    end
  end
  # MicroOS images need a reboot after the transactional update performed by
  # the k3s provisioner before the service can run.
  if vm.box.to_s.include?("microos")
    vm.provision 'k3s-reload', type: 'reload', run: 'once'
    if !EXTERNAL_DB.empty?
      vm.provision "shell", inline: "docker start #{EXTERNAL_DB}"
    end
  end
end


Vagrant.configure("2") do |config|
  config.vagrant.plugins = ["vagrant-k3s", "vagrant-reload"]
  # Default provider is libvirt, virtualbox is only provided as a backup
  config.vm.provider "libvirt" do |v|
    v.cpus = NODE_CPUS
    v.memory = NODE_MEMORY
  end
  config.vm.provider "virtualbox" do |v|
    v.cpus = NODE_CPUS
    v.memory = NODE_MEMORY
  end

  # Env-var overrides arrive as space-separated strings; normalize to arrays.
  if NODE_ROLES.kind_of?(String)
    NODE_ROLES = NODE_ROLES.split(" ", -1)
  end
  if NODE_BOXES.kind_of?(String)
    NODE_BOXES = NODE_BOXES.split(" ", -1)
  end

  # Must iterate on the index, vagrant does not understand iterating
  # over the node roles themselves
  NODE_ROLES.length.times do |i|
    name = NODE_ROLES[i]
    role_num = name.split("-", -1).pop.to_i
    config.vm.define name do |node|
      provision(node.vm, name, role_num, i)
    end
  end
end
|
|
@ -0,0 +1,162 @@
|
|||
package validatecluster
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/k3s-io/k3s/tests/e2e"
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
// Valid nodeOS:
|
||||
// generic/ubuntu2004, generic/centos7, generic/rocky8,
|
||||
// opensuse/Leap-15.3.x86_64
|
||||
var nodeOS = flag.String("nodeOS", "generic/ubuntu2004", "VM operating system")
|
||||
var serverCount = flag.Int("serverCount", 1, "number of server nodes")
|
||||
var agentCount = flag.Int("agentCount", 1, "number of agent nodes")
|
||||
var ci = flag.Bool("ci", false, "running on CI")
|
||||
var local = flag.Bool("local", false, "deploy a locally built K3s binary")
|
||||
|
||||
// Environment Variables Info:
|
||||
// E2E_EXTERNAL_DB: mysql, postgres, etcd (default: etcd)
|
||||
// E2E_RELEASE_VERSION=v1.23.1+k3s2 (default: latest commit from master)
|
||||
// E2E_REGISTRY: true/false (default: false)
|
||||
|
||||
func Test_E2EPrivateRegistry(t *testing.T) {
|
||||
RegisterFailHandler(Fail)
|
||||
flag.Parse()
|
||||
suiteConfig, reporterConfig := GinkgoConfiguration()
|
||||
RunSpecs(t, "Create Cluster Test Suite", suiteConfig, reporterConfig)
|
||||
}
|
||||
|
||||
var (
|
||||
kubeConfigFile string
|
||||
serverNodeNames []string
|
||||
agentNodeNames []string
|
||||
)
|
||||
|
||||
var _ = ReportAfterEach(e2e.GenReport)
|
||||
|
||||
var _ = Describe("Verify Create", Ordered, func() {
|
||||
Context("Cluster :", func() {
|
||||
It("Starts up with no issues", func() {
|
||||
var err error
|
||||
if *local {
|
||||
serverNodeNames, agentNodeNames, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount)
|
||||
} else {
|
||||
serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
|
||||
}
|
||||
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
|
||||
fmt.Println("CLUSTER CONFIG")
|
||||
fmt.Println("OS:", *nodeOS)
|
||||
fmt.Println("Server Nodes:", serverNodeNames)
|
||||
fmt.Println("Agent Nodes:", agentNodeNames)
|
||||
kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0])
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
It("Checks Node and Pod Status", func() {
|
||||
fmt.Printf("\nFetching node status\n")
|
||||
Eventually(func(g Gomega) {
|
||||
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
for _, node := range nodes {
|
||||
g.Expect(node.Status).Should(Equal("Ready"))
|
||||
}
|
||||
}, "620s", "5s").Should(Succeed())
|
||||
_, _ = e2e.ParseNodes(kubeConfigFile, true)
|
||||
|
||||
fmt.Printf("\nFetching Pods status\n")
|
||||
Eventually(func(g Gomega) {
|
||||
pods, err := e2e.ParsePods(kubeConfigFile, false)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
for _, pod := range pods {
|
||||
if strings.Contains(pod.Name, "helm-install") {
|
||||
g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
|
||||
} else {
|
||||
g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
|
||||
}
|
||||
}
|
||||
}, "620s", "5s").Should(Succeed())
|
||||
_, _ = e2e.ParsePods(kubeConfigFile, true)
|
||||
})
|
||||
|
||||
It("Create new private registry", func() {
|
||||
registry, err := e2e.RunCmdOnNode("sudo docker run -d -p 5000:5000 --restart=always --name registry registry:2 ", serverNodeNames[0])
|
||||
fmt.Println(registry)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
})
|
||||
It("ensures registry is working", func() {
|
||||
a, err := e2e.RunCmdOnNode("sudo docker ps -a | grep registry\n", serverNodeNames[0])
|
||||
fmt.Println(a)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
})
|
||||
It("Should pull and image from dockerhub and send it to private registry", func() {
|
||||
cmd := "sudo docker pull nginx"
|
||||
_, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0])
|
||||
Expect(err).NotTo(HaveOccurred(), "failed: "+cmd)
|
||||
|
||||
nodeIP, err := e2e.FetchNodeExternalIP(serverNodeNames[0])
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
cmd = "sudo docker tag nginx " + nodeIP + ":5000/my-webpage"
|
||||
_, err = e2e.RunCmdOnNode(cmd, serverNodeNames[0])
|
||||
Expect(err).NotTo(HaveOccurred(), "failed: "+cmd)
|
||||
|
||||
cmd = "sudo docker push " + nodeIP + ":5000/my-webpage"
|
||||
_, err = e2e.RunCmdOnNode(cmd, serverNodeNames[0])
|
||||
Expect(err).NotTo(HaveOccurred(), "failed: "+cmd)
|
||||
|
||||
cmd = "sudo docker image remove nginx " + nodeIP + ":5000/my-webpage"
|
||||
_, err = e2e.RunCmdOnNode(cmd, serverNodeNames[0])
|
||||
Expect(err).NotTo(HaveOccurred(), "failed: "+cmd)
|
||||
})
|
||||
It("Should create and validate deployment with private registry on", func() {
|
||||
res, err := e2e.RunCmdOnNode("sudo kubectl create deployment my-webpage --image=my-registry.local/my-webpage", serverNodeNames[0])
|
||||
fmt.Println(res)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
var pod e2e.Pod
|
||||
Eventually(func(g Gomega) {
|
||||
pods, err := e2e.ParsePods(kubeConfigFile, false)
|
||||
for _, p := range pods {
|
||||
if strings.Contains(p.Name, "my-webpage") {
|
||||
pod = p
|
||||
}
|
||||
}
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
g.Expect(pod.Status).Should(Equal("Running"))
|
||||
g.Expect(pod.Node).Should(Equal(agentNodeNames[0]))
|
||||
}, "60s", "5s").Should(Succeed())
|
||||
|
||||
cmd := "curl " + pod.IP
|
||||
Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).To(ContainSubstring("Welcome to nginx!"))
|
||||
})
|
||||
|
||||
})
|
||||
})
|
||||
|
||||
var failed bool
|
||||
var _ = AfterEach(func() {
|
||||
failed = failed || CurrentSpecReport().Failed()
|
||||
})
|
||||
|
||||
var _ = AfterSuite(func() {
|
||||
|
||||
if failed && !*ci {
|
||||
fmt.Println("FAILED!")
|
||||
} else {
|
||||
r1, err := e2e.RunCmdOnNode("sudo docker rm -f registry", serverNodeNames[0])
|
||||
Expect(err).NotTo(HaveOccurred(), r1)
|
||||
r2, err := e2e.RunCmdOnNode("sudo kubectl delete deployment my-webpage", serverNodeNames[0])
|
||||
Expect(err).NotTo(HaveOccurred(), r2)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(e2e.DestroyCluster()).To(Succeed())
|
||||
Expect(os.Remove(kubeConfigFile)).To(Succeed())
|
||||
}
|
||||
})
|
|
@ -82,17 +82,18 @@ func KillK3sCluster(nodes []string) error {
|
|||
|
||||
var _ = ReportAfterEach(e2e.GenReport)
|
||||
|
||||
var _ = BeforeSuite(func() {
|
||||
var err error
|
||||
if *local {
|
||||
serverNodeNames, agentNodeNames, err = e2e.CreateLocalCluster(*nodeOS, 1, 1)
|
||||
} else {
|
||||
serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, 1, 1)
|
||||
}
|
||||
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
|
||||
})
|
||||
|
||||
var _ = Describe("Various Startup Configurations", Ordered, func() {
|
||||
Context("Verify CRI-Dockerd :", func() {
|
||||
It("Stands up the nodes", func() {
|
||||
var err error
|
||||
if *local {
|
||||
serverNodeNames, agentNodeNames, err = e2e.CreateLocalCluster(*nodeOS, 1, 1)
|
||||
} else {
|
||||
serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, 1, 1)
|
||||
}
|
||||
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
|
||||
})
|
||||
It("Starts K3s with no issues", func() {
|
||||
dockerYAML := "docker: true"
|
||||
err := StartK3sCluster(append(serverNodeNames, agentNodeNames...), dockerYAML, dockerYAML)
|
||||
|
@ -114,7 +115,7 @@ var _ = Describe("Various Startup Configurations", Ordered, func() {
|
|||
for _, node := range nodes {
|
||||
g.Expect(node.Status).Should(Equal("Ready"))
|
||||
}
|
||||
}, "620s", "5s").Should(Succeed())
|
||||
}, "360s", "5s").Should(Succeed())
|
||||
_, _ = e2e.ParseNodes(kubeConfigFile, true)
|
||||
|
||||
fmt.Printf("\nFetching pods status\n")
|
||||
|
@ -128,7 +129,7 @@ var _ = Describe("Various Startup Configurations", Ordered, func() {
|
|||
g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
|
||||
}
|
||||
}
|
||||
}, "620s", "5s").Should(Succeed())
|
||||
}, "360s", "5s").Should(Succeed())
|
||||
_, _ = e2e.ParsePods(kubeConfigFile, true)
|
||||
})
|
||||
It("Kills the cluster", func() {
|
||||
|
@ -158,7 +159,7 @@ var _ = Describe("Various Startup Configurations", Ordered, func() {
|
|||
for _, node := range nodes {
|
||||
g.Expect(node.Status).Should(Equal("Ready"))
|
||||
}
|
||||
}, "620s", "5s").Should(Succeed())
|
||||
}, "360s", "5s").Should(Succeed())
|
||||
_, _ = e2e.ParseNodes(kubeConfigFile, true)
|
||||
|
||||
fmt.Printf("\nFetching pods status\n")
|
||||
|
@ -172,7 +173,7 @@ var _ = Describe("Various Startup Configurations", Ordered, func() {
|
|||
g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
|
||||
}
|
||||
}
|
||||
}, "620s", "5s").Should(Succeed())
|
||||
}, "360s", "5s").Should(Succeed())
|
||||
_, _ = e2e.ParsePods(kubeConfigFile, true)
|
||||
})
|
||||
It("Kills the cluster", func() {
|
||||
|
@ -202,7 +203,7 @@ var _ = Describe("Various Startup Configurations", Ordered, func() {
|
|||
for _, node := range nodes {
|
||||
g.Expect(node.Status).Should(Equal("Ready"))
|
||||
}
|
||||
}, "620s", "5s").Should(Succeed())
|
||||
}, "360s", "5s").Should(Succeed())
|
||||
_, _ = e2e.ParseNodes(kubeConfigFile, true)
|
||||
|
||||
fmt.Printf("\nFetching pods status\n")
|
||||
|
@ -216,21 +217,21 @@ var _ = Describe("Various Startup Configurations", Ordered, func() {
|
|||
g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
|
||||
}
|
||||
}
|
||||
}, "620s", "5s").Should(Succeed())
|
||||
}, "360s", "5s").Should(Succeed())
|
||||
_, _ = e2e.ParsePods(kubeConfigFile, true)
|
||||
})
|
||||
|
||||
It("Returns pod metrics", func() {
|
||||
cmd := "kubectl top pod -A"
|
||||
Eventually(func() error {
|
||||
_, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0])
|
||||
_, err := e2e.RunCommand(cmd)
|
||||
return err
|
||||
}, "620s", "5s").Should(Succeed())
|
||||
}, "600s", "5s").Should(Succeed())
|
||||
})
|
||||
|
||||
It("Returns node metrics", func() {
|
||||
cmd := "kubectl top node"
|
||||
_, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0])
|
||||
_, err := e2e.RunCommand(cmd)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
|
@ -242,7 +243,7 @@ var _ = Describe("Various Startup Configurations", Ordered, func() {
|
|||
|
||||
It("Collects logs from a pod", func() {
|
||||
cmd := "kubectl logs -n kube-system -l app.kubernetes.io/name=traefik -c traefik"
|
||||
_, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0])
|
||||
_, err := e2e.RunCommand(cmd)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
|
|
|
@ -35,7 +35,7 @@ type Pod struct {
|
|||
Ready string
|
||||
Status string
|
||||
Restarts string
|
||||
NodeIP string
|
||||
IP string
|
||||
Node string
|
||||
}
|
||||
|
||||
|
@ -294,6 +294,9 @@ func GenKubeConfigFile(serverName string) (string, error) {
|
|||
if err := os.WriteFile(kubeConfigFile, []byte(kubeConfig), 0644); err != nil {
|
||||
return "", err
|
||||
}
|
||||
if err := os.Setenv("E2E_KUBECONFIG", kubeConfigFile); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return kubeConfigFile, nil
|
||||
}
|
||||
|
||||
|
@ -372,16 +375,10 @@ func ParseNodes(kubeConfig string, print bool) ([]Node, error) {
|
|||
return nodes, nil
|
||||
}
|
||||
|
||||
func ParsePods(kubeConfig string, print bool) ([]Pod, error) {
|
||||
func formatPods(input string) ([]Pod, error) {
|
||||
pods := make([]Pod, 0, 10)
|
||||
podList := ""
|
||||
|
||||
cmd := "kubectl get pods -o wide --no-headers -A --kubeconfig=" + kubeConfig
|
||||
res, _ := RunCommand(cmd)
|
||||
res = strings.TrimSpace(res)
|
||||
podList = res
|
||||
|
||||
split := strings.Split(res, "\n")
|
||||
input = strings.TrimSpace(input)
|
||||
split := strings.Split(input, "\n")
|
||||
for _, rec := range split {
|
||||
fields := strings.Fields(string(rec))
|
||||
if len(fields) < 8 {
|
||||
|
@ -393,11 +390,25 @@ func ParsePods(kubeConfig string, print bool) ([]Pod, error) {
|
|||
Ready: fields[2],
|
||||
Status: fields[3],
|
||||
Restarts: fields[4],
|
||||
NodeIP: fields[6],
|
||||
IP: fields[6],
|
||||
Node: fields[7],
|
||||
}
|
||||
pods = append(pods, pod)
|
||||
}
|
||||
return pods, nil
|
||||
}
|
||||
|
||||
func ParsePods(kubeConfig string, print bool) ([]Pod, error) {
|
||||
podList := ""
|
||||
|
||||
cmd := "kubectl get pods -o wide --no-headers -A"
|
||||
res, _ := RunCommand(cmd)
|
||||
podList = strings.TrimSpace(res)
|
||||
|
||||
pods, err := formatPods(res)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if print {
|
||||
fmt.Println(podList)
|
||||
}
|
||||
|
@ -450,9 +461,11 @@ func RunCmdOnNode(cmd string, nodename string) (string, error) {
|
|||
return out, nil
|
||||
}
|
||||
|
||||
// RunCommand executes a command on the host
func RunCommand(cmd string) (string, error) {
	command := exec.Command("bash", "-c", cmd)
	// Propagate the suite's kubeconfig to kubectl invocations when set.
	if kubeconfig, ok := os.LookupEnv("E2E_KUBECONFIG"); ok {
		command.Env = append(os.Environ(), "KUBECONFIG="+kubeconfig)
	}
	output, err := command.CombinedOutput()
	return string(output), err
}
|
||||
|
|
Loading…
Reference in New Issue