
Added optional docker hub login

Signed-off-by: Derek Nola <derek.nola@suse.com>

Branch: pull/6131/head
Derek Nola · 2 years ago · commit f88cd3bfb6

Changed files:
  tests/e2e/testutils.go (21 lines changed)
  tests/e2e/validatecluster/validatecluster_test.go (5 lines changed)

tests/e2e/testutils.go

@@ -111,7 +111,7 @@ func CreateCluster(nodeOS string, serverCount, agentCount int) ([]string, []stri
 			return nil
 		})
 		// We must wait a bit between provisioning nodes to avoid too many learners attempting to join the cluster
-		time.Sleep(30 * time.Second)
+		time.Sleep(20 * time.Second)
 	}
 	if err := errg.Wait(); err != nil {
 		return nil, nil, err
@@ -288,11 +288,11 @@ func ParseNodes(kubeConfig string, print bool) ([]Node, error) {
 	return nodes, nil
 }

-func ParsePods(kubeconfig string, print bool) ([]Pod, error) {
+func ParsePods(kubeConfig string, print bool) ([]Pod, error) {
 	pods := make([]Pod, 0, 10)
 	podList := ""
-	cmd := "kubectl get pods -o wide --no-headers -A --kubeconfig=" + kubeconfig
+	cmd := "kubectl get pods -o wide --no-headers -A --kubeconfig=" + kubeConfig
 	res, _ := RunCommand(cmd)
 	res = strings.TrimSpace(res)
 	podList = res
@@ -328,6 +328,21 @@ func RestartCluster(nodeNames []string) error {
 	return nil
 }

+// DockerLogin authenticates to the docker registry for increased pull limits
+func DockerLogin(kubeConfig string, ci bool) error {
+	if !ci {
+		return nil
+	}
+	// Authenticate to docker hub to increase pull limit
+	cmd := fmt.Sprintf("kubectl create secret docker-registry regcred --from-file=%s --type=kubernetes.io/dockerconfigjson --kubeconfig=%s",
+		"../amd64_resource_files/docker_cred.json", kubeConfig)
+	res, err := RunCommand(cmd)
+	if err != nil {
+		return fmt.Errorf("failed to create docker registry secret: %s : %v", res, err)
+	}
+	return nil
+}
+
 // RunCmdOnNode executes a command from within the given node
 func RunCmdOnNode(cmd string, nodename string) (string, error) {
 	runcmd := "vagrant ssh -c \"" + cmd + "\" " + nodename
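
Note: the new DockerLogin helper points kubectl at a credentials file, ../amd64_resource_files/docker_cred.json, which is not included in this diff. As a minimal sketch, assuming that file follows the standard kubernetes.io/dockerconfigjson layout and that the Docker Hub credentials come from environment variables (both assumptions, not shown in this commit), it could be generated along these lines:

// Hypothetical generator for docker_cred.json; the env var names, registry URL,
// and output path are assumptions for illustration only.
package main

import (
	"encoding/base64"
	"encoding/json"
	"os"
)

// registryAuth mirrors one entry under "auths" in a dockerconfigjson file.
type registryAuth struct {
	Username string `json:"username"`
	Password string `json:"password"`
	Auth     string `json:"auth"` // base64("username:password")
}

type dockerConfig struct {
	Auths map[string]registryAuth `json:"auths"`
}

func main() {
	user := os.Getenv("DOCKER_USERNAME") // assumed env var
	pass := os.Getenv("DOCKER_PASSWORD") // assumed env var
	cfg := dockerConfig{Auths: map[string]registryAuth{
		"https://index.docker.io/v1/": {
			Username: user,
			Password: pass,
			Auth:     base64.StdEncoding.EncodeToString([]byte(user + ":" + pass)),
		},
	}}
	data, err := json.MarshalIndent(cfg, "", "  ")
	if err != nil {
		panic(err)
	}
	// Write the file where the relative path used by DockerLogin can resolve it.
	if err := os.WriteFile("docker_cred.json", data, 0600); err != nil {
		panic(err)
	}
}

A secret created this way would typically be referenced from pod specs via imagePullSecrets so that test images are pulled under the authenticated Docker Hub rate limit.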

tests/e2e/validatecluster/validatecluster_test.go

@@ -19,7 +19,7 @@ var nodeOS = flag.String("nodeOS", "generic/ubuntu2004", "VM operating system")
 var serverCount = flag.Int("serverCount", 3, "number of server nodes")
 var agentCount = flag.Int("agentCount", 2, "number of agent nodes")
 var hardened = flag.Bool("hardened", false, "true or false")
-var alwaysKill = flag.Bool("alwaysKill", false, "alaways destroy VMs even on test failure")
+var ci = flag.Bool("ci", false, "running on CI")

 // Environment Variables Info:
 // E2E_EXTERNAL_DB: mysql, postgres, etcd (default: etcd)
@@ -49,6 +49,7 @@ var _ = Describe("Verify Create", Ordered, func() {
 			fmt.Println("Agent Nodes:", agentNodeNames)
 			kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0])
 			Expect(err).NotTo(HaveOccurred())
+			Expect(e2e.DockerLogin(kubeConfigFile, *ci)).To(Succeed())
 		})

 		It("Checks Node and Pod Status", func() {
@@ -270,7 +271,7 @@ var _ = AfterEach(func() {
 })

 var _ = AfterSuite(func() {
-	if failed && !*alwaysKill {
+	if failed && !*ci {
 		fmt.Println("FAILED!")
 	} else {
 		Expect(e2e.DestroyCluster()).To(Succeed())
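
With this change, the -ci flag gates both the Docker Hub login (run right after the kubeconfig is generated) and the AfterSuite teardown: on CI the cluster is always destroyed, while a failed local run keeps the VMs for debugging. If a spec ever needed to confirm that the regcred secret was actually created before image-heavy tests run, a check along these lines could be added (a sketch only, not part of this commit; it reuses the suite's existing RunCommand helper and assumes regcred lands in the default namespace of the generated kubeconfig):

// Hypothetical sanity check, assuming the suite's kubeConfigFile variable and
// the e2e helpers are already in scope.
It("Verifies the docker registry secret exists", func() {
	if !*ci {
		Skip("docker login is only performed when -ci is set")
	}
	cmd := "kubectl get secret regcred --kubeconfig=" + kubeConfigFile
	res, err := e2e.RunCommand(cmd)
	Expect(err).NotTo(HaveOccurred(), "regcred secret not found: "+res)
})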
