Modify e2e/lifecycle tests to import e2elog.Logf

Signed-off-by: Jiatong Wang <wangjiatong@vmware.com>
k3s-v1.15.3
Jiatong Wang 2019-05-07 00:21:44 -07:00
parent 8ea9edbb02
commit b26b6f0d5c
6 changed files with 30 additions and 24 deletions
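
The change is mechanical in every file: the framework's log package is imported under the e2elog alias and printf-style log calls are routed through it instead of framework.Logf. A minimal sketch of the pattern follows; the package name and helper function are hypothetical, while the import path and Logf usage are taken from the diff below.

package lifecycle

import (
    e2elog "k8s.io/kubernetes/test/e2e/framework/log"
)

// logRebootResult is a hypothetical helper showing the before/after call site.
func logRebootResult(name string) {
    // Before: framework.Logf("Reboot successful on node %s", name)
    // After: same format string and arguments, via the dedicated log package.
    e2elog.Logf("Reboot successful on node %s", name)
}

The framework package itself stays imported wherever its other helpers (RunCmd, Failf, IssueSSHCommand, and so on) are still used; only the Logf call sites change.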


@@ -35,6 +35,7 @@ go_library(
 "//test/e2e/framework:go_default_library",
 "//test/e2e/framework/ginkgowrapper:go_default_library",
 "//test/e2e/framework/lifecycle:go_default_library",
+"//test/e2e/framework/log:go_default_library",
 "//test/e2e/upgrades:go_default_library",
 "//test/e2e/upgrades/apps:go_default_library",
 "//test/e2e/upgrades/storage:go_default_library",


@@ -29,6 +29,7 @@ import (
 "k8s.io/apimachinery/pkg/labels"
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 "github.com/onsi/ginkgo"
 "github.com/onsi/gomega"
@@ -297,7 +298,7 @@ var _ = SIGDescribe("Addon update", func() {
 sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, svcAddonEnsureExists, destinationDir, svcAddonEnsureExists))
 // Delete the "ensure exist class" addon at the end.
 defer func() {
-framework.Logf("Cleaning up ensure exist class addon.")
+e2elog.Logf("Cleaning up ensure exist class addon.")
 gomega.Expect(f.ClientSet.CoreV1().Services(addonNsName).Delete("addon-ensure-exists-test", nil)).NotTo(gomega.HaveOccurred())
 }()
@@ -389,7 +390,7 @@ func sshExecAndVerify(client *ssh.Client, cmd string) {
 }
 func sshExec(client *ssh.Client, cmd string) (string, string, int, error) {
-framework.Logf("Executing '%s' on %v", cmd, client.RemoteAddr())
+e2elog.Logf("Executing '%s' on %v", cmd, client.RemoteAddr())
 session, err := client.NewSession()
 if err != nil {
 return "", "", 0, fmt.Errorf("error creating session to host %s: '%v'", client.RemoteAddr(), err)
@@ -421,7 +422,7 @@ func sshExec(client *ssh.Client, cmd string) (string, string, int, error) {
 }
 func writeRemoteFile(sshClient *ssh.Client, data, dir, fileName string, mode os.FileMode) error {
-framework.Logf(fmt.Sprintf("Writing remote file '%s/%s' on %v", dir, fileName, sshClient.RemoteAddr()))
+e2elog.Logf(fmt.Sprintf("Writing remote file '%s/%s' on %v", dir, fileName, sshClient.RemoteAddr()))
 session, err := sshClient.NewSession()
 if err != nil {
 return fmt.Errorf("error creating session to host %s: '%v'", sshClient.RemoteAddr(), err)


@@ -28,10 +28,11 @@ import (
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/kubernetes/test/e2e/common"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 )
 func addMasterReplica(zone string) error {
-framework.Logf(fmt.Sprintf("Adding a new master replica, zone: %s", zone))
+e2elog.Logf(fmt.Sprintf("Adding a new master replica, zone: %s", zone))
 _, _, err := framework.RunCmd(path.Join(framework.TestContext.RepoRoot, "hack/e2e-internal/e2e-grow-cluster.sh"), zone, "true", "true", "false")
 if err != nil {
 return err
@@ -40,7 +41,7 @@ func addMasterReplica(zone string) error {
 }
 func removeMasterReplica(zone string) error {
-framework.Logf(fmt.Sprintf("Removing an existing master replica, zone: %s", zone))
+e2elog.Logf(fmt.Sprintf("Removing an existing master replica, zone: %s", zone))
 _, _, err := framework.RunCmd(path.Join(framework.TestContext.RepoRoot, "hack/e2e-internal/e2e-shrink-cluster.sh"), zone, "true", "false", "false")
 if err != nil {
 return err
@@ -49,7 +50,7 @@ func removeMasterReplica(zone string) error {
 }
 func addWorkerNodes(zone string) error {
-framework.Logf(fmt.Sprintf("Adding worker nodes, zone: %s", zone))
+e2elog.Logf(fmt.Sprintf("Adding worker nodes, zone: %s", zone))
 _, _, err := framework.RunCmd(path.Join(framework.TestContext.RepoRoot, "hack/e2e-internal/e2e-grow-cluster.sh"), zone, "true", "false", "true")
 if err != nil {
 return err
@@ -58,7 +59,7 @@ func addWorkerNodes(zone string) error {
 }
 func removeWorkerNodes(zone string) error {
-framework.Logf(fmt.Sprintf("Removing worker nodes, zone: %s", zone))
+e2elog.Logf(fmt.Sprintf("Removing worker nodes, zone: %s", zone))
 _, _, err := framework.RunCmd(path.Join(framework.TestContext.RepoRoot, "hack/e2e-internal/e2e-shrink-cluster.sh"), zone, "true", "true", "true")
 if err != nil {
 return err


@@ -25,6 +25,7 @@ import (
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 "github.com/onsi/ginkgo"
 "github.com/onsi/gomega"
@@ -108,7 +109,7 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() {
 pass := true
 for _, node := range originalNodes.Items {
 if _, err := leaseClient.Get(node.ObjectMeta.Name, metav1.GetOptions{}); err != nil {
-framework.Logf("Try to get lease of node %s, but got error: %v", node.ObjectMeta.Name, err)
+e2elog.Logf("Try to get lease of node %s, but got error: %v", node.ObjectMeta.Name, err)
 pass = false
 }
 }


@@ -22,7 +22,7 @@ import (
 "sync"
 "time"
-"k8s.io/api/core/v1"
+v1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/fields"
 "k8s.io/apimachinery/pkg/labels"
@@ -30,6+30,7 @@ import (
 clientset "k8s.io/client-go/kubernetes"
 api "k8s.io/kubernetes/pkg/apis/core"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 testutils "k8s.io/kubernetes/test/utils"
 "github.com/onsi/ginkgo"
@@ -71,7 +72,7 @@ var _ = SIGDescribe("Reboot [Disruptive] [Feature:Reboot]", func() {
 gomega.Expect(err).NotTo(gomega.HaveOccurred())
 for _, e := range events.Items {
-framework.Logf("event for %v: %v %v: %v", e.InvolvedObject.Name, e.Source, e.Reason, e.Message)
+e2elog.Logf("event for %v: %v %v: %v", e.InvolvedObject.Name, e.Source, e.Reason, e.Message)
 }
 }
 // In GKE, our current tunneling setup has the potential to hold on to a broken tunnel (from a
@@ -135,7 +136,7 @@ func testReboot(c clientset.Interface, rebootCmd string, hook terminationHook) {
 nodelist := framework.GetReadySchedulableNodesOrDie(c)
 if hook != nil {
 defer func() {
-framework.Logf("Executing termination hook on nodes")
+e2elog.Logf("Executing termination hook on nodes")
 hook(framework.TestContext.Provider, nodelist)
 }()
 }
@@ -162,7 +163,7 @@ func testReboot(c clientset.Interface, rebootCmd string, hook terminationHook) {
 for ix := range nodelist.Items {
 n := nodelist.Items[ix]
 if !result[ix] {
-framework.Logf("Node %s failed reboot test.", n.ObjectMeta.Name)
+e2elog.Logf("Node %s failed reboot test.", n.ObjectMeta.Name)
 }
 }
 framework.Failf("Test failed; at least one node failed to reboot in the time given.")
@@ -176,9 +177,9 @@ func printStatusAndLogsForNotReadyPods(c clientset.Interface, ns string, podName
 prefix = "Retrieving log for the last terminated container"
 }
 if err != nil {
-framework.Logf("%s %s, err: %v:\n%s\n", prefix, id, err, log)
+e2elog.Logf("%s %s, err: %v:\n%s\n", prefix, id, err, log)
 } else {
-framework.Logf("%s %s:\n%s\n", prefix, id, log)
+e2elog.Logf("%s %s:\n%s\n", prefix, id, log)
 }
 }
 podNameSet := sets.NewString(podNames...)
@@ -192,7 +193,7 @@ func printStatusAndLogsForNotReadyPods(c clientset.Interface, ns string, podName
 if ok, _ := testutils.PodRunningReady(p); ok {
 continue
 }
-framework.Logf("Status for not ready pod %s/%s: %+v", p.Namespace, p.Name, p.Status)
+e2elog.Logf("Status for not ready pod %s/%s: %+v", p.Namespace, p.Name, p.Status)
 // Print the log of the containers if pod is not running and ready.
 for _, container := range p.Status.ContainerStatuses {
 cIdentifer := fmt.Sprintf("%s/%s/%s", p.Namespace, p.Name, container.Name)
@@ -221,16 +222,16 @@ func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool {
 ns := metav1.NamespaceSystem
 ps, err := testutils.NewPodStore(c, ns, labels.Everything(), fields.OneTermEqualSelector(api.PodHostField, name))
 if err != nil {
-framework.Logf("Couldn't initialize pod store: %v", err)
+e2elog.Logf("Couldn't initialize pod store: %v", err)
 return false
 }
 defer ps.Stop()
 // Get the node initially.
-framework.Logf("Getting %s", name)
+e2elog.Logf("Getting %s", name)
 node, err := c.CoreV1().Nodes().Get(name, metav1.GetOptions{})
 if err != nil {
-framework.Logf("Couldn't get node %s", name)
+e2elog.Logf("Couldn't get node %s", name)
 return false
 }
@@ -255,7 +256,7 @@ func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool {
 podNames = append(podNames, p.ObjectMeta.Name)
 }
 }
-framework.Logf("Node %s has %d assigned pods with no liveness probes: %v", name, len(podNames), podNames)
+e2elog.Logf("Node %s has %d assigned pods with no liveness probes: %v", name, len(podNames), podNames)
 // For each pod, we do a sanity check to ensure it's running / healthy
 // or succeeded now, as that's what we'll be checking later.
@@ -266,7 +267,7 @@ func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool {
 // Reboot the node.
 if err = framework.IssueSSHCommand(rebootCmd, provider, node); err != nil {
-framework.Logf("Error while issuing ssh command: %v", err)
+e2elog.Logf("Error while issuing ssh command: %v", err)
 return false
 }
@@ -288,7 +289,7 @@ func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool {
 return false
 }
-framework.Logf("Reboot successful on node %s", name)
+e2elog.Logf("Reboot successful on node %s", name)
 return true
 }
@@ -299,7 +300,7 @@ func catLogHook(logPath string) terminationHook {
 for _, n := range nodes.Items {
 cmd := fmt.Sprintf("cat %v && rm %v", logPath, logPath)
 if _, err := framework.IssueSSHCommandWithResult(cmd, provider, &n); err != nil {
-framework.Logf("Error while issuing ssh command: %v", err)
+e2elog.Logf("Error while issuing ssh command: %v", err)
 }
 }


@@ -25,6 +25,7 @@ import (
 "k8s.io/apimachinery/pkg/labels"
 "k8s.io/kubernetes/test/e2e/common"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 testutils "k8s.io/kubernetes/test/utils"
 "github.com/onsi/ginkgo"
@@ -61,7 +62,7 @@ var _ = SIGDescribe("Restart [Disruptive]", func() {
 ginkgo.By("ensuring all nodes are ready")
 originalNodes, err = framework.CheckNodesReady(f.ClientSet, numNodes, framework.NodeReadyInitialTimeout)
 gomega.Expect(err).NotTo(gomega.HaveOccurred())
-framework.Logf("Got the following nodes before restart: %v", nodeNames(originalNodes))
+e2elog.Logf("Got the following nodes before restart: %v", nodeNames(originalNodes))
 ginkgo.By("ensuring all pods are running and ready")
 allPods := ps.List()
@@ -91,7 +92,7 @@ var _ = SIGDescribe("Restart [Disruptive]", func() {
 ginkgo.By("ensuring all nodes are ready after the restart")
 nodesAfter, err := framework.CheckNodesReady(f.ClientSet, numNodes, framework.RestartNodeReadyAgainTimeout)
 gomega.Expect(err).NotTo(gomega.HaveOccurred())
-framework.Logf("Got the following nodes after restart: %v", nodeNames(nodesAfter))
+e2elog.Logf("Got the following nodes after restart: %v", nodeNames(nodesAfter))
 // Make sure that we have the same number of nodes. We're not checking
 // that the names match because that's implementation specific.